//===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the IRBuilder class, which is used as a convenient way
// to create LLVM instructions with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/IRBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

using namespace llvm;
/// CreateGlobalString - Make a new global variable with an initializer that
/// has an array of i8 type filled in with the nul-terminated string value
/// specified. If Name is specified, it is the name of the global variable
/// created.
GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
                                                  const Twine &Name,
                                                  unsigned AddressSpace,
                                                  Module *M, bool AddNull) {
  Constant *StrConstant = ConstantDataArray::getString(Context, Str, AddNull);
  if (!M)
    M = BB->getParent()->getParent();
  auto *GV = new GlobalVariable(
      *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
      StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(Align(1));
  return GV;
}
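
// For illustration (editor's IR sketch; names are examples, not from this
// file): CreateGlobalString("hi") produces a private constant roughly like
//   @str = private unnamed_addr constant [3 x i8] c"hi\00", align 1
// and returns the new GlobalVariable.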
Type *IRBuilderBase::getCurrentFunctionReturnType() const {
  assert(BB && BB->getParent() && "No current function!");
  return BB->getParent()->getReturnType();
}
DebugLoc IRBuilderBase::getCurrentDebugLocation() const {
  for (auto &KV : MetadataToCopy)
    if (KV.first == LLVMContext::MD_dbg)
      return {cast<DILocation>(KV.second)};

  return {};
}
void IRBuilderBase::SetInstDebugLocation(Instruction *I) const {
  for (const auto &KV : MetadataToCopy)
    if (KV.first == LLVMContext::MD_dbg) {
      I->setDebugLoc(DebugLoc(KV.second));
      return;
    }
}
CallInst *
IRBuilderBase::createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
                                const Twine &Name, FMFSource FMFSource,
                                ArrayRef<OperandBundleDef> OpBundles) {
  CallInst *CI = CreateCall(Callee, Ops, OpBundles, Name);
  if (isa<FPMathOperator>(CI))
    CI->setFastMathFlags(FMFSource.get(FMF));
  return CI;
}
Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
  assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
  if (cast<ConstantInt>(Scaling)->isZero())
    return Scaling;
  CallInst *CI =
      CreateIntrinsic(Intrinsic::vscale, {Scaling->getType()}, {}, {}, Name);
  return cast<ConstantInt>(Scaling)->isOne() ? CI : CreateMul(CI, Scaling);
}
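
// For illustration (editor's IR sketch; value names are examples): with
// Scaling == 4 the code above emits roughly
//   %vscale = call i64 @llvm.vscale.i64()
//   %n = mul i64 %vscale, 4
// while Scaling == 1 returns the plain @llvm.vscale call unchanged.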
Value *IRBuilderBase::CreateElementCount(Type *DstType, ElementCount EC) {
  Constant *MinEC = ConstantInt::get(DstType, EC.getKnownMinValue());
  return EC.isScalable() ? CreateVScale(MinEC) : MinEC;
}
Value *IRBuilderBase::CreateTypeSize(Type *DstType, TypeSize Size) {
  Constant *MinSize = ConstantInt::get(DstType, Size.getKnownMinValue());
  return Size.isScalable() ? CreateVScale(MinSize) : MinSize;
}
Value *IRBuilderBase::CreateStepVector(Type *DstType, const Twine &Name) {
  Type *STy = DstType->getScalarType();
  if (isa<ScalableVectorType>(DstType)) {
    Type *StepVecType = DstType;
    // TODO: We expect this special case (element type < 8 bits) to be
    // temporary - once the intrinsic properly supports < 8 bits this code
    // can be removed.
    if (STy->getScalarSizeInBits() < 8)
      StepVecType =
          VectorType::get(getInt8Ty(), cast<ScalableVectorType>(DstType));
    Value *Res =
        CreateIntrinsic(Intrinsic::stepvector, {StepVecType}, {}, {}, Name);
    if (StepVecType != DstType)
      Res = CreateTrunc(Res, DstType);
    return Res;
  }

  unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();

  // Create a vector of consecutive numbers from zero to VF.
  SmallVector<Constant *, 8> Indices;
  for (unsigned i = 0; i < NumEls; ++i)
    Indices.push_back(ConstantInt::get(STy, i));

  // Add the consecutive indices to the vector value.
  return ConstantVector::get(Indices);
}
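
// For illustration (editor's IR sketch): for a fixed <4 x i32> the result is
// the constant <i32 0, i32 1, i32 2, i32 3>; for a scalable
// <vscale x 4 x i32> it is roughly
//   %step = call <vscale x 4 x i32> @llvm.stepvector.nxv4i32()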
CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
                                      MaybeAlign Align, bool isVolatile,
                                      MDNode *TBAATag, MDNode *ScopeTag,
                                      MDNode *NoAliasTag) {
  Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};

  CallInst *CI = CreateIntrinsic(Intrinsic::memset, Tys, Ops);

  if (Align)
    cast<MemSetInst>(CI)->setDestAlignment(*Align);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
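
// For illustration (editor's IR sketch; value names are examples): with a
// 16-byte destination alignment, the code above emits roughly
//   call void @llvm.memset.p0.i64(ptr align 16 %dst, i8 %val, i64 %len,
//                                 i1 false)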
CallInst *IRBuilderBase::CreateMemSetInline(Value *Dst, MaybeAlign DstAlign,
                                            Value *Val, Value *Size,
                                            bool IsVolatile, MDNode *TBAATag,
                                            MDNode *ScopeTag,
                                            MDNode *NoAliasTag) {
  Value *Ops[] = {Dst, Val, Size, getInt1(IsVolatile)};
  Type *Tys[] = {Dst->getType(), Size->getType()};

  CallInst *CI = CreateIntrinsic(Intrinsic::memset_inline, Tys, Ops);

  if (DstAlign)
    cast<MemSetInlineInst>(CI)->setDestAlignment(*DstAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
    Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
    MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) {

  Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};

  CallInst *CI =
      CreateIntrinsic(Intrinsic::memset_element_unordered_atomic, Tys, Ops);

  cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
CallInst *IRBuilderBase::CreateMemTransferInst(
    Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
    MaybeAlign SrcAlign, Value *Size, bool isVolatile, MDNode *TBAATag,
    MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert((IntrID == Intrinsic::memcpy || IntrID == Intrinsic::memcpy_inline ||
          IntrID == Intrinsic::memmove) &&
         "Unexpected intrinsic ID");
  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};

  CallInst *CI = CreateIntrinsic(IntrID, Tys, Ops);

  auto *MCI = cast<MemTransferInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};

  CallInst *CI =
      CreateIntrinsic(Intrinsic::memcpy_element_unordered_atomic, Tys, Ops);

  // Set the alignment of the pointer args.
  auto *AMCI = cast<AtomicMemCpyInst>(CI);
  AMCI->setDestAlignment(DstAlign);
  AMCI->setSourceAlignment(SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
/// isConstantOne - Return true only if \p Val is the constant integer 1.
static bool isConstantOne(const Value *Val) {
  assert(Val && "isConstantOne does not work with nullptr Val");
  const ConstantInt *CVal = dyn_cast<ConstantInt>(Val);
  return CVal && CVal->isOne();
}
CallInst *IRBuilderBase::CreateMalloc(Type *IntPtrTy, Type *AllocTy,
                                      Value *AllocSize, Value *ArraySize,
                                      ArrayRef<OperandBundleDef> OpB,
                                      Function *MallocF, const Twine &Name) {
  // malloc(type) becomes:
  //       i8* malloc(typeSize)
  // malloc(type, arraySize) becomes:
  //       i8* malloc(typeSize*arraySize)
  if (!ArraySize)
    ArraySize = ConstantInt::get(IntPtrTy, 1);
  else if (ArraySize->getType() != IntPtrTy)
    ArraySize = CreateIntCast(ArraySize, IntPtrTy, false);

  if (!isConstantOne(ArraySize)) {
    if (isConstantOne(AllocSize)) {
      AllocSize = ArraySize; // Operand * 1 = Operand
    } else {
      // Multiply type size by the array size...
      AllocSize = CreateMul(ArraySize, AllocSize, "mallocsize");
    }
  }

  assert(AllocSize->getType() == IntPtrTy && "malloc arg is wrong size");
  // Create the call to Malloc.
  Module *M = BB->getParent()->getParent();
  Type *BPTy = PointerType::getUnqual(Context);
  FunctionCallee MallocFunc = MallocF;
  if (!MallocFunc)
    // prototype malloc as "void *malloc(size_t)"
    MallocFunc = M->getOrInsertFunction("malloc", BPTy, IntPtrTy);
  CallInst *MCall = CreateCall(MallocFunc, AllocSize, OpB, Name);

  MCall->setTailCall();
  if (Function *F = dyn_cast<Function>(MallocFunc.getCallee())) {
    MCall->setCallingConv(F->getCallingConv());
    F->setReturnDoesNotAlias();
  }

  assert(!MCall->getType()->isVoidTy() && "Malloc has void return type");

  return MCall;
}
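
// For illustration (editor's IR sketch; value names are examples): for a
// 16-byte element type and a dynamic array size, the code above emits roughly
//   %mallocsize = mul i64 %arraysize, 16
//   %malloccall = tail call ptr @malloc(i64 %mallocsize)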
CallInst *IRBuilderBase::CreateMalloc(Type *IntPtrTy, Type *AllocTy,
                                      Value *AllocSize, Value *ArraySize,
                                      Function *MallocF, const Twine &Name) {

  return CreateMalloc(IntPtrTy, AllocTy, AllocSize, ArraySize, {}, MallocF,
                      Name);
}
/// CreateFree - Generate the IR for a call to the builtin free function.
CallInst *IRBuilderBase::CreateFree(Value *Source,
                                    ArrayRef<OperandBundleDef> Bundles) {
  assert(Source->getType()->isPointerTy() &&
         "Can not free something of nonpointer type!");

  Module *M = BB->getParent()->getParent();

  Type *VoidTy = Type::getVoidTy(M->getContext());
  Type *VoidPtrTy = PointerType::getUnqual(M->getContext());
  // prototype free as "void free(void*)"
  FunctionCallee FreeFunc = M->getOrInsertFunction("free", VoidTy, VoidPtrTy);
  CallInst *Result = CreateCall(FreeFunc, Source, Bundles, "");
  Result->setTailCall();
  if (Function *F = dyn_cast<Function>(FreeFunc.getCallee()))
    Result->setCallingConv(F->getCallingConv());

  return Result;
}
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};

  CallInst *CI =
      CreateIntrinsic(Intrinsic::memmove_element_unordered_atomic, Tys, Ops);

  // Set the alignment of the pointer args.
  CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
  CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
CallInst *IRBuilderBase::getReductionIntrinsic(Intrinsic::ID ID, Value *Src) {
  Value *Ops[] = {Src};
  Type *Tys[] = {Src->getType()};
  return CreateIntrinsic(ID, Tys, Ops);
}
CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
  Value *Ops[] = {Acc, Src};
  return CreateIntrinsic(Intrinsic::vector_reduce_fadd, {Src->getType()}, Ops);
}

CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
  Value *Ops[] = {Acc, Src};
  return CreateIntrinsic(Intrinsic::vector_reduce_fmul, {Src->getType()}, Ops);
}
CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_add, Src);
}

CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_mul, Src);
}

CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_and, Src);
}

CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_or, Src);
}

CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_xor, Src);
}
CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
  return getReductionIntrinsic(ID, Src);
}

CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
  return getReductionIntrinsic(ID, Src);
}
CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmax, Src);
}

CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmin, Src);
}

CallInst *IRBuilderBase::CreateFPMaximumReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmaximum, Src);
}

CallInst *IRBuilderBase::CreateFPMinimumReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fminimum, Src);
}
CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.start only applies to pointers.");
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.start requires the size to be an i64");
  Value *Ops[] = {Size, Ptr};
  return CreateIntrinsic(Intrinsic::lifetime_start, {Ptr->getType()}, Ops);
}

CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.end only applies to pointers.");
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.end requires the size to be an i64");
  Value *Ops[] = {Size, Ptr};
  return CreateIntrinsic(Intrinsic::lifetime_end, {Ptr->getType()}, Ops);
}
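
// For illustration (editor's IR sketch; value names are examples): marking a
// 16-byte alloca live emits roughly
//   call void @llvm.lifetime.start.p0(i64 16, ptr %buf)
// and the matching end emits @llvm.lifetime.end.p0 with the same operands.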
CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {

  assert(isa<PointerType>(Ptr->getType()) &&
         "invariant.start only applies to pointers.");
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "invariant.start requires the size to be an i64");

  Value *Ops[] = {Size, Ptr};
  // Fill in the single overloaded type: memory object type.
  Type *ObjectPtr[1] = {Ptr->getType()};
  return CreateIntrinsic(Intrinsic::invariant_start, ObjectPtr, Ops);
}
static MaybeAlign getAlign(Value *Ptr) {
  if (auto *O = dyn_cast<GlobalObject>(Ptr))
    return O->getAlign();
  if (auto *A = dyn_cast<GlobalAlias>(Ptr))
    return A->getAliaseeObject()->getAlign();
  return {};
}
CallInst *IRBuilderBase::CreateThreadLocalAddress(Value *Ptr) {
  assert(isa<GlobalValue>(Ptr) && cast<GlobalValue>(Ptr)->isThreadLocal() &&
         "threadlocal_address only applies to thread local variables.");
  CallInst *CI = CreateIntrinsic(llvm::Intrinsic::threadlocal_address,
                                 {Ptr->getType()}, {Ptr});
  if (MaybeAlign A = getAlign(Ptr)) {
    CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), *A));
    CI->addRetAttr(Attribute::getWithAlignment(CI->getContext(), *A));
  }
  return CI;
}
CallInst *
IRBuilderBase::CreateAssumption(Value *Cond,
                                ArrayRef<OperandBundleDef> OpBundles) {
  assert(Cond->getType() == getInt1Ty() &&
         "an assumption condition must be of type i1");

  Value *Ops[] = {Cond};
  Module *M = BB->getParent()->getParent();
  Function *FnAssume = Intrinsic::getOrInsertDeclaration(M, Intrinsic::assume);
  return CreateCall(FnAssume, Ops, OpBundles);
}
Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
  return CreateIntrinsic(Intrinsic::experimental_noalias_scope_decl, {},
                         {Scope});
}
/// Create a call to a Masked Load intrinsic.
/// \p Ty        - vector type to load
/// \p Ptr       - base pointer for the load
/// \p Alignment - alignment of the source location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
/// \p PassThru  - pass-through value that is used to fill the masked-off lanes
///                of the result
/// \p Name      - name of the result variable
CallInst *IRBuilderBase::CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment,
                                          Value *Mask, Value *PassThru,
                                          const Twine &Name) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  assert(Ty->isVectorTy() && "Type should be vector");
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = PoisonValue::get(Ty);
  Type *OverloadedTypes[] = {Ty, PtrTy};
  Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
  return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
                               OverloadedTypes, Name);
}
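
// For illustration (editor's IR sketch; value names are examples): a
// 16-byte-aligned masked load of <4 x float> emits roughly
//   %v = call <4 x float> @llvm.masked.load.v4f32.p0(
//            ptr %p, i32 16, <4 x i1> %mask, <4 x float> %passthru)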
/// Create a call to a Masked Store intrinsic.
/// \p Val       - data to be stored,
/// \p Ptr       - base pointer for the store
/// \p Alignment - alignment of the destination location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
                                           Align Alignment, Value *Mask) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = Val->getType();
  assert(DataTy->isVectorTy() && "Val should be a vector");
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = {DataTy, PtrTy};
  Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
  return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
}
/// Create a call to a Masked intrinsic, with given intrinsic Id,
/// an array of operands - Ops, and an array of overloaded types -
/// OverloadedTypes.
CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
                                               ArrayRef<Value *> Ops,
                                               ArrayRef<Type *> OverloadedTypes,
                                               const Twine &Name) {
  return CreateIntrinsic(Id, OverloadedTypes, Ops, {}, Name);
}
/// Create a call to a Masked Gather intrinsic.
/// \p Ty       - vector type to gather
/// \p Ptrs     - vector of pointers for loading
/// \p Align    - alignment for one element
/// \p Mask     - vector of booleans which indicates what vector lanes should
///               be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
///               of the result
/// \p Name     - name of the result variable
CallInst *IRBuilderBase::CreateMaskedGather(Type *Ty, Value *Ptrs,
                                            Align Alignment, Value *Mask,
                                            Value *PassThru,
                                            const Twine &Name) {
  auto *VecTy = cast<VectorType>(Ty);
  ElementCount NumElts = VecTy->getElementCount();
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");

  if (!Mask)
    Mask = getAllOnesMask(NumElts);

  if (!PassThru)
    PassThru = PoisonValue::get(Ty);

  Type *OverloadedTypes[] = {Ty, PtrsTy};
  Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
                               Name);
}
/// Create a call to a Masked Scatter intrinsic.
/// \p Data  - data to be stored,
/// \p Ptrs  - the vector of pointers, where the \p Data elements should be
///            stored
/// \p Align - alignment for one element
/// \p Mask  - vector of booleans which indicates what vector lanes should
///            be accessed in memory
CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
                                             Align Alignment, Value *Mask) {
  auto *PtrsTy = cast<VectorType>(Ptrs->getType());
  auto *DataTy = cast<VectorType>(Data->getType());
  ElementCount NumElts = PtrsTy->getElementCount();

  if (!Mask)
    Mask = getAllOnesMask(NumElts);

  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
}
/// Create a call to Masked Expand Load intrinsic
/// \p Ty        - vector type to load
/// \p Ptr       - base pointer for the load
/// \p Align     - alignment of \p Ptr
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
/// \p PassThru  - pass-through value that is used to fill the masked-off lanes
///                of the result
/// \p Name      - name of the result variable
CallInst *IRBuilderBase::CreateMaskedExpandLoad(Type *Ty, Value *Ptr,
                                                MaybeAlign Align, Value *Mask,
                                                Value *PassThru,
                                                const Twine &Name) {
  assert(Ty->isVectorTy() && "Type should be vector");
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = PoisonValue::get(Ty);
  Type *OverloadedTypes[] = {Ty};
  Value *Ops[] = {Ptr, Mask, PassThru};
  CallInst *CI = CreateMaskedIntrinsic(Intrinsic::masked_expandload, Ops,
                                       OverloadedTypes, Name);
  if (Align)
    CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), *Align));
  return CI;
}
/// Create a call to Masked Compress Store intrinsic
/// \p Val   - data to be stored,
/// \p Ptr   - base pointer for the store
/// \p Align - alignment of \p Ptr
/// \p Mask  - vector of booleans which indicates what vector lanes should
///            be accessed in memory
CallInst *IRBuilderBase::CreateMaskedCompressStore(Value *Val, Value *Ptr,
                                                   MaybeAlign Align,
                                                   Value *Mask) {
  Type *DataTy = Val->getType();
  assert(DataTy->isVectorTy() && "Val should be a vector");
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = {DataTy};
  Value *Ops[] = {Val, Ptr, Mask};
  CallInst *CI = CreateMaskedIntrinsic(Intrinsic::masked_compressstore, Ops,
                                       OverloadedTypes);
  if (Align)
    CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), *Align));
  return CI;
}
template <typename T0>
static std::vector<Value *>
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
                  Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
  std::vector<Value *> Args;
  Args.push_back(B.getInt64(ID));
  Args.push_back(B.getInt32(NumPatchBytes));
  Args.push_back(ActualCallee);
  Args.push_back(B.getInt32(CallArgs.size()));
  Args.push_back(B.getInt32(Flags));
  llvm::append_range(Args, CallArgs);
  // GC Transition and Deopt args are now always handled via operand bundle.
  // They will be removed from the signature of gc.statepoint shortly.
  Args.push_back(B.getInt32(0));
  Args.push_back(B.getInt32(0));
  // GC args are now encoded in the gc-live operand bundle.
  return Args;
}
template <typename T1, typename T2, typename T3>
static std::vector<OperandBundleDef>
getStatepointBundles(std::optional<ArrayRef<T1>> TransitionArgs,
                     std::optional<ArrayRef<T2>> DeoptArgs,
                     ArrayRef<T3> GCArgs) {
  std::vector<OperandBundleDef> Rval;
  if (DeoptArgs) {
    SmallVector<Value *, 16> DeoptValues;
    llvm::append_range(DeoptValues, *DeoptArgs);
    Rval.emplace_back("deopt", DeoptValues);
  }
  if (TransitionArgs) {
    SmallVector<Value *, 16> TransitionValues;
    llvm::append_range(TransitionValues, *TransitionArgs);
    Rval.emplace_back("gc-transition", TransitionValues);
  }
  if (GCArgs.size()) {
    SmallVector<Value *, 16> LiveValues;
    llvm::append_range(LiveValues, GCArgs);
    Rval.emplace_back("gc-live", LiveValues);
  }
  return Rval;
}
template <typename T0, typename T1, typename T2, typename T3>
static CallInst *CreateGCStatepointCallCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
    std::optional<ArrayRef<T1>> TransitionArgs,
    std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint = Intrinsic::getOrInsertDeclaration(
      M, Intrinsic::experimental_gc_statepoint,
      {ActualCallee.getCallee()->getType()});

  std::vector<Value *> Args = getStatepointArgs(
      *Builder, ID, NumPatchBytes, ActualCallee.getCallee(), Flags, CallArgs);

  CallInst *CI = Builder->CreateCall(
      FnStatepoint, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  CI->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualCallee.getFunctionType()));
  return CI;
}
CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    ArrayRef<Value *> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, std::nullopt /* No Transition Args */, DeoptArgs, GCArgs, Name);
}
CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    uint32_t Flags, ArrayRef<Value *> CallArgs,
    std::optional<ArrayRef<Use>> TransitionArgs,
    std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
      DeoptArgs, GCArgs, Name);
}
CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    ArrayRef<Use> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, std::nullopt, DeoptArgs, GCArgs, Name);
}
template <typename T0, typename T1, typename T2, typename T3>
static InvokeInst *CreateGCStatepointInvokeCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualInvokee, BasicBlock *NormalDest,
    BasicBlock *UnwindDest, uint32_t Flags, ArrayRef<T0> InvokeArgs,
    std::optional<ArrayRef<T1>> TransitionArgs,
    std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint = Intrinsic::getOrInsertDeclaration(
      M, Intrinsic::experimental_gc_statepoint,
      {ActualInvokee.getCallee()->getType()});

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee.getCallee(),
                        Flags, InvokeArgs);

  InvokeInst *II = Builder->CreateInvoke(
      FnStatepoint, NormalDest, UnwindDest, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  II->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualInvokee.getFunctionType()));
  return II;
}
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest,
    ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs,
      std::nullopt /* No Transition Args*/, DeoptArgs, GCArgs, Name);
}
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
    ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Use>> TransitionArgs,
    std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
}
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
    std::optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, std::nullopt, DeoptArgs,
      GCArgs, Name);
}
CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
                                        Type *ResultType, const Twine &Name) {
  Intrinsic::ID ID = Intrinsic::experimental_gc_result;
  Type *Types[] = {ResultType};

  Value *Args[] = {Statepoint};
  return CreateIntrinsic(ID, Types, Args, {}, Name);
}
CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
                                          int BaseOffset, int DerivedOffset,
                                          Type *ResultType, const Twine &Name) {
  Type *Types[] = {ResultType};

  Value *Args[] = {Statepoint, getInt32(BaseOffset), getInt32(DerivedOffset)};
  return CreateIntrinsic(Intrinsic::experimental_gc_relocate, Types, Args, {},
                         Name);
}
CallInst *IRBuilderBase::CreateGCGetPointerBase(Value *DerivedPtr,
                                                const Twine &Name) {
  Type *PtrTy = DerivedPtr->getType();
  return CreateIntrinsic(Intrinsic::experimental_gc_get_pointer_base,
                         {PtrTy, PtrTy}, {DerivedPtr}, {}, Name);
}
CallInst *IRBuilderBase::CreateGCGetPointerOffset(Value *DerivedPtr,
                                                  const Twine &Name) {
  Type *PtrTy = DerivedPtr->getType();
  return CreateIntrinsic(Intrinsic::experimental_gc_get_pointer_offset,
                         {PtrTy}, {DerivedPtr}, {}, Name);
}
CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
                                              FMFSource FMFSource,
                                              const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, {V->getType()});
  return createCallHelper(Fn, {V}, Name, FMFSource);
}
Value *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
                                            Value *RHS, FMFSource FMFSource,
                                            const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, {LHS->getType()});
  if (Value *V = Folder.FoldBinaryIntrinsic(ID, LHS, RHS, Fn->getReturnType(),
                                            /*FMFSource=*/nullptr))
    return V;
  return createCallHelper(Fn, {LHS, RHS}, Name, FMFSource);
}
CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
                                         ArrayRef<Type *> Types,
                                         ArrayRef<Value *> Args,
                                         FMFSource FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, Types);
  return createCallHelper(Fn, Args, Name, FMFSource);
}
CallInst *IRBuilderBase::CreateIntrinsic(Type *RetTy, Intrinsic::ID ID,
                                         ArrayRef<Value *> Args,
                                         FMFSource FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();

  SmallVector<Intrinsic::IITDescriptor> Table;
  Intrinsic::getIntrinsicInfoTableEntries(ID, Table);
  ArrayRef<Intrinsic::IITDescriptor> TableRef(Table);

  SmallVector<Type *> ArgTys;
  ArgTys.reserve(Args.size());
  for (auto &I : Args)
    ArgTys.push_back(I->getType());
  FunctionType *FTy = FunctionType::get(RetTy, ArgTys, false);
  SmallVector<Type *> OverloadTys;
  Intrinsic::MatchIntrinsicTypesResult Res =
      matchIntrinsicSignature(FTy, TableRef, OverloadTys);
  (void)Res;
  assert(Res == Intrinsic::MatchIntrinsicTypes_Match && TableRef.empty() &&
         "Wrong types for intrinsic!");
  // TODO: Handle varargs intrinsics.

  Function *Fn = Intrinsic::getOrInsertDeclaration(M, ID, OverloadTys);
  return createCallHelper(Fn, Args, Name, FMFSource);
}
CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
    Intrinsic::ID ID, Value *L, Value *R, FMFSource FMFSource,
    const Twine &Name, MDNode *FPMathTag, std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *RoundingV = getConstrainedFPRounding(Rounding);
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMFSource.get(FMF);

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, RoundingV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}
CallInst *IRBuilderBase::CreateConstrainedFPUnroundedBinOp(
    Intrinsic::ID ID, Value *L, Value *R, FMFSource FMFSource,
    const Twine &Name, MDNode *FPMathTag,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMFSource.get(FMF);

  CallInst *C =
      CreateIntrinsic(ID, {L->getType()}, {L, R, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}
Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
                                   const Twine &Name, MDNode *FPMathTag) {
  if (Instruction::isBinaryOp(Opc)) {
    assert(Ops.size() == 2 && "Invalid number of operands!");
    return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
                       Ops[0], Ops[1], Name, FPMathTag);
  }
  if (Instruction::isUnaryOp(Opc)) {
    assert(Ops.size() == 1 && "Invalid number of operands!");
    return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
                      Ops[0], Name, FPMathTag);
  }
  llvm_unreachable("Unexpected opcode!");
}
CallInst *IRBuilderBase::CreateConstrainedFPCast(
    Intrinsic::ID ID, Value *V, Type *DestTy, FMFSource FMFSource,
    const Twine &Name, MDNode *FPMathTag, std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMFSource.get(FMF);

  CallInst *C;
  if (Intrinsic::hasConstrainedFPRoundingModeOperand(ID)) {
    Value *RoundingV = getConstrainedFPRounding(Rounding);
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
                        nullptr, Name);
  } else
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
                        Name);

  setConstrainedFPCallAttr(C);

  if (isa<FPMathOperator>(C))
    setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}
Value *IRBuilderBase::CreateFCmpHelper(CmpInst::Predicate P, Value *LHS,
                                       Value *RHS, const Twine &Name,
                                       MDNode *FPMathTag, FMFSource FMFSource,
                                       bool IsSignaling) {
  if (IsFPConstrained) {
    auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
                          : Intrinsic::experimental_constrained_fcmp;
    return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
  }

  if (auto *V = Folder.FoldCmp(P, LHS, RHS))
    return V;
  return Insert(
      setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMFSource.get(FMF)),
      Name);
}
*IRBuilderBase::CreateConstrainedFPCmp(
1025 Intrinsic::ID ID
, CmpInst::Predicate P
, Value
*L
, Value
*R
,
1026 const Twine
&Name
, std::optional
<fp::ExceptionBehavior
> Except
) {
1027 Value
*PredicateV
= getConstrainedFPPredicate(P
);
1028 Value
*ExceptV
= getConstrainedFPExcept(Except
);
1030 CallInst
*C
= CreateIntrinsic(ID
, {L
->getType()},
1031 {L
, R
, PredicateV
, ExceptV
}, nullptr, Name
);
1032 setConstrainedFPCallAttr(C
);
CallInst *IRBuilderBase::CreateConstrainedFPCall(
    Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  llvm::SmallVector<Value *, 6> UseArgs;

  append_range(UseArgs, Args);

  if (Intrinsic::hasConstrainedFPRoundingModeOperand(Callee->getIntrinsicID()))
    UseArgs.push_back(getConstrainedFPRounding(Rounding));
  UseArgs.push_back(getConstrainedFPExcept(Except));

  CallInst *C = CreateCall(Callee, UseArgs, Name);
  setConstrainedFPCallAttr(C);
  return C;
}
Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
                                   const Twine &Name, Instruction *MDFrom) {
  return CreateSelectFMF(C, True, False, {}, Name, MDFrom);
}
Value *IRBuilderBase::CreateSelectFMF(Value *C, Value *True, Value *False,
                                      FMFSource FMFSource, const Twine &Name,
                                      Instruction *MDFrom) {
  if (auto *V = Folder.FoldSelect(C, True, False))
    return V;

  SelectInst *Sel = SelectInst::Create(C, True, False);
  if (MDFrom) {
    MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
    MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
    Sel = addBranchMetadata(Sel, Prof, Unpred);
  }
  if (isa<FPMathOperator>(Sel))
    setFPAttrs(Sel, /*MDNode=*/nullptr, FMFSource.get(FMF));
  return Insert(Sel, Name);
}
Value *IRBuilderBase::CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
                                    const Twine &Name) {
  assert(LHS->getType() == RHS->getType() &&
         "Pointer subtraction operand types must match!");
  Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
  Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
  Value *Difference = CreateSub(LHS_int, RHS_int);
  return CreateExactSDiv(Difference, ConstantExpr::getSizeOf(ElemTy),
                         Name);
}
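
// For illustration (editor's IR sketch; value names are examples): for
// ElemTy == i32, where getSizeOf folds to 4, this emits roughly
//   %lhs = ptrtoint ptr %p to i64
//   %rhs = ptrtoint ptr %q to i64
//   %sub = sub i64 %lhs, %rhs
//   %dif = sdiv exact i64 %sub, 4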
Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "launder.invariant.group only applies to pointers.");
  auto *PtrType = Ptr->getType();
  Module *M = BB->getParent()->getParent();
  Function *FnLaunderInvariantGroup = Intrinsic::getOrInsertDeclaration(
      M, Intrinsic::launder_invariant_group, {PtrType});

  assert(FnLaunderInvariantGroup->getReturnType() == PtrType &&
         FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
             PtrType &&
         "LaunderInvariantGroup should take and return the same type");

  return CreateCall(FnLaunderInvariantGroup, {Ptr});
}
Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "strip.invariant.group only applies to pointers.");

  auto *PtrType = Ptr->getType();
  Module *M = BB->getParent()->getParent();
  Function *FnStripInvariantGroup = Intrinsic::getOrInsertDeclaration(
      M, Intrinsic::strip_invariant_group, {PtrType});

  assert(FnStripInvariantGroup->getReturnType() == PtrType &&
         FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
             PtrType &&
         "StripInvariantGroup should take and return the same type");

  return CreateCall(FnStripInvariantGroup, {Ptr});
}
Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) {
  auto *Ty = cast<VectorType>(V->getType());
  if (isa<ScalableVectorType>(Ty)) {
    Module *M = BB->getParent()->getParent();
    Function *F =
        Intrinsic::getOrInsertDeclaration(M, Intrinsic::vector_reverse, Ty);
    return Insert(CallInst::Create(F, V), Name);
  }
  // Keep the original behaviour for fixed vector
  SmallVector<int, 8> ShuffleMask;
  int NumElts = Ty->getElementCount().getKnownMinValue();
  for (int i = 0; i < NumElts; ++i)
    ShuffleMask.push_back(NumElts - i - 1);
  return CreateShuffleVector(V, ShuffleMask, Name);
}
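
// For illustration (editor's IR sketch; value names are examples): reversing
// a fixed <4 x i32> emits roughly
//   %rev = shufflevector <4 x i32> %v, <4 x i32> poison,
//                        <4 x i32> <i32 3, i32 2, i32 1, i32 0>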
Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
                                         const Twine &Name) {
  assert(isa<VectorType>(V1->getType()) && "Unexpected type");
  assert(V1->getType() == V2->getType() &&
         "Splice expects matching operand types!");

  if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
    Module *M = BB->getParent()->getParent();
    Function *F =
        Intrinsic::getOrInsertDeclaration(M, Intrinsic::vector_splice, VTy);

    Value *Ops[] = {V1, V2, getInt32(Imm)};
    return Insert(CallInst::Create(F, Ops), Name);
  }

  unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
  assert(((-Imm <= NumElts) || (Imm < NumElts)) &&
         "Invalid immediate for vector splice!");

  // Keep the original behaviour for fixed vector
  unsigned Idx = (NumElts + Imm) % NumElts;
  SmallVector<int, 8> Mask;
  for (unsigned I = 0; I < NumElts; ++I)
    Mask.push_back(Idx + I);

  return CreateShuffleVector(V1, V2, Mask);
}
Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
                                        const Twine &Name) {
  auto EC = ElementCount::getFixed(NumElts);
  return CreateVectorSplat(EC, V, Name);
}
Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V,
                                        const Twine &Name) {
  assert(EC.isNonZero() && "Cannot splat to an empty vector!");

  // First insert it into a poison vector so we can shuffle it.
  Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
  V = CreateInsertElement(Poison, V, getInt64(0), Name + ".splatinsert");

  // Shuffle the value across the desired number of elements.
  SmallVector<int, 16> Zeros;
  Zeros.resize(EC.getKnownMinValue());
  return CreateShuffleVector(V, Zeros, Name + ".splat");
}
Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
    Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.array.access.index.");

  Value *LastIndexV = getInt32(LastIndex);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  SmallVector<Value *, 4> IdxList(Dimension, Zero);
  IdxList.push_back(LastIndexV);

  Type *ResultType = GetElementPtrInst::getGEPReturnType(Base, IdxList);

  Value *DimV = getInt32(Dimension);
  CallInst *Fn =
      CreateIntrinsic(Intrinsic::preserve_array_access_index,
                      {ResultType, BaseType}, {Base, DimV, LastIndexV});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}
Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
    Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.union.access.index.");
  auto *BaseType = Base->getType();

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn = CreateIntrinsic(Intrinsic::preserve_union_access_index,
                                 {BaseType, BaseType}, {Base, DIIndex});
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}
Value *IRBuilderBase::CreatePreserveStructAccessIndex(
    Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.struct.access.index.");

  Value *GEPIndex = getInt32(Index);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(Base, {Zero, GEPIndex});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn =
      CreateIntrinsic(Intrinsic::preserve_struct_access_index,
                      {ResultType, BaseType}, {Base, GEPIndex, DIIndex});
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}
*IRBuilderBase::createIsFPClass(Value
*FPNum
, unsigned Test
) {
1249 ConstantInt
*TestV
= getInt32(Test
);
1250 return CreateIntrinsic(Intrinsic::is_fpclass
, {FPNum
->getType()},
1254 CallInst
*IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout
&DL
,
1257 Value
*OffsetValue
) {
1258 SmallVector
<Value
*, 4> Vals({PtrValue
, AlignValue
});
1260 Vals
.push_back(OffsetValue
);
1261 OperandBundleDefT
<Value
*> AlignOpB("align", Vals
);
1262 return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB
});
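
// For illustration (editor's IR sketch; value names are examples): assuming
// 32-byte alignment of %p emits roughly
//   call void @llvm.assume(i1 true) [ "align"(ptr %p, i64 32) ]
// with the optional offset appended as a third bundle operand.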
CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   unsigned Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  assert(Alignment != 0 && "Invalid Alignment");
  auto *PtrTy = cast<PointerType>(PtrValue->getType());
  Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
  Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
  return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
}
CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   Value *Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
}
IRBuilderDefaultInserter::~IRBuilderDefaultInserter() = default;
IRBuilderCallbackInserter::~IRBuilderCallbackInserter() = default;
IRBuilderFolder::~IRBuilderFolder() = default;
void ConstantFolder::anchor() {}
void NoFolder::anchor() {}