//===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the IRBuilder class, which is used as a convenient way
// to create LLVM instructions with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//
#include "llvm/IR/IRBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
#include <vector>

using namespace llvm;
/// CreateGlobalString - Make a new global variable with an initializer that
/// has array of i8 type filled in with the nul terminated string value
/// specified. If Name is specified, it is the name of the global variable
/// created.
GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
                                                  const Twine &Name,
                                                  unsigned AddressSpace) {
  Constant *StrConstant = ConstantDataArray::getString(Context, Str);
  Module &M = *BB->getParent()->getParent();
  auto *GV = new GlobalVariable(M, StrConstant->getType(), true,
                                GlobalValue::PrivateLinkage, StrConstant, Name,
                                nullptr, GlobalVariable::NotThreadLocal,
                                AddressSpace);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(Align::None());
  return GV;
}
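// Usage sketch (illustrative only; B, Fn and "msg" are hypothetical names):
// with an IRBuilder<> B whose insert point is inside a function, a caller can
// materialize a private, unnamed_addr string global:
//   GlobalVariable *GV = B.CreateGlobalString("hello world", "msg");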
Type *IRBuilderBase::getCurrentFunctionReturnType() const {
  assert(BB && BB->getParent() && "No current function!");
  return BB->getParent()->getReturnType();
}
Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
  auto *PT = cast<PointerType>(Ptr->getType());
  if (PT->getElementType()->isIntegerTy(8))
    return Ptr;

  // Otherwise, we need to insert a bitcast.
  PT = getInt8PtrTy(PT->getAddressSpace());
  BitCastInst *BCI = new BitCastInst(Ptr, PT, "");
  BB->getInstList().insert(InsertPt, BCI);
  SetInstDebugLocation(BCI);
  return BCI;
}
static CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
                                  IRBuilderBase *Builder,
                                  const Twine &Name = "",
                                  Instruction *FMFSource = nullptr) {
  CallInst *CI = CallInst::Create(Callee, Ops, Name);
  if (FMFSource)
    CI->copyFastMathFlags(FMFSource);
  Builder->GetInsertBlock()->getInstList().insert(Builder->GetInsertPoint(), CI);
  Builder->SetInstDebugLocation(CI);
  return CI;
}
static InvokeInst *createInvokeHelper(Function *Invokee, BasicBlock *NormalDest,
                                      BasicBlock *UnwindDest,
                                      ArrayRef<Value *> Ops,
                                      IRBuilderBase *Builder,
                                      const Twine &Name = "") {
  InvokeInst *II =
      InvokeInst::Create(Invokee, NormalDest, UnwindDest, Ops, Name);
  Builder->GetInsertBlock()->getInstList().insert(Builder->GetInsertPoint(),
                                                  II);
  Builder->SetInstDebugLocation(II);
  return II;
}
CallInst *IRBuilderBase::
CreateMemSet(Value *Ptr, Value *Val, Value *Size, unsigned Align,
             bool isVolatile, MDNode *TBAATag, MDNode *ScopeTag,
             MDNode *NoAliasTag) {
  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
  Type *Tys[] = { Ptr->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  if (Align)
    cast<MemSetInst>(CI)->setDestAlignment(Align);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
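// Usage sketch (illustrative only; B and Dst are hypothetical names): with an
// IRBuilder<> B positioned in a block and an i8*-compatible pointer Dst, zero
// out 64 bytes with 16-byte destination alignment:
//   B.CreateMemSet(Dst, B.getInt8(0), B.getInt64(64), /*Align=*/16);
// This expands to a call to the llvm.memset intrinsic with the metadata and
// alignment handling shown above.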
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
    Value *Ptr, Value *Val, Value *Size, unsigned Align, uint32_t ElementSize,
    MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(Align >= ElementSize &&
         "Pointer alignment must be at least element size.");

  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memset_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  cast<AtomicMemSetInst>(CI)->setDestAlignment(Align);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
CallInst *IRBuilderBase::
CreateMemCpy(Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign,
             Value *Size, bool isVolatile, MDNode *TBAATag,
             MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert((DstAlign == 0 || isPowerOf2_32(DstAlign)) && "Must be 0 or a power of 2");
  assert((SrcAlign == 0 || isPowerOf2_32(SrcAlign)) && "Must be 0 or a power of 2");
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  auto *MCI = cast<MemCpyInst>(CI);
  if (DstAlign > 0)
    MCI->setDestAlignment(DstAlign);
  if (SrcAlign > 0)
    MCI->setSourceAlignment(SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
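// Usage sketch (illustrative only; B, Dst and Src are hypothetical names):
// copy 32 bytes with 4-byte alignment on both sides, via the llvm.memcpy
// intrinsic:
//   B.CreateMemCpy(Dst, /*DstAlign=*/4, Src, /*SrcAlign=*/4, B.getInt64(32));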
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
    Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memcpy_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  // Set the alignment of the pointer args.
  auto *AMCI = cast<AtomicMemCpyInst>(CI);
  AMCI->setDestAlignment(DstAlign);
  AMCI->setSourceAlignment(SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
CallInst *IRBuilderBase::
CreateMemMove(Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign,
              Value *Size, bool isVolatile, MDNode *TBAATag, MDNode *ScopeTag,
              MDNode *NoAliasTag) {
  assert((DstAlign == 0 || isPowerOf2_32(DstAlign)) && "Must be 0 or a power of 2");
  assert((SrcAlign == 0 || isPowerOf2_32(SrcAlign)) && "Must be 0 or a power of 2");
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memmove, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  auto *MMI = cast<MemMoveInst>(CI);
  if (DstAlign > 0)
    MMI->setDestAlignment(DstAlign);
  if (SrcAlign > 0)
    MMI->setSourceAlignment(SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
    Value *Dst, unsigned DstAlign, Value *Src, unsigned SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memmove_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  // Set the alignment of the pointer args.
  CI->addParamAttr(
      0, Attribute::getWithAlignment(CI->getContext(), Align(DstAlign)));
  CI->addParamAttr(
      1, Attribute::getWithAlignment(CI->getContext(), Align(SrcAlign)));

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
static CallInst *getReductionIntrinsic(IRBuilderBase *Builder, Intrinsic::ID ID,
                                       Value *Src) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Src};
  Type *Tys[] = { Src->getType() };
  auto Decl = Intrinsic::getDeclaration(M, ID, Tys);
  return createCallHelper(Decl, Ops, Builder);
}
CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Acc, Src};
  Type *Tys[] = {Acc->getType(), Src->getType()};
  auto Decl = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_vector_reduce_v2_fadd, Tys);
  return createCallHelper(Decl, Ops, this);
}

CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Acc, Src};
  Type *Tys[] = {Acc->getType(), Src->getType()};
  auto Decl = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_vector_reduce_v2_fmul, Tys);
  return createCallHelper(Decl, Ops, this);
}
CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_add,
                               Src);
}

CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_mul,
                               Src);
}

CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_and,
                               Src);
}

CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_or,
                               Src);
}

CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_xor,
                               Src);
}
CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
  auto ID = IsSigned ? Intrinsic::experimental_vector_reduce_smax
                     : Intrinsic::experimental_vector_reduce_umax;
  return getReductionIntrinsic(this, ID, Src);
}

CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
  auto ID = IsSigned ? Intrinsic::experimental_vector_reduce_smin
                     : Intrinsic::experimental_vector_reduce_umin;
  return getReductionIntrinsic(this, ID, Src);
}
CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src, bool NoNaN) {
  auto Rdx = getReductionIntrinsic(
      this, Intrinsic::experimental_vector_reduce_fmax, Src);
  if (NoNaN) {
    FastMathFlags FMF;
    FMF.setNoNaNs();
    Rdx->setFastMathFlags(FMF);
  }
  return Rdx;
}

CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src, bool NoNaN) {
  auto Rdx = getReductionIntrinsic(
      this, Intrinsic::experimental_vector_reduce_fmin, Src);
  if (NoNaN) {
    FastMathFlags FMF;
    FMF.setNoNaNs();
    Rdx->setFastMathFlags(FMF);
  }
  return Rdx;
}
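// Usage sketch (illustrative only; B and Vec are hypothetical names): reduce a
// vector value, e.g. of type <4 x i32>, to a single scalar through the
// experimental reduction intrinsics built above:
//   CallInst *Sum = B.CreateAddReduce(Vec);
//   CallInst *Max = B.CreateIntMaxReduce(Vec, /*IsSigned=*/true);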
CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.start only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.start requires the size to be an i64");
  Value *Ops[] = { Size, Ptr };
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_start, {Ptr->getType()});
  return createCallHelper(TheFn, Ops, this);
}
CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.end only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.end requires the size to be an i64");
  Value *Ops[] = { Size, Ptr };
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_end, {Ptr->getType()});
  return createCallHelper(TheFn, Ops, this);
}
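// Usage sketch (illustrative only; B and Slot are hypothetical names): bracket
// the live range of a 16-byte stack slot with lifetime markers:
//   B.CreateLifetimeStart(Slot, B.getInt64(16));
//   // ... uses of Slot ...
//   B.CreateLifetimeEnd(Slot, B.getInt64(16));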
CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "invariant.start only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "invariant.start requires the size to be an i64");

  Value *Ops[] = {Size, Ptr};
  // Fill in the single overloaded type: memory object type.
  Type *ObjectPtr[1] = {Ptr->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::invariant_start, ObjectPtr);
  return createCallHelper(TheFn, Ops, this);
}
CallInst *IRBuilderBase::CreateAssumption(Value *Cond) {
  assert(Cond->getType() == getInt1Ty() &&
         "an assumption condition must be of type i1");

  Value *Ops[] = { Cond };
  Module *M = BB->getParent()->getParent();
  Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
  return createCallHelper(FnAssume, Ops, this);
}
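// Usage sketch (illustrative only; B and Cond are hypothetical names): record
// an already-computed i1 condition, e.g. an icmp result, as an optimizer hint:
//   B.CreateAssumption(Cond);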
/// Create a call to a Masked Load intrinsic.
/// \p Ptr      - base pointer for the load
/// \p Align    - alignment of the source location
/// \p Mask     - vector of booleans which indicates what vector lanes should
///               be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
///               of the result
/// \p Name     - name of the result variable
CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, unsigned Align,
                                          Value *Mask, Value *PassThru,
                                          const Twine &Name) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = PtrTy->getElementType();
  assert(DataTy->isVectorTy() && "Ptr should point to a vector");
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = UndefValue::get(DataTy);
  Type *OverloadedTypes[] = { DataTy, PtrTy };
  Value *Ops[] = { Ptr, getInt32(Align), Mask, PassThru };
  return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
                               OverloadedTypes, Name);
}
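// Usage sketch (illustrative only; B, VecPtr, Mask and "mload" are
// hypothetical names): load only the enabled lanes of a <4 x float> object
// through a pointer VecPtr, given a <4 x i1> mask:
//   Value *Ld = B.CreateMaskedLoad(VecPtr, /*Align=*/16, Mask,
//                                  /*PassThru=*/nullptr, "mload");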
/// Create a call to a Masked Store intrinsic.
/// \p Val   - data to be stored,
/// \p Ptr   - base pointer for the store
/// \p Align - alignment of the destination location
/// \p Mask  - vector of booleans which indicates what vector lanes should
///            be accessed in memory
CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
                                           unsigned Align, Value *Mask) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = PtrTy->getElementType();
  assert(DataTy->isVectorTy() && "Ptr should point to a vector");
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = { DataTy, PtrTy };
  Value *Ops[] = { Val, Ptr, getInt32(Align), Mask };
  return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
}
/// Create a call to a Masked intrinsic, with given intrinsic Id,
/// an array of operands - Ops, and an array of overloaded types -
/// OverloadedTypes.
CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
                                               ArrayRef<Value *> Ops,
                                               ArrayRef<Type *> OverloadedTypes,
                                               const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
  return createCallHelper(TheFn, Ops, this, Name);
}
/// Create a call to a Masked Gather intrinsic.
/// \p Ptrs     - vector of pointers for loading
/// \p Align    - alignment for one element
/// \p Mask     - vector of booleans which indicates what vector lanes should
///               be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
///               of the result
/// \p Name     - name of the result variable
CallInst *IRBuilderBase::CreateMaskedGather(Value *Ptrs, unsigned Align,
                                            Value *Mask, Value *PassThru,
                                            const Twine &Name) {
  auto PtrsTy = cast<VectorType>(Ptrs->getType());
  auto PtrTy = cast<PointerType>(PtrsTy->getElementType());
  unsigned NumElts = PtrsTy->getVectorNumElements();
  Type *DataTy = VectorType::get(PtrTy->getElementType(), NumElts);

  if (!Mask)
    Mask = Constant::getAllOnesValue(VectorType::get(Type::getInt1Ty(Context),
                                     NumElts));

  if (!PassThru)
    PassThru = UndefValue::get(DataTy);

  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Ptrs, getInt32(Align), Mask, PassThru};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
                               Name);
}
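// Usage sketch (illustrative only; B, Ptrs, Mask and "gather" are hypothetical
// names): gather one element per enabled lane from a <4 x i32*> vector of
// pointers, with undef pass-through for disabled lanes:
//   Value *G = B.CreateMaskedGather(Ptrs, /*Align=*/4, Mask,
//                                   /*PassThru=*/nullptr, "gather");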
/// Create a call to a Masked Scatter intrinsic.
/// \p Data  - data to be stored,
/// \p Ptrs  - the vector of pointers, where the \p Data elements should be
///            stored
/// \p Align - alignment for one element
/// \p Mask  - vector of booleans which indicates what vector lanes should
///            be accessed in memory
CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
                                             unsigned Align, Value *Mask) {
  auto PtrsTy = cast<VectorType>(Ptrs->getType());
  auto DataTy = cast<VectorType>(Data->getType());
  unsigned NumElts = PtrsTy->getVectorNumElements();

#ifndef NDEBUG
  auto PtrTy = cast<PointerType>(PtrsTy->getElementType());
  assert(NumElts == DataTy->getVectorNumElements() &&
         PtrTy->getElementType() == DataTy->getElementType() &&
         "Incompatible pointer and data types");
#endif

  if (!Mask)
    Mask = Constant::getAllOnesValue(VectorType::get(Type::getInt1Ty(Context),
                                     NumElts));

  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Data, Ptrs, getInt32(Align), Mask};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
}
template <typename T0, typename T1, typename T2, typename T3>
static std::vector<Value *>
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
                  Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
                  ArrayRef<T1> TransitionArgs, ArrayRef<T2> DeoptArgs,
                  ArrayRef<T3> GCArgs) {
  std::vector<Value *> Args;
  Args.push_back(B.getInt64(ID));
  Args.push_back(B.getInt32(NumPatchBytes));
  Args.push_back(ActualCallee);
  Args.push_back(B.getInt32(CallArgs.size()));
  Args.push_back(B.getInt32(Flags));
  Args.insert(Args.end(), CallArgs.begin(), CallArgs.end());
  Args.push_back(B.getInt32(TransitionArgs.size()));
  Args.insert(Args.end(), TransitionArgs.begin(), TransitionArgs.end());
  Args.push_back(B.getInt32(DeoptArgs.size()));
  Args.insert(Args.end(), DeoptArgs.begin(), DeoptArgs.end());
  Args.insert(Args.end(), GCArgs.begin(), GCArgs.end());

  return Args;
}
template <typename T0, typename T1, typename T2, typename T3>
static CallInst *CreateGCStatepointCallCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
    ArrayRef<T1> TransitionArgs, ArrayRef<T2> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  // Extract out the type of the callee.
  auto *FuncPtrType = cast<PointerType>(ActualCallee->getType());
  assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
         "actual callee must be a callable value");

  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Type *ArgTypes[] = { FuncPtrType };
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                                ArgTypes);

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualCallee, Flags,
                        CallArgs, TransitionArgs, DeoptArgs, GCArgs);
  return createCallHelper(FnStatepoint, Args, Builder, Name);
}
CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
    ArrayRef<Value *> CallArgs, ArrayRef<Value *> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, None /* No Transition Args */, DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee, uint32_t Flags,
    ArrayRef<Use> CallArgs, ArrayRef<Use> TransitionArgs,
    ArrayRef<Use> DeoptArgs, ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Use, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
      DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
    ArrayRef<Use> CallArgs, ArrayRef<Value *> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, None, DeoptArgs, GCArgs, Name);
}
template <typename T0, typename T1, typename T2, typename T3>
static InvokeInst *CreateGCStatepointInvokeCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    Value *ActualInvokee, BasicBlock *NormalDest, BasicBlock *UnwindDest,
    uint32_t Flags, ArrayRef<T0> InvokeArgs, ArrayRef<T1> TransitionArgs,
    ArrayRef<T2> DeoptArgs, ArrayRef<T3> GCArgs, const Twine &Name) {
  // Extract out the type of the callee.
  auto *FuncPtrType = cast<PointerType>(ActualInvokee->getType());
  assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
         "actual callee must be a callable value");

  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_statepoint, {FuncPtrType});

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee, Flags,
                        InvokeArgs, TransitionArgs, DeoptArgs, GCArgs);
  return createInvokeHelper(FnStatepoint, NormalDest, UnwindDest, Args, Builder,
                            Name);
}
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest,
    ArrayRef<Value *> InvokeArgs, ArrayRef<Value *> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, None /* No Transition Args*/,
      DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
    ArrayRef<Use> InvokeArgs, ArrayRef<Use> TransitionArgs,
    ArrayRef<Use> DeoptArgs, ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Use, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
    ArrayRef<Value *> DeoptArgs, ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, None, DeoptArgs, GCArgs,
      Name);
}
*IRBuilderBase::CreateGCResult(Instruction
*Statepoint
,
708 Intrinsic::ID ID
= Intrinsic::experimental_gc_result
;
709 Module
*M
= BB
->getParent()->getParent();
710 Type
*Types
[] = {ResultType
};
711 Function
*FnGCResult
= Intrinsic::getDeclaration(M
, ID
, Types
);
713 Value
*Args
[] = {Statepoint
};
714 return createCallHelper(FnGCResult
, Args
, this, Name
);
CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
                                          int BaseOffset,
                                          int DerivedOffset,
                                          Type *ResultType,
                                          const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *Types[] = {ResultType};
  Function *FnGCRelocate =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types);

  Value *Args[] = {Statepoint,
                   getInt32(BaseOffset),
                   getInt32(DerivedOffset)};
  return createCallHelper(FnGCRelocate, Args, this, Name);
}
CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
                                              Instruction *FMFSource,
                                              const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, {V->getType()});
  return createCallHelper(Fn, {V}, this, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
                                               Value *RHS,
                                               Instruction *FMFSource,
                                               const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, { LHS->getType() });
  return createCallHelper(Fn, {LHS, RHS}, this, Name, FMFSource);
}
CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
                                         ArrayRef<Type *> Types,
                                         ArrayRef<Value *> Args,
                                         Instruction *FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
  return createCallHelper(Fn, Args, this, Name, FMFSource);
}
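// Usage sketch (illustrative only; B and V are hypothetical names): emit an
// overloaded intrinsic such as llvm.ctlz, which is overloaded on the value
// type and also takes an i1 is_zero_undef flag:
//   CallInst *C = B.CreateIntrinsic(Intrinsic::ctlz, {V->getType()},
//                                   {V, B.getFalse()});
// For two-operand intrinsics overloaded on a single type, CreateBinaryIntrinsic
// is the shorter form, e.g. B.CreateBinaryIntrinsic(Intrinsic::maxnum, X, Y).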