//===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the IRBuilder class, which is used as a convenient way
// to create LLVM instructions with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//
#include "llvm/IR/IRBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;
39 /// CreateGlobalString - Make a new global variable with an initializer that
40 /// has array of i8 type filled in with the nul terminated string value
41 /// specified. If Name is specified, it is the name of the global variable
43 GlobalVariable
*IRBuilderBase::CreateGlobalString(StringRef Str
,
45 unsigned AddressSpace
,
47 Constant
*StrConstant
= ConstantDataArray::getString(Context
, Str
);
49 M
= BB
->getParent()->getParent();
50 auto *GV
= new GlobalVariable(
51 *M
, StrConstant
->getType(), true, GlobalValue::PrivateLinkage
,
52 StrConstant
, Name
, nullptr, GlobalVariable::NotThreadLocal
, AddressSpace
);
53 GV
->setUnnamedAddr(GlobalValue::UnnamedAddr::Global
);
54 GV
->setAlignment(Align(1));
58 Type
*IRBuilderBase::getCurrentFunctionReturnType() const {
59 assert(BB
&& BB
->getParent() && "No current function!");
60 return BB
->getParent()->getReturnType();
63 Value
*IRBuilderBase::getCastedInt8PtrValue(Value
*Ptr
) {
64 auto *PT
= cast
<PointerType
>(Ptr
->getType());
65 if (PT
->isOpaqueOrPointeeTypeMatches(getInt8Ty()))
68 // Otherwise, we need to insert a bitcast.
69 return CreateBitCast(Ptr
, getInt8PtrTy(PT
->getAddressSpace()));
72 static CallInst
*createCallHelper(Function
*Callee
, ArrayRef
<Value
*> Ops
,
73 IRBuilderBase
*Builder
,
74 const Twine
&Name
= "",
75 Instruction
*FMFSource
= nullptr,
76 ArrayRef
<OperandBundleDef
> OpBundles
= {}) {
77 CallInst
*CI
= Builder
->CreateCall(Callee
, Ops
, OpBundles
, Name
);
79 CI
->copyFastMathFlags(FMFSource
);
83 Value
*IRBuilderBase::CreateVScale(Constant
*Scaling
, const Twine
&Name
) {
84 assert(isa
<ConstantInt
>(Scaling
) && "Expected constant integer");
85 if (cast
<ConstantInt
>(Scaling
)->isZero())
87 Module
*M
= GetInsertBlock()->getParent()->getParent();
89 Intrinsic::getDeclaration(M
, Intrinsic::vscale
, {Scaling
->getType()});
90 CallInst
*CI
= createCallHelper(TheFn
, {}, this, Name
);
91 return cast
<ConstantInt
>(Scaling
)->getSExtValue() == 1
93 : CreateMul(CI
, Scaling
);
96 Value
*IRBuilderBase::CreateStepVector(Type
*DstType
, const Twine
&Name
) {
97 if (isa
<ScalableVectorType
>(DstType
))
98 return CreateIntrinsic(Intrinsic::experimental_stepvector
, {DstType
}, {},
101 Type
*STy
= DstType
->getScalarType();
102 unsigned NumEls
= cast
<FixedVectorType
>(DstType
)->getNumElements();
104 // Create a vector of consecutive numbers from zero to VF.
105 SmallVector
<Constant
*, 8> Indices
;
106 for (unsigned i
= 0; i
< NumEls
; ++i
)
107 Indices
.push_back(ConstantInt::get(STy
, i
));
109 // Add the consecutive indices to the vector value.
110 return ConstantVector::get(Indices
);
113 CallInst
*IRBuilderBase::CreateMemSet(Value
*Ptr
, Value
*Val
, Value
*Size
,
114 MaybeAlign Align
, bool isVolatile
,
115 MDNode
*TBAATag
, MDNode
*ScopeTag
,
116 MDNode
*NoAliasTag
) {
117 Ptr
= getCastedInt8PtrValue(Ptr
);
118 Value
*Ops
[] = {Ptr
, Val
, Size
, getInt1(isVolatile
)};
119 Type
*Tys
[] = { Ptr
->getType(), Size
->getType() };
120 Module
*M
= BB
->getParent()->getParent();
121 Function
*TheFn
= Intrinsic::getDeclaration(M
, Intrinsic::memset
, Tys
);
123 CallInst
*CI
= createCallHelper(TheFn
, Ops
, this);
126 cast
<MemSetInst
>(CI
)->setDestAlignment(Align
->value());
128 // Set the TBAA info if present.
130 CI
->setMetadata(LLVMContext::MD_tbaa
, TBAATag
);
133 CI
->setMetadata(LLVMContext::MD_alias_scope
, ScopeTag
);
136 CI
->setMetadata(LLVMContext::MD_noalias
, NoAliasTag
);
141 CallInst
*IRBuilderBase::CreateElementUnorderedAtomicMemSet(
142 Value
*Ptr
, Value
*Val
, Value
*Size
, Align Alignment
, uint32_t ElementSize
,
143 MDNode
*TBAATag
, MDNode
*ScopeTag
, MDNode
*NoAliasTag
) {
145 Ptr
= getCastedInt8PtrValue(Ptr
);
146 Value
*Ops
[] = {Ptr
, Val
, Size
, getInt32(ElementSize
)};
147 Type
*Tys
[] = {Ptr
->getType(), Size
->getType()};
148 Module
*M
= BB
->getParent()->getParent();
149 Function
*TheFn
= Intrinsic::getDeclaration(
150 M
, Intrinsic::memset_element_unordered_atomic
, Tys
);
152 CallInst
*CI
= createCallHelper(TheFn
, Ops
, this);
154 cast
<AtomicMemSetInst
>(CI
)->setDestAlignment(Alignment
);
156 // Set the TBAA info if present.
158 CI
->setMetadata(LLVMContext::MD_tbaa
, TBAATag
);
161 CI
->setMetadata(LLVMContext::MD_alias_scope
, ScopeTag
);
164 CI
->setMetadata(LLVMContext::MD_noalias
, NoAliasTag
);
169 CallInst
*IRBuilderBase::CreateMemTransferInst(
170 Intrinsic::ID IntrID
, Value
*Dst
, MaybeAlign DstAlign
, Value
*Src
,
171 MaybeAlign SrcAlign
, Value
*Size
, bool isVolatile
, MDNode
*TBAATag
,
172 MDNode
*TBAAStructTag
, MDNode
*ScopeTag
, MDNode
*NoAliasTag
) {
173 Dst
= getCastedInt8PtrValue(Dst
);
174 Src
= getCastedInt8PtrValue(Src
);
176 Value
*Ops
[] = {Dst
, Src
, Size
, getInt1(isVolatile
)};
177 Type
*Tys
[] = { Dst
->getType(), Src
->getType(), Size
->getType() };
178 Module
*M
= BB
->getParent()->getParent();
179 Function
*TheFn
= Intrinsic::getDeclaration(M
, IntrID
, Tys
);
181 CallInst
*CI
= createCallHelper(TheFn
, Ops
, this);
183 auto* MCI
= cast
<MemTransferInst
>(CI
);
185 MCI
->setDestAlignment(*DstAlign
);
187 MCI
->setSourceAlignment(*SrcAlign
);
189 // Set the TBAA info if present.
191 CI
->setMetadata(LLVMContext::MD_tbaa
, TBAATag
);
193 // Set the TBAA Struct info if present.
195 CI
->setMetadata(LLVMContext::MD_tbaa_struct
, TBAAStructTag
);
198 CI
->setMetadata(LLVMContext::MD_alias_scope
, ScopeTag
);
201 CI
->setMetadata(LLVMContext::MD_noalias
, NoAliasTag
);
206 CallInst
*IRBuilderBase::CreateMemCpyInline(
207 Value
*Dst
, MaybeAlign DstAlign
, Value
*Src
, MaybeAlign SrcAlign
,
208 Value
*Size
, bool IsVolatile
, MDNode
*TBAATag
, MDNode
*TBAAStructTag
,
209 MDNode
*ScopeTag
, MDNode
*NoAliasTag
) {
210 Dst
= getCastedInt8PtrValue(Dst
);
211 Src
= getCastedInt8PtrValue(Src
);
213 Value
*Ops
[] = {Dst
, Src
, Size
, getInt1(IsVolatile
)};
214 Type
*Tys
[] = {Dst
->getType(), Src
->getType(), Size
->getType()};
215 Function
*F
= BB
->getParent();
216 Module
*M
= F
->getParent();
217 Function
*TheFn
= Intrinsic::getDeclaration(M
, Intrinsic::memcpy_inline
, Tys
);
219 CallInst
*CI
= createCallHelper(TheFn
, Ops
, this);
221 auto *MCI
= cast
<MemCpyInlineInst
>(CI
);
223 MCI
->setDestAlignment(*DstAlign
);
225 MCI
->setSourceAlignment(*SrcAlign
);
227 // Set the TBAA info if present.
229 MCI
->setMetadata(LLVMContext::MD_tbaa
, TBAATag
);
231 // Set the TBAA Struct info if present.
233 MCI
->setMetadata(LLVMContext::MD_tbaa_struct
, TBAAStructTag
);
236 MCI
->setMetadata(LLVMContext::MD_alias_scope
, ScopeTag
);
239 MCI
->setMetadata(LLVMContext::MD_noalias
, NoAliasTag
);
244 CallInst
*IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
245 Value
*Dst
, Align DstAlign
, Value
*Src
, Align SrcAlign
, Value
*Size
,
246 uint32_t ElementSize
, MDNode
*TBAATag
, MDNode
*TBAAStructTag
,
247 MDNode
*ScopeTag
, MDNode
*NoAliasTag
) {
248 assert(DstAlign
>= ElementSize
&&
249 "Pointer alignment must be at least element size");
250 assert(SrcAlign
>= ElementSize
&&
251 "Pointer alignment must be at least element size");
252 Dst
= getCastedInt8PtrValue(Dst
);
253 Src
= getCastedInt8PtrValue(Src
);
255 Value
*Ops
[] = {Dst
, Src
, Size
, getInt32(ElementSize
)};
256 Type
*Tys
[] = {Dst
->getType(), Src
->getType(), Size
->getType()};
257 Module
*M
= BB
->getParent()->getParent();
258 Function
*TheFn
= Intrinsic::getDeclaration(
259 M
, Intrinsic::memcpy_element_unordered_atomic
, Tys
);
261 CallInst
*CI
= createCallHelper(TheFn
, Ops
, this);
263 // Set the alignment of the pointer args.
264 auto *AMCI
= cast
<AtomicMemCpyInst
>(CI
);
265 AMCI
->setDestAlignment(DstAlign
);
266 AMCI
->setSourceAlignment(SrcAlign
);
268 // Set the TBAA info if present.
270 CI
->setMetadata(LLVMContext::MD_tbaa
, TBAATag
);
272 // Set the TBAA Struct info if present.
274 CI
->setMetadata(LLVMContext::MD_tbaa_struct
, TBAAStructTag
);
277 CI
->setMetadata(LLVMContext::MD_alias_scope
, ScopeTag
);
280 CI
->setMetadata(LLVMContext::MD_noalias
, NoAliasTag
);
285 CallInst
*IRBuilderBase::CreateMemMove(Value
*Dst
, MaybeAlign DstAlign
,
286 Value
*Src
, MaybeAlign SrcAlign
,
287 Value
*Size
, bool isVolatile
,
288 MDNode
*TBAATag
, MDNode
*ScopeTag
,
289 MDNode
*NoAliasTag
) {
290 Dst
= getCastedInt8PtrValue(Dst
);
291 Src
= getCastedInt8PtrValue(Src
);
293 Value
*Ops
[] = {Dst
, Src
, Size
, getInt1(isVolatile
)};
294 Type
*Tys
[] = { Dst
->getType(), Src
->getType(), Size
->getType() };
295 Module
*M
= BB
->getParent()->getParent();
296 Function
*TheFn
= Intrinsic::getDeclaration(M
, Intrinsic::memmove
, Tys
);
298 CallInst
*CI
= createCallHelper(TheFn
, Ops
, this);
300 auto *MMI
= cast
<MemMoveInst
>(CI
);
302 MMI
->setDestAlignment(*DstAlign
);
304 MMI
->setSourceAlignment(*SrcAlign
);
306 // Set the TBAA info if present.
308 CI
->setMetadata(LLVMContext::MD_tbaa
, TBAATag
);
311 CI
->setMetadata(LLVMContext::MD_alias_scope
, ScopeTag
);
314 CI
->setMetadata(LLVMContext::MD_noalias
, NoAliasTag
);
319 CallInst
*IRBuilderBase::CreateElementUnorderedAtomicMemMove(
320 Value
*Dst
, Align DstAlign
, Value
*Src
, Align SrcAlign
, Value
*Size
,
321 uint32_t ElementSize
, MDNode
*TBAATag
, MDNode
*TBAAStructTag
,
322 MDNode
*ScopeTag
, MDNode
*NoAliasTag
) {
323 assert(DstAlign
>= ElementSize
&&
324 "Pointer alignment must be at least element size");
325 assert(SrcAlign
>= ElementSize
&&
326 "Pointer alignment must be at least element size");
327 Dst
= getCastedInt8PtrValue(Dst
);
328 Src
= getCastedInt8PtrValue(Src
);
330 Value
*Ops
[] = {Dst
, Src
, Size
, getInt32(ElementSize
)};
331 Type
*Tys
[] = {Dst
->getType(), Src
->getType(), Size
->getType()};
332 Module
*M
= BB
->getParent()->getParent();
333 Function
*TheFn
= Intrinsic::getDeclaration(
334 M
, Intrinsic::memmove_element_unordered_atomic
, Tys
);
336 CallInst
*CI
= createCallHelper(TheFn
, Ops
, this);
338 // Set the alignment of the pointer args.
339 CI
->addParamAttr(0, Attribute::getWithAlignment(CI
->getContext(), DstAlign
));
340 CI
->addParamAttr(1, Attribute::getWithAlignment(CI
->getContext(), SrcAlign
));
342 // Set the TBAA info if present.
344 CI
->setMetadata(LLVMContext::MD_tbaa
, TBAATag
);
346 // Set the TBAA Struct info if present.
348 CI
->setMetadata(LLVMContext::MD_tbaa_struct
, TBAAStructTag
);
351 CI
->setMetadata(LLVMContext::MD_alias_scope
, ScopeTag
);
354 CI
->setMetadata(LLVMContext::MD_noalias
, NoAliasTag
);
359 static CallInst
*getReductionIntrinsic(IRBuilderBase
*Builder
, Intrinsic::ID ID
,
361 Module
*M
= Builder
->GetInsertBlock()->getParent()->getParent();
362 Value
*Ops
[] = {Src
};
363 Type
*Tys
[] = { Src
->getType() };
364 auto Decl
= Intrinsic::getDeclaration(M
, ID
, Tys
);
365 return createCallHelper(Decl
, Ops
, Builder
);
368 CallInst
*IRBuilderBase::CreateFAddReduce(Value
*Acc
, Value
*Src
) {
369 Module
*M
= GetInsertBlock()->getParent()->getParent();
370 Value
*Ops
[] = {Acc
, Src
};
371 auto Decl
= Intrinsic::getDeclaration(M
, Intrinsic::vector_reduce_fadd
,
373 return createCallHelper(Decl
, Ops
, this);
376 CallInst
*IRBuilderBase::CreateFMulReduce(Value
*Acc
, Value
*Src
) {
377 Module
*M
= GetInsertBlock()->getParent()->getParent();
378 Value
*Ops
[] = {Acc
, Src
};
379 auto Decl
= Intrinsic::getDeclaration(M
, Intrinsic::vector_reduce_fmul
,
381 return createCallHelper(Decl
, Ops
, this);
384 CallInst
*IRBuilderBase::CreateAddReduce(Value
*Src
) {
385 return getReductionIntrinsic(this, Intrinsic::vector_reduce_add
, Src
);
388 CallInst
*IRBuilderBase::CreateMulReduce(Value
*Src
) {
389 return getReductionIntrinsic(this, Intrinsic::vector_reduce_mul
, Src
);
392 CallInst
*IRBuilderBase::CreateAndReduce(Value
*Src
) {
393 return getReductionIntrinsic(this, Intrinsic::vector_reduce_and
, Src
);
396 CallInst
*IRBuilderBase::CreateOrReduce(Value
*Src
) {
397 return getReductionIntrinsic(this, Intrinsic::vector_reduce_or
, Src
);
400 CallInst
*IRBuilderBase::CreateXorReduce(Value
*Src
) {
401 return getReductionIntrinsic(this, Intrinsic::vector_reduce_xor
, Src
);
404 CallInst
*IRBuilderBase::CreateIntMaxReduce(Value
*Src
, bool IsSigned
) {
406 IsSigned
? Intrinsic::vector_reduce_smax
: Intrinsic::vector_reduce_umax
;
407 return getReductionIntrinsic(this, ID
, Src
);
410 CallInst
*IRBuilderBase::CreateIntMinReduce(Value
*Src
, bool IsSigned
) {
412 IsSigned
? Intrinsic::vector_reduce_smin
: Intrinsic::vector_reduce_umin
;
413 return getReductionIntrinsic(this, ID
, Src
);
416 CallInst
*IRBuilderBase::CreateFPMaxReduce(Value
*Src
) {
417 return getReductionIntrinsic(this, Intrinsic::vector_reduce_fmax
, Src
);
420 CallInst
*IRBuilderBase::CreateFPMinReduce(Value
*Src
) {
421 return getReductionIntrinsic(this, Intrinsic::vector_reduce_fmin
, Src
);
424 CallInst
*IRBuilderBase::CreateLifetimeStart(Value
*Ptr
, ConstantInt
*Size
) {
425 assert(isa
<PointerType
>(Ptr
->getType()) &&
426 "lifetime.start only applies to pointers.");
427 Ptr
= getCastedInt8PtrValue(Ptr
);
431 assert(Size
->getType() == getInt64Ty() &&
432 "lifetime.start requires the size to be an i64");
433 Value
*Ops
[] = { Size
, Ptr
};
434 Module
*M
= BB
->getParent()->getParent();
436 Intrinsic::getDeclaration(M
, Intrinsic::lifetime_start
, {Ptr
->getType()});
437 return createCallHelper(TheFn
, Ops
, this);
440 CallInst
*IRBuilderBase::CreateLifetimeEnd(Value
*Ptr
, ConstantInt
*Size
) {
441 assert(isa
<PointerType
>(Ptr
->getType()) &&
442 "lifetime.end only applies to pointers.");
443 Ptr
= getCastedInt8PtrValue(Ptr
);
447 assert(Size
->getType() == getInt64Ty() &&
448 "lifetime.end requires the size to be an i64");
449 Value
*Ops
[] = { Size
, Ptr
};
450 Module
*M
= BB
->getParent()->getParent();
452 Intrinsic::getDeclaration(M
, Intrinsic::lifetime_end
, {Ptr
->getType()});
453 return createCallHelper(TheFn
, Ops
, this);
456 CallInst
*IRBuilderBase::CreateInvariantStart(Value
*Ptr
, ConstantInt
*Size
) {
458 assert(isa
<PointerType
>(Ptr
->getType()) &&
459 "invariant.start only applies to pointers.");
460 Ptr
= getCastedInt8PtrValue(Ptr
);
464 assert(Size
->getType() == getInt64Ty() &&
465 "invariant.start requires the size to be an i64");
467 Value
*Ops
[] = {Size
, Ptr
};
468 // Fill in the single overloaded type: memory object type.
469 Type
*ObjectPtr
[1] = {Ptr
->getType()};
470 Module
*M
= BB
->getParent()->getParent();
472 Intrinsic::getDeclaration(M
, Intrinsic::invariant_start
, ObjectPtr
);
473 return createCallHelper(TheFn
, Ops
, this);
477 IRBuilderBase::CreateAssumption(Value
*Cond
,
478 ArrayRef
<OperandBundleDef
> OpBundles
) {
479 assert(Cond
->getType() == getInt1Ty() &&
480 "an assumption condition must be of type i1");
482 Value
*Ops
[] = { Cond
};
483 Module
*M
= BB
->getParent()->getParent();
484 Function
*FnAssume
= Intrinsic::getDeclaration(M
, Intrinsic::assume
);
485 return createCallHelper(FnAssume
, Ops
, this, "", nullptr, OpBundles
);
488 Instruction
*IRBuilderBase::CreateNoAliasScopeDeclaration(Value
*Scope
) {
489 Module
*M
= BB
->getModule();
490 auto *FnIntrinsic
= Intrinsic::getDeclaration(
491 M
, Intrinsic::experimental_noalias_scope_decl
, {});
492 return createCallHelper(FnIntrinsic
, {Scope
}, this);
495 /// Create a call to a Masked Load intrinsic.
496 /// \p Ty - vector type to load
497 /// \p Ptr - base pointer for the load
498 /// \p Alignment - alignment of the source location
499 /// \p Mask - vector of booleans which indicates what vector lanes should
500 /// be accessed in memory
501 /// \p PassThru - pass-through value that is used to fill the masked-off lanes
503 /// \p Name - name of the result variable
504 CallInst
*IRBuilderBase::CreateMaskedLoad(Type
*Ty
, Value
*Ptr
, Align Alignment
,
505 Value
*Mask
, Value
*PassThru
,
507 auto *PtrTy
= cast
<PointerType
>(Ptr
->getType());
508 assert(Ty
->isVectorTy() && "Type should be vector");
509 assert(PtrTy
->isOpaqueOrPointeeTypeMatches(Ty
) && "Wrong element type");
510 assert(Mask
&& "Mask should not be all-ones (null)");
512 PassThru
= UndefValue::get(Ty
);
513 Type
*OverloadedTypes
[] = { Ty
, PtrTy
};
514 Value
*Ops
[] = {Ptr
, getInt32(Alignment
.value()), Mask
, PassThru
};
515 return CreateMaskedIntrinsic(Intrinsic::masked_load
, Ops
,
516 OverloadedTypes
, Name
);
519 /// Create a call to a Masked Store intrinsic.
520 /// \p Val - data to be stored,
521 /// \p Ptr - base pointer for the store
522 /// \p Alignment - alignment of the destination location
523 /// \p Mask - vector of booleans which indicates what vector lanes should
524 /// be accessed in memory
525 CallInst
*IRBuilderBase::CreateMaskedStore(Value
*Val
, Value
*Ptr
,
526 Align Alignment
, Value
*Mask
) {
527 auto *PtrTy
= cast
<PointerType
>(Ptr
->getType());
528 Type
*DataTy
= Val
->getType();
529 assert(DataTy
->isVectorTy() && "Val should be a vector");
530 assert(PtrTy
->isOpaqueOrPointeeTypeMatches(DataTy
) && "Wrong element type");
531 assert(Mask
&& "Mask should not be all-ones (null)");
532 Type
*OverloadedTypes
[] = { DataTy
, PtrTy
};
533 Value
*Ops
[] = {Val
, Ptr
, getInt32(Alignment
.value()), Mask
};
534 return CreateMaskedIntrinsic(Intrinsic::masked_store
, Ops
, OverloadedTypes
);
537 /// Create a call to a Masked intrinsic, with given intrinsic Id,
538 /// an array of operands - Ops, and an array of overloaded types -
540 CallInst
*IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id
,
541 ArrayRef
<Value
*> Ops
,
542 ArrayRef
<Type
*> OverloadedTypes
,
544 Module
*M
= BB
->getParent()->getParent();
545 Function
*TheFn
= Intrinsic::getDeclaration(M
, Id
, OverloadedTypes
);
546 return createCallHelper(TheFn
, Ops
, this, Name
);
549 /// Create a call to a Masked Gather intrinsic.
550 /// \p Ty - vector type to gather
551 /// \p Ptrs - vector of pointers for loading
552 /// \p Align - alignment for one element
553 /// \p Mask - vector of booleans which indicates what vector lanes should
554 /// be accessed in memory
555 /// \p PassThru - pass-through value that is used to fill the masked-off lanes
557 /// \p Name - name of the result variable
558 CallInst
*IRBuilderBase::CreateMaskedGather(Type
*Ty
, Value
*Ptrs
,
559 Align Alignment
, Value
*Mask
,
562 auto *VecTy
= cast
<VectorType
>(Ty
);
563 ElementCount NumElts
= VecTy
->getElementCount();
564 auto *PtrsTy
= cast
<VectorType
>(Ptrs
->getType());
565 assert(cast
<PointerType
>(PtrsTy
->getElementType())
566 ->isOpaqueOrPointeeTypeMatches(
567 cast
<VectorType
>(Ty
)->getElementType()) &&
568 "Element type mismatch");
569 assert(NumElts
== PtrsTy
->getElementCount() && "Element count mismatch");
572 Mask
= Constant::getAllOnesValue(
573 VectorType::get(Type::getInt1Ty(Context
), NumElts
));
576 PassThru
= UndefValue::get(Ty
);
578 Type
*OverloadedTypes
[] = {Ty
, PtrsTy
};
579 Value
*Ops
[] = {Ptrs
, getInt32(Alignment
.value()), Mask
, PassThru
};
581 // We specify only one type when we create this intrinsic. Types of other
582 // arguments are derived from this type.
583 return CreateMaskedIntrinsic(Intrinsic::masked_gather
, Ops
, OverloadedTypes
,
587 /// Create a call to a Masked Scatter intrinsic.
588 /// \p Data - data to be stored,
589 /// \p Ptrs - the vector of pointers, where the \p Data elements should be
591 /// \p Align - alignment for one element
592 /// \p Mask - vector of booleans which indicates what vector lanes should
593 /// be accessed in memory
594 CallInst
*IRBuilderBase::CreateMaskedScatter(Value
*Data
, Value
*Ptrs
,
595 Align Alignment
, Value
*Mask
) {
596 auto *PtrsTy
= cast
<VectorType
>(Ptrs
->getType());
597 auto *DataTy
= cast
<VectorType
>(Data
->getType());
598 ElementCount NumElts
= PtrsTy
->getElementCount();
601 auto *PtrTy
= cast
<PointerType
>(PtrsTy
->getElementType());
602 assert(NumElts
== DataTy
->getElementCount() &&
603 PtrTy
->isOpaqueOrPointeeTypeMatches(DataTy
->getElementType()) &&
604 "Incompatible pointer and data types");
608 Mask
= Constant::getAllOnesValue(
609 VectorType::get(Type::getInt1Ty(Context
), NumElts
));
611 Type
*OverloadedTypes
[] = {DataTy
, PtrsTy
};
612 Value
*Ops
[] = {Data
, Ptrs
, getInt32(Alignment
.value()), Mask
};
614 // We specify only one type when we create this intrinsic. Types of other
615 // arguments are derived from this type.
616 return CreateMaskedIntrinsic(Intrinsic::masked_scatter
, Ops
, OverloadedTypes
);
619 template <typename T0
>
620 static std::vector
<Value
*>
621 getStatepointArgs(IRBuilderBase
&B
, uint64_t ID
, uint32_t NumPatchBytes
,
622 Value
*ActualCallee
, uint32_t Flags
, ArrayRef
<T0
> CallArgs
) {
623 std::vector
<Value
*> Args
;
624 Args
.push_back(B
.getInt64(ID
));
625 Args
.push_back(B
.getInt32(NumPatchBytes
));
626 Args
.push_back(ActualCallee
);
627 Args
.push_back(B
.getInt32(CallArgs
.size()));
628 Args
.push_back(B
.getInt32(Flags
));
629 llvm::append_range(Args
, CallArgs
);
630 // GC Transition and Deopt args are now always handled via operand bundle.
631 // They will be removed from the signature of gc.statepoint shortly.
632 Args
.push_back(B
.getInt32(0));
633 Args
.push_back(B
.getInt32(0));
634 // GC args are now encoded in the gc-live operand bundle
638 template<typename T1
, typename T2
, typename T3
>
639 static std::vector
<OperandBundleDef
>
640 getStatepointBundles(Optional
<ArrayRef
<T1
>> TransitionArgs
,
641 Optional
<ArrayRef
<T2
>> DeoptArgs
,
642 ArrayRef
<T3
> GCArgs
) {
643 std::vector
<OperandBundleDef
> Rval
;
645 SmallVector
<Value
*, 16> DeoptValues
;
646 llvm::append_range(DeoptValues
, *DeoptArgs
);
647 Rval
.emplace_back("deopt", DeoptValues
);
649 if (TransitionArgs
) {
650 SmallVector
<Value
*, 16> TransitionValues
;
651 llvm::append_range(TransitionValues
, *TransitionArgs
);
652 Rval
.emplace_back("gc-transition", TransitionValues
);
655 SmallVector
<Value
*, 16> LiveValues
;
656 llvm::append_range(LiveValues
, GCArgs
);
657 Rval
.emplace_back("gc-live", LiveValues
);
662 template <typename T0
, typename T1
, typename T2
, typename T3
>
663 static CallInst
*CreateGCStatepointCallCommon(
664 IRBuilderBase
*Builder
, uint64_t ID
, uint32_t NumPatchBytes
,
665 Value
*ActualCallee
, uint32_t Flags
, ArrayRef
<T0
> CallArgs
,
666 Optional
<ArrayRef
<T1
>> TransitionArgs
,
667 Optional
<ArrayRef
<T2
>> DeoptArgs
, ArrayRef
<T3
> GCArgs
,
669 // Extract out the type of the callee.
670 auto *FuncPtrType
= cast
<PointerType
>(ActualCallee
->getType());
671 assert(isa
<FunctionType
>(FuncPtrType
->getElementType()) &&
672 "actual callee must be a callable value");
674 Module
*M
= Builder
->GetInsertBlock()->getParent()->getParent();
675 // Fill in the one generic type'd argument (the function is also vararg)
676 Type
*ArgTypes
[] = { FuncPtrType
};
677 Function
*FnStatepoint
=
678 Intrinsic::getDeclaration(M
, Intrinsic::experimental_gc_statepoint
,
681 std::vector
<Value
*> Args
=
682 getStatepointArgs(*Builder
, ID
, NumPatchBytes
, ActualCallee
, Flags
,
685 return Builder
->CreateCall(FnStatepoint
, Args
,
686 getStatepointBundles(TransitionArgs
, DeoptArgs
,
691 CallInst
*IRBuilderBase::CreateGCStatepointCall(
692 uint64_t ID
, uint32_t NumPatchBytes
, Value
*ActualCallee
,
693 ArrayRef
<Value
*> CallArgs
, Optional
<ArrayRef
<Value
*>> DeoptArgs
,
694 ArrayRef
<Value
*> GCArgs
, const Twine
&Name
) {
695 return CreateGCStatepointCallCommon
<Value
*, Value
*, Value
*, Value
*>(
696 this, ID
, NumPatchBytes
, ActualCallee
, uint32_t(StatepointFlags::None
),
697 CallArgs
, None
/* No Transition Args */, DeoptArgs
, GCArgs
, Name
);
700 CallInst
*IRBuilderBase::CreateGCStatepointCall(
701 uint64_t ID
, uint32_t NumPatchBytes
, Value
*ActualCallee
, uint32_t Flags
,
702 ArrayRef
<Value
*> CallArgs
, Optional
<ArrayRef
<Use
>> TransitionArgs
,
703 Optional
<ArrayRef
<Use
>> DeoptArgs
, ArrayRef
<Value
*> GCArgs
,
705 return CreateGCStatepointCallCommon
<Value
*, Use
, Use
, Value
*>(
706 this, ID
, NumPatchBytes
, ActualCallee
, Flags
, CallArgs
, TransitionArgs
,
707 DeoptArgs
, GCArgs
, Name
);
710 CallInst
*IRBuilderBase::CreateGCStatepointCall(
711 uint64_t ID
, uint32_t NumPatchBytes
, Value
*ActualCallee
,
712 ArrayRef
<Use
> CallArgs
, Optional
<ArrayRef
<Value
*>> DeoptArgs
,
713 ArrayRef
<Value
*> GCArgs
, const Twine
&Name
) {
714 return CreateGCStatepointCallCommon
<Use
, Value
*, Value
*, Value
*>(
715 this, ID
, NumPatchBytes
, ActualCallee
, uint32_t(StatepointFlags::None
),
716 CallArgs
, None
, DeoptArgs
, GCArgs
, Name
);
719 template <typename T0
, typename T1
, typename T2
, typename T3
>
720 static InvokeInst
*CreateGCStatepointInvokeCommon(
721 IRBuilderBase
*Builder
, uint64_t ID
, uint32_t NumPatchBytes
,
722 Value
*ActualInvokee
, BasicBlock
*NormalDest
, BasicBlock
*UnwindDest
,
723 uint32_t Flags
, ArrayRef
<T0
> InvokeArgs
,
724 Optional
<ArrayRef
<T1
>> TransitionArgs
, Optional
<ArrayRef
<T2
>> DeoptArgs
,
725 ArrayRef
<T3
> GCArgs
, const Twine
&Name
) {
726 // Extract out the type of the callee.
727 auto *FuncPtrType
= cast
<PointerType
>(ActualInvokee
->getType());
728 assert(isa
<FunctionType
>(FuncPtrType
->getElementType()) &&
729 "actual callee must be a callable value");
731 Module
*M
= Builder
->GetInsertBlock()->getParent()->getParent();
732 // Fill in the one generic type'd argument (the function is also vararg)
733 Function
*FnStatepoint
= Intrinsic::getDeclaration(
734 M
, Intrinsic::experimental_gc_statepoint
, {FuncPtrType
});
736 std::vector
<Value
*> Args
=
737 getStatepointArgs(*Builder
, ID
, NumPatchBytes
, ActualInvokee
, Flags
,
740 return Builder
->CreateInvoke(FnStatepoint
, NormalDest
, UnwindDest
, Args
,
741 getStatepointBundles(TransitionArgs
, DeoptArgs
,
746 InvokeInst
*IRBuilderBase::CreateGCStatepointInvoke(
747 uint64_t ID
, uint32_t NumPatchBytes
, Value
*ActualInvokee
,
748 BasicBlock
*NormalDest
, BasicBlock
*UnwindDest
,
749 ArrayRef
<Value
*> InvokeArgs
, Optional
<ArrayRef
<Value
*>> DeoptArgs
,
750 ArrayRef
<Value
*> GCArgs
, const Twine
&Name
) {
751 return CreateGCStatepointInvokeCommon
<Value
*, Value
*, Value
*, Value
*>(
752 this, ID
, NumPatchBytes
, ActualInvokee
, NormalDest
, UnwindDest
,
753 uint32_t(StatepointFlags::None
), InvokeArgs
, None
/* No Transition Args*/,
754 DeoptArgs
, GCArgs
, Name
);
757 InvokeInst
*IRBuilderBase::CreateGCStatepointInvoke(
758 uint64_t ID
, uint32_t NumPatchBytes
, Value
*ActualInvokee
,
759 BasicBlock
*NormalDest
, BasicBlock
*UnwindDest
, uint32_t Flags
,
760 ArrayRef
<Value
*> InvokeArgs
, Optional
<ArrayRef
<Use
>> TransitionArgs
,
761 Optional
<ArrayRef
<Use
>> DeoptArgs
, ArrayRef
<Value
*> GCArgs
, const Twine
&Name
) {
762 return CreateGCStatepointInvokeCommon
<Value
*, Use
, Use
, Value
*>(
763 this, ID
, NumPatchBytes
, ActualInvokee
, NormalDest
, UnwindDest
, Flags
,
764 InvokeArgs
, TransitionArgs
, DeoptArgs
, GCArgs
, Name
);
767 InvokeInst
*IRBuilderBase::CreateGCStatepointInvoke(
768 uint64_t ID
, uint32_t NumPatchBytes
, Value
*ActualInvokee
,
769 BasicBlock
*NormalDest
, BasicBlock
*UnwindDest
, ArrayRef
<Use
> InvokeArgs
,
770 Optional
<ArrayRef
<Value
*>> DeoptArgs
, ArrayRef
<Value
*> GCArgs
, const Twine
&Name
) {
771 return CreateGCStatepointInvokeCommon
<Use
, Value
*, Value
*, Value
*>(
772 this, ID
, NumPatchBytes
, ActualInvokee
, NormalDest
, UnwindDest
,
773 uint32_t(StatepointFlags::None
), InvokeArgs
, None
, DeoptArgs
, GCArgs
,
777 CallInst
*IRBuilderBase::CreateGCResult(Instruction
*Statepoint
,
780 Intrinsic::ID ID
= Intrinsic::experimental_gc_result
;
781 Module
*M
= BB
->getParent()->getParent();
782 Type
*Types
[] = {ResultType
};
783 Function
*FnGCResult
= Intrinsic::getDeclaration(M
, ID
, Types
);
785 Value
*Args
[] = {Statepoint
};
786 return createCallHelper(FnGCResult
, Args
, this, Name
);
789 CallInst
*IRBuilderBase::CreateGCRelocate(Instruction
*Statepoint
,
794 Module
*M
= BB
->getParent()->getParent();
795 Type
*Types
[] = {ResultType
};
796 Function
*FnGCRelocate
=
797 Intrinsic::getDeclaration(M
, Intrinsic::experimental_gc_relocate
, Types
);
799 Value
*Args
[] = {Statepoint
,
800 getInt32(BaseOffset
),
801 getInt32(DerivedOffset
)};
802 return createCallHelper(FnGCRelocate
, Args
, this, Name
);
805 CallInst
*IRBuilderBase::CreateGCGetPointerBase(Value
*DerivedPtr
,
807 Module
*M
= BB
->getParent()->getParent();
808 Type
*PtrTy
= DerivedPtr
->getType();
809 Function
*FnGCFindBase
= Intrinsic::getDeclaration(
810 M
, Intrinsic::experimental_gc_get_pointer_base
, {PtrTy
, PtrTy
});
811 return createCallHelper(FnGCFindBase
, {DerivedPtr
}, this, Name
);
814 CallInst
*IRBuilderBase::CreateGCGetPointerOffset(Value
*DerivedPtr
,
816 Module
*M
= BB
->getParent()->getParent();
817 Type
*PtrTy
= DerivedPtr
->getType();
818 Function
*FnGCGetOffset
= Intrinsic::getDeclaration(
819 M
, Intrinsic::experimental_gc_get_pointer_offset
, {PtrTy
});
820 return createCallHelper(FnGCGetOffset
, {DerivedPtr
}, this, Name
);
823 CallInst
*IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID
, Value
*V
,
824 Instruction
*FMFSource
,
826 Module
*M
= BB
->getModule();
827 Function
*Fn
= Intrinsic::getDeclaration(M
, ID
, {V
->getType()});
828 return createCallHelper(Fn
, {V
}, this, Name
, FMFSource
);
831 CallInst
*IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID
, Value
*LHS
,
833 Instruction
*FMFSource
,
835 Module
*M
= BB
->getModule();
836 Function
*Fn
= Intrinsic::getDeclaration(M
, ID
, { LHS
->getType() });
837 return createCallHelper(Fn
, {LHS
, RHS
}, this, Name
, FMFSource
);
840 CallInst
*IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID
,
841 ArrayRef
<Type
*> Types
,
842 ArrayRef
<Value
*> Args
,
843 Instruction
*FMFSource
,
845 Module
*M
= BB
->getModule();
846 Function
*Fn
= Intrinsic::getDeclaration(M
, ID
, Types
);
847 return createCallHelper(Fn
, Args
, this, Name
, FMFSource
);
850 CallInst
*IRBuilderBase::CreateConstrainedFPBinOp(
851 Intrinsic::ID ID
, Value
*L
, Value
*R
, Instruction
*FMFSource
,
852 const Twine
&Name
, MDNode
*FPMathTag
,
853 Optional
<RoundingMode
> Rounding
,
854 Optional
<fp::ExceptionBehavior
> Except
) {
855 Value
*RoundingV
= getConstrainedFPRounding(Rounding
);
856 Value
*ExceptV
= getConstrainedFPExcept(Except
);
858 FastMathFlags UseFMF
= FMF
;
860 UseFMF
= FMFSource
->getFastMathFlags();
862 CallInst
*C
= CreateIntrinsic(ID
, {L
->getType()},
863 {L
, R
, RoundingV
, ExceptV
}, nullptr, Name
);
864 setConstrainedFPCallAttr(C
);
865 setFPAttrs(C
, FPMathTag
, UseFMF
);
869 Value
*IRBuilderBase::CreateNAryOp(unsigned Opc
, ArrayRef
<Value
*> Ops
,
870 const Twine
&Name
, MDNode
*FPMathTag
) {
871 if (Instruction::isBinaryOp(Opc
)) {
872 assert(Ops
.size() == 2 && "Invalid number of operands!");
873 return CreateBinOp(static_cast<Instruction::BinaryOps
>(Opc
),
874 Ops
[0], Ops
[1], Name
, FPMathTag
);
876 if (Instruction::isUnaryOp(Opc
)) {
877 assert(Ops
.size() == 1 && "Invalid number of operands!");
878 return CreateUnOp(static_cast<Instruction::UnaryOps
>(Opc
),
879 Ops
[0], Name
, FPMathTag
);
881 llvm_unreachable("Unexpected opcode!");
884 CallInst
*IRBuilderBase::CreateConstrainedFPCast(
885 Intrinsic::ID ID
, Value
*V
, Type
*DestTy
,
886 Instruction
*FMFSource
, const Twine
&Name
, MDNode
*FPMathTag
,
887 Optional
<RoundingMode
> Rounding
,
888 Optional
<fp::ExceptionBehavior
> Except
) {
889 Value
*ExceptV
= getConstrainedFPExcept(Except
);
891 FastMathFlags UseFMF
= FMF
;
893 UseFMF
= FMFSource
->getFastMathFlags();
896 bool HasRoundingMD
= false;
900 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
901 case Intrinsic::INTRINSIC: \
902 HasRoundingMD = ROUND_MODE; \
904 #include "llvm/IR/ConstrainedOps.def"
907 Value
*RoundingV
= getConstrainedFPRounding(Rounding
);
908 C
= CreateIntrinsic(ID
, {DestTy
, V
->getType()}, {V
, RoundingV
, ExceptV
},
911 C
= CreateIntrinsic(ID
, {DestTy
, V
->getType()}, {V
, ExceptV
}, nullptr,
914 setConstrainedFPCallAttr(C
);
916 if (isa
<FPMathOperator
>(C
))
917 setFPAttrs(C
, FPMathTag
, UseFMF
);
921 Value
*IRBuilderBase::CreateFCmpHelper(
922 CmpInst::Predicate P
, Value
*LHS
, Value
*RHS
, const Twine
&Name
,
923 MDNode
*FPMathTag
, bool IsSignaling
) {
924 if (IsFPConstrained
) {
925 auto ID
= IsSignaling
? Intrinsic::experimental_constrained_fcmps
926 : Intrinsic::experimental_constrained_fcmp
;
927 return CreateConstrainedFPCmp(ID
, P
, LHS
, RHS
, Name
);
930 if (auto *LC
= dyn_cast
<Constant
>(LHS
))
931 if (auto *RC
= dyn_cast
<Constant
>(RHS
))
932 return Insert(Folder
.CreateFCmp(P
, LC
, RC
), Name
);
933 return Insert(setFPAttrs(new FCmpInst(P
, LHS
, RHS
), FPMathTag
, FMF
), Name
);
936 CallInst
*IRBuilderBase::CreateConstrainedFPCmp(
937 Intrinsic::ID ID
, CmpInst::Predicate P
, Value
*L
, Value
*R
,
938 const Twine
&Name
, Optional
<fp::ExceptionBehavior
> Except
) {
939 Value
*PredicateV
= getConstrainedFPPredicate(P
);
940 Value
*ExceptV
= getConstrainedFPExcept(Except
);
942 CallInst
*C
= CreateIntrinsic(ID
, {L
->getType()},
943 {L
, R
, PredicateV
, ExceptV
}, nullptr, Name
);
944 setConstrainedFPCallAttr(C
);
948 CallInst
*IRBuilderBase::CreateConstrainedFPCall(
949 Function
*Callee
, ArrayRef
<Value
*> Args
, const Twine
&Name
,
950 Optional
<RoundingMode
> Rounding
,
951 Optional
<fp::ExceptionBehavior
> Except
) {
952 llvm::SmallVector
<Value
*, 6> UseArgs
;
954 append_range(UseArgs
, Args
);
955 bool HasRoundingMD
= false;
956 switch (Callee
->getIntrinsicID()) {
959 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
960 case Intrinsic::INTRINSIC: \
961 HasRoundingMD = ROUND_MODE; \
963 #include "llvm/IR/ConstrainedOps.def"
966 UseArgs
.push_back(getConstrainedFPRounding(Rounding
));
967 UseArgs
.push_back(getConstrainedFPExcept(Except
));
969 CallInst
*C
= CreateCall(Callee
, UseArgs
, Name
);
970 setConstrainedFPCallAttr(C
);
974 Value
*IRBuilderBase::CreateSelect(Value
*C
, Value
*True
, Value
*False
,
975 const Twine
&Name
, Instruction
*MDFrom
) {
976 if (auto *CC
= dyn_cast
<Constant
>(C
))
977 if (auto *TC
= dyn_cast
<Constant
>(True
))
978 if (auto *FC
= dyn_cast
<Constant
>(False
))
979 return Insert(Folder
.CreateSelect(CC
, TC
, FC
), Name
);
981 SelectInst
*Sel
= SelectInst::Create(C
, True
, False
);
983 MDNode
*Prof
= MDFrom
->getMetadata(LLVMContext::MD_prof
);
984 MDNode
*Unpred
= MDFrom
->getMetadata(LLVMContext::MD_unpredictable
);
985 Sel
= addBranchMetadata(Sel
, Prof
, Unpred
);
987 if (isa
<FPMathOperator
>(Sel
))
988 setFPAttrs(Sel
, nullptr /* MDNode* */, FMF
);
989 return Insert(Sel
, Name
);
992 Value
*IRBuilderBase::CreatePtrDiff(Value
*LHS
, Value
*RHS
,
994 assert(LHS
->getType() == RHS
->getType() &&
995 "Pointer subtraction operand types must match!");
996 auto *ArgType
= cast
<PointerType
>(LHS
->getType());
997 Value
*LHS_int
= CreatePtrToInt(LHS
, Type::getInt64Ty(Context
));
998 Value
*RHS_int
= CreatePtrToInt(RHS
, Type::getInt64Ty(Context
));
999 Value
*Difference
= CreateSub(LHS_int
, RHS_int
);
1000 return CreateExactSDiv(Difference
,
1001 ConstantExpr::getSizeOf(ArgType
->getElementType()),
1005 Value
*IRBuilderBase::CreateLaunderInvariantGroup(Value
*Ptr
) {
1006 assert(isa
<PointerType
>(Ptr
->getType()) &&
1007 "launder.invariant.group only applies to pointers.");
1008 // FIXME: we could potentially avoid casts to/from i8*.
1009 auto *PtrType
= Ptr
->getType();
1010 auto *Int8PtrTy
= getInt8PtrTy(PtrType
->getPointerAddressSpace());
1011 if (PtrType
!= Int8PtrTy
)
1012 Ptr
= CreateBitCast(Ptr
, Int8PtrTy
);
1013 Module
*M
= BB
->getParent()->getParent();
1014 Function
*FnLaunderInvariantGroup
= Intrinsic::getDeclaration(
1015 M
, Intrinsic::launder_invariant_group
, {Int8PtrTy
});
1017 assert(FnLaunderInvariantGroup
->getReturnType() == Int8PtrTy
&&
1018 FnLaunderInvariantGroup
->getFunctionType()->getParamType(0) ==
1020 "LaunderInvariantGroup should take and return the same type");
1022 CallInst
*Fn
= CreateCall(FnLaunderInvariantGroup
, {Ptr
});
1024 if (PtrType
!= Int8PtrTy
)
1025 return CreateBitCast(Fn
, PtrType
);
1029 Value
*IRBuilderBase::CreateStripInvariantGroup(Value
*Ptr
) {
1030 assert(isa
<PointerType
>(Ptr
->getType()) &&
1031 "strip.invariant.group only applies to pointers.");
1033 // FIXME: we could potentially avoid casts to/from i8*.
1034 auto *PtrType
= Ptr
->getType();
1035 auto *Int8PtrTy
= getInt8PtrTy(PtrType
->getPointerAddressSpace());
1036 if (PtrType
!= Int8PtrTy
)
1037 Ptr
= CreateBitCast(Ptr
, Int8PtrTy
);
1038 Module
*M
= BB
->getParent()->getParent();
1039 Function
*FnStripInvariantGroup
= Intrinsic::getDeclaration(
1040 M
, Intrinsic::strip_invariant_group
, {Int8PtrTy
});
1042 assert(FnStripInvariantGroup
->getReturnType() == Int8PtrTy
&&
1043 FnStripInvariantGroup
->getFunctionType()->getParamType(0) ==
1045 "StripInvariantGroup should take and return the same type");
1047 CallInst
*Fn
= CreateCall(FnStripInvariantGroup
, {Ptr
});
1049 if (PtrType
!= Int8PtrTy
)
1050 return CreateBitCast(Fn
, PtrType
);
1054 Value
*IRBuilderBase::CreateVectorReverse(Value
*V
, const Twine
&Name
) {
1055 auto *Ty
= cast
<VectorType
>(V
->getType());
1056 if (isa
<ScalableVectorType
>(Ty
)) {
1057 Module
*M
= BB
->getParent()->getParent();
1058 Function
*F
= Intrinsic::getDeclaration(
1059 M
, Intrinsic::experimental_vector_reverse
, Ty
);
1060 return Insert(CallInst::Create(F
, V
), Name
);
1062 // Keep the original behaviour for fixed vector
1063 SmallVector
<int, 8> ShuffleMask
;
1064 int NumElts
= Ty
->getElementCount().getKnownMinValue();
1065 for (int i
= 0; i
< NumElts
; ++i
)
1066 ShuffleMask
.push_back(NumElts
- i
- 1);
1067 return CreateShuffleVector(V
, ShuffleMask
, Name
);
1070 Value
*IRBuilderBase::CreateVectorSplice(Value
*V1
, Value
*V2
, int64_t Imm
,
1071 const Twine
&Name
) {
1072 assert(isa
<VectorType
>(V1
->getType()) && "Unexpected type");
1073 assert(V1
->getType() == V2
->getType() &&
1074 "Splice expects matching operand types!");
1076 if (auto *VTy
= dyn_cast
<ScalableVectorType
>(V1
->getType())) {
1077 Module
*M
= BB
->getParent()->getParent();
1078 Function
*F
= Intrinsic::getDeclaration(
1079 M
, Intrinsic::experimental_vector_splice
, VTy
);
1081 Value
*Ops
[] = {V1
, V2
, getInt32(Imm
)};
1082 return Insert(CallInst::Create(F
, Ops
), Name
);
1085 unsigned NumElts
= cast
<FixedVectorType
>(V1
->getType())->getNumElements();
1086 assert(((-Imm
<= NumElts
) || (Imm
< NumElts
)) &&
1087 "Invalid immediate for vector splice!");
1089 // Keep the original behaviour for fixed vector
1090 unsigned Idx
= (NumElts
+ Imm
) % NumElts
;
1091 SmallVector
<int, 8> Mask
;
1092 for (unsigned I
= 0; I
< NumElts
; ++I
)
1093 Mask
.push_back(Idx
+ I
);
1095 return CreateShuffleVector(V1
, V2
, Mask
);
1098 Value
*IRBuilderBase::CreateVectorSplat(unsigned NumElts
, Value
*V
,
1099 const Twine
&Name
) {
1100 auto EC
= ElementCount::getFixed(NumElts
);
1101 return CreateVectorSplat(EC
, V
, Name
);
1104 Value
*IRBuilderBase::CreateVectorSplat(ElementCount EC
, Value
*V
,
1105 const Twine
&Name
) {
1106 assert(EC
.isNonZero() && "Cannot splat to an empty vector!");
1108 // First insert it into a poison vector so we can shuffle it.
1109 Type
*I32Ty
= getInt32Ty();
1110 Value
*Poison
= PoisonValue::get(VectorType::get(V
->getType(), EC
));
1111 V
= CreateInsertElement(Poison
, V
, ConstantInt::get(I32Ty
, 0),
1112 Name
+ ".splatinsert");
1114 // Shuffle the value across the desired number of elements.
1115 SmallVector
<int, 16> Zeros
;
1116 Zeros
.resize(EC
.getKnownMinValue());
1117 return CreateShuffleVector(V
, Zeros
, Name
+ ".splat");
1120 Value
*IRBuilderBase::CreateExtractInteger(
1121 const DataLayout
&DL
, Value
*From
, IntegerType
*ExtractedTy
,
1122 uint64_t Offset
, const Twine
&Name
) {
1123 auto *IntTy
= cast
<IntegerType
>(From
->getType());
1124 assert(DL
.getTypeStoreSize(ExtractedTy
) + Offset
<=
1125 DL
.getTypeStoreSize(IntTy
) &&
1126 "Element extends past full value");
1127 uint64_t ShAmt
= 8 * Offset
;
1129 if (DL
.isBigEndian())
1130 ShAmt
= 8 * (DL
.getTypeStoreSize(IntTy
) -
1131 DL
.getTypeStoreSize(ExtractedTy
) - Offset
);
1133 V
= CreateLShr(V
, ShAmt
, Name
+ ".shift");
1135 assert(ExtractedTy
->getBitWidth() <= IntTy
->getBitWidth() &&
1136 "Cannot extract to a larger integer!");
1137 if (ExtractedTy
!= IntTy
) {
1138 V
= CreateTrunc(V
, ExtractedTy
, Name
+ ".trunc");
1143 Value
*IRBuilderBase::CreatePreserveArrayAccessIndex(
1144 Type
*ElTy
, Value
*Base
, unsigned Dimension
, unsigned LastIndex
,
1146 auto *BaseType
= Base
->getType();
1147 assert(isa
<PointerType
>(BaseType
) &&
1148 "Invalid Base ptr type for preserve.array.access.index.");
1149 assert(cast
<PointerType
>(BaseType
)->isOpaqueOrPointeeTypeMatches(ElTy
) &&
1150 "Pointer element type mismatch");
1152 Value
*LastIndexV
= getInt32(LastIndex
);
1153 Constant
*Zero
= ConstantInt::get(Type::getInt32Ty(Context
), 0);
1154 SmallVector
<Value
*, 4> IdxList(Dimension
, Zero
);
1155 IdxList
.push_back(LastIndexV
);
1158 GetElementPtrInst::getGEPReturnType(ElTy
, Base
, IdxList
);
1160 Module
*M
= BB
->getParent()->getParent();
1161 Function
*FnPreserveArrayAccessIndex
= Intrinsic::getDeclaration(
1162 M
, Intrinsic::preserve_array_access_index
, {ResultType
, BaseType
});
1164 Value
*DimV
= getInt32(Dimension
);
1166 CreateCall(FnPreserveArrayAccessIndex
, {Base
, DimV
, LastIndexV
});
1168 0, Attribute::get(Fn
->getContext(), Attribute::ElementType
, ElTy
));
1170 Fn
->setMetadata(LLVMContext::MD_preserve_access_index
, DbgInfo
);
1175 Value
*IRBuilderBase::CreatePreserveUnionAccessIndex(
1176 Value
*Base
, unsigned FieldIndex
, MDNode
*DbgInfo
) {
1177 assert(isa
<PointerType
>(Base
->getType()) &&
1178 "Invalid Base ptr type for preserve.union.access.index.");
1179 auto *BaseType
= Base
->getType();
1181 Module
*M
= BB
->getParent()->getParent();
1182 Function
*FnPreserveUnionAccessIndex
= Intrinsic::getDeclaration(
1183 M
, Intrinsic::preserve_union_access_index
, {BaseType
, BaseType
});
1185 Value
*DIIndex
= getInt32(FieldIndex
);
1187 CreateCall(FnPreserveUnionAccessIndex
, {Base
, DIIndex
});
1189 Fn
->setMetadata(LLVMContext::MD_preserve_access_index
, DbgInfo
);
1194 Value
*IRBuilderBase::CreatePreserveStructAccessIndex(
1195 Type
*ElTy
, Value
*Base
, unsigned Index
, unsigned FieldIndex
,
1197 auto *BaseType
= Base
->getType();
1198 assert(isa
<PointerType
>(BaseType
) &&
1199 "Invalid Base ptr type for preserve.struct.access.index.");
1200 assert(cast
<PointerType
>(BaseType
)->isOpaqueOrPointeeTypeMatches(ElTy
) &&
1201 "Pointer element type mismatch");
1203 Value
*GEPIndex
= getInt32(Index
);
1204 Constant
*Zero
= ConstantInt::get(Type::getInt32Ty(Context
), 0);
1206 GetElementPtrInst::getGEPReturnType(ElTy
, Base
, {Zero
, GEPIndex
});
1208 Module
*M
= BB
->getParent()->getParent();
1209 Function
*FnPreserveStructAccessIndex
= Intrinsic::getDeclaration(
1210 M
, Intrinsic::preserve_struct_access_index
, {ResultType
, BaseType
});
1212 Value
*DIIndex
= getInt32(FieldIndex
);
1213 CallInst
*Fn
= CreateCall(FnPreserveStructAccessIndex
,
1214 {Base
, GEPIndex
, DIIndex
});
1216 0, Attribute::get(Fn
->getContext(), Attribute::ElementType
, ElTy
));
1218 Fn
->setMetadata(LLVMContext::MD_preserve_access_index
, DbgInfo
);
1223 CallInst
*IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout
&DL
,
1226 Value
*OffsetValue
) {
1227 SmallVector
<Value
*, 4> Vals({PtrValue
, AlignValue
});
1229 Vals
.push_back(OffsetValue
);
1230 OperandBundleDefT
<Value
*> AlignOpB("align", Vals
);
1231 return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB
});
1234 CallInst
*IRBuilderBase::CreateAlignmentAssumption(const DataLayout
&DL
,
1237 Value
*OffsetValue
) {
1238 assert(isa
<PointerType
>(PtrValue
->getType()) &&
1239 "trying to create an alignment assumption on a non-pointer?");
1240 assert(Alignment
!= 0 && "Invalid Alignment");
1241 auto *PtrTy
= cast
<PointerType
>(PtrValue
->getType());
1242 Type
*IntPtrTy
= getIntPtrTy(DL
, PtrTy
->getAddressSpace());
1243 Value
*AlignValue
= ConstantInt::get(IntPtrTy
, Alignment
);
1244 return CreateAlignmentAssumptionHelper(DL
, PtrValue
, AlignValue
, OffsetValue
);
1247 CallInst
*IRBuilderBase::CreateAlignmentAssumption(const DataLayout
&DL
,
1250 Value
*OffsetValue
) {
1251 assert(isa
<PointerType
>(PtrValue
->getType()) &&
1252 "trying to create an alignment assumption on a non-pointer?");
1253 return CreateAlignmentAssumptionHelper(DL
, PtrValue
, Alignment
, OffsetValue
);
// Out-of-line virtual destructors and anchor functions: these pin each
// class's vtable to this translation unit.
IRBuilderDefaultInserter::~IRBuilderDefaultInserter() {}
IRBuilderCallbackInserter::~IRBuilderCallbackInserter() {}
IRBuilderFolder::~IRBuilderFolder() {}
void ConstantFolder::anchor() {}
void NoFolder::anchor() {}