//===- InferAddressSpaces.cpp - --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// CUDA C/C++ includes memory space designation as variable type qualifiers
// (such as __global__ and __shared__). Knowing the space of a memory access
// allows CUDA compilers to emit faster PTX loads and stores. For example, a
// load from shared memory can be translated to `ld.shared`, which is roughly
// 10% faster than a generic `ld` on an NVIDIA Tesla K40c.
//
// Unfortunately, type qualifiers only apply to variable declarations, so CUDA
// compilers must infer the memory space of an address expression from
// type-qualified variables.
//
// LLVM IR uses non-zero (so-called specific) address spaces to represent
// memory spaces (e.g. addrspace(3) means shared memory). The Clang frontend
// places only type-qualified variables in specific address spaces, and then
// conservatively `addrspacecast`s each type-qualified variable to addrspace(0)
// (the so-called generic address space) for other instructions to use.
//
// For example, Clang translates the following CUDA code
//   __shared__ float a[10];
//   float v = a[i];
// to
//   %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
//   %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
//   %v = load float, float* %1 ; emits ld.f32
// @a is in addrspace(3) since it's type-qualified, but its use from %1 is
// redirected to %0 (the generic version of @a).
//
// The optimization implemented in this file propagates specific address spaces
// from type-qualified variable declarations to their users. For example, it
// optimizes the above IR to
//   %1 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
//   %v = load float addrspace(3)* %1 ; emits ld.shared.f32
// propagating the addrspace(3) from @a to %1. As a result, the NVPTX codegen
// is able to emit ld.shared.f32 for %v.
//
// Address space inference works in two steps. First, it uses a data-flow
// analysis to infer as many generic pointers as possible to point to only one
// specific address space. In the above example, it can prove that %1 only
// points to addrspace(3). This algorithm was published in
//   CUDA: Compiling and optimizing for a GPU platform
//   Chakrabarti, Grover, Aarts, Kong, Kudlur, Lin, Marathe, Murphy, Wang.
//
// Then, address space inference replaces all refinable generic pointers with
// equivalent specific pointers.
//
// The major challenge of implementing this optimization is handling PHINodes,
// which may create loops in the data flow graph. This brings two complications.
//
// First, the data flow analysis in Step 1 needs to be circular. For example,
//     %generic.input = addrspacecast float addrspace(3)* %input to float*
//   loop:
//     %y = phi [ %generic.input, %y2 ]
//     %y2 = getelementptr %y, 1
//     %v = load %y2
//     br ..., label %loop, ...
// proving %y specific requires proving both %generic.input and %y2 specific,
// but proving %y2 specific circles back to %y. To address this complication,
// the data flow analysis operates on a lattice:
//   uninitialized > specific address spaces > generic.
// All address expressions (our implementation only considers phi, bitcast,
// addrspacecast, and getelementptr) start with the uninitialized address space.
// The monotone transfer function moves the address space of a pointer down a
// lattice path from uninitialized to specific and then to generic. A join
// operation of two different specific address spaces pushes the expression down
// to the generic address space. The analysis completes once it reaches a fixed
// point.
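//
// For illustration (an example, not an exhaustive table), with shared =
// addrspace(3) and global = addrspace(1), the join behaves as:
//   join(uninitialized, 3) = 3
//   join(3, 3)             = 3
//   join(3, 1)             = generic
//   join(generic, x)       = generic for any x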
//
// Second, IR rewriting in Step 2 also needs to be circular. For example,
// converting %y to addrspace(3) requires the compiler to know the converted
// %y2, but converting %y2 needs the converted %y. To address this complication,
// we break these cycles using "poison" placeholders. When converting an
// instruction `I` to a new address space, if its operand `Op` is not converted
// yet, we let `I` temporarily use `poison` and fix all the uses later.
// For instance, our algorithm first converts %y to
//   %y' = phi float addrspace(3)* [ %input, poison ]
// Then, it converts %y2 to
//   %y2' = getelementptr %y', 1
// Finally, it fixes the poison in %y' so that
//   %y' = phi float addrspace(3)* [ %input, %y2' ]
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/InferAddressSpaces.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"
#include <cassert>
#include <limits>
#include <utility>
#include <vector>

#define DEBUG_TYPE "infer-address-spaces"
using namespace llvm;
static cl::opt<bool> AssumeDefaultIsFlatAddressSpace(
    "assume-default-is-flat-addrspace", cl::init(false), cl::ReallyHidden,
    cl::desc("The default address space is assumed as the flat address space. "
             "This is mainly for testing purposes."));
static const unsigned UninitializedAddressSpace =
    std::numeric_limits<unsigned>::max();

namespace {

using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;
// Different from ValueToAddrSpaceMapTy, where a new addrspace is inferred on
// the *def* of a value, PredicatedAddrSpaceMapTy is a map where a new
// addrspace is inferred on the *use* of a pointer. This map is introduced to
// infer addrspace from the addrspace predicate assumption built from the
// assume intrinsic. In that scenario, only specific uses (under a valid
// assumption context) could be inferred with a new addrspace.
using PredicatedAddrSpaceMapTy =
    DenseMap<std::pair<const Value *, const Value *>, unsigned>;
using PostorderStackTy = llvm::SmallVector<PointerIntPair<Value *, 1, bool>, 4>;
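// Note: the bool in each PointerIntPair records whether the operands of the
// value have already been visited during the non-recursive postorder traversal
// (see collectFlatAddressExpressions below).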
class InferAddressSpaces : public FunctionPass {
  unsigned FlatAddrSpace = 0;

public:
  static char ID;

  InferAddressSpaces()
      : FunctionPass(ID), FlatAddrSpace(UninitializedAddressSpace) {
    initializeInferAddressSpacesPass(*PassRegistry::getPassRegistry());
  }
  InferAddressSpaces(unsigned AS) : FunctionPass(ID), FlatAddrSpace(AS) {
    initializeInferAddressSpacesPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
  }

  bool runOnFunction(Function &F) override;
};
class InferAddressSpacesImpl {
  AssumptionCache &AC;
  Function *F = nullptr;
  const DominatorTree *DT = nullptr;
  const TargetTransformInfo *TTI = nullptr;
  const DataLayout *DL = nullptr;

  /// Target-specific address space whose uses should be replaced if possible.
  unsigned FlatAddrSpace = 0;
  // Try to update the address space of V. Returns true if V is updated, false
  // otherwise.
  bool updateAddressSpace(const Value &V,
                          ValueToAddrSpaceMapTy &InferredAddrSpace,
                          PredicatedAddrSpaceMapTy &PredicatedAS) const;
  // Tries to infer the specific address space of each address expression in
  // Postorder.
  void inferAddressSpaces(ArrayRef<WeakTrackingVH> Postorder,
                          ValueToAddrSpaceMapTy &InferredAddrSpace,
                          PredicatedAddrSpaceMapTy &PredicatedAS) const;
  bool isSafeToCastConstAddrSpace(Constant *C, unsigned NewAS) const;

  Value *cloneInstructionWithNewAddressSpace(
      Instruction *I, unsigned NewAddrSpace,
      const ValueToValueMapTy &ValueWithNewAddrSpace,
      const PredicatedAddrSpaceMapTy &PredicatedAS,
      SmallVectorImpl<const Use *> *PoisonUsesToFix) const;
  void performPointerReplacement(
      Value *V, Value *NewV, Use &U, ValueToValueMapTy &ValueWithNewAddrSpace,
      SmallVectorImpl<Instruction *> &DeadInstructions) const;
  // Changes the flat address expressions in function F to point to specific
  // address spaces if InferredAddrSpace says so. Postorder is the postorder of
  // all flat expressions in the use-def graph of function F.
  bool rewriteWithNewAddressSpaces(
      ArrayRef<WeakTrackingVH> Postorder,
      const ValueToAddrSpaceMapTy &InferredAddrSpace,
      const PredicatedAddrSpaceMapTy &PredicatedAS) const;
  void appendsFlatAddressExpressionToPostorderStack(
      Value *V, PostorderStackTy &PostorderStack,
      DenseSet<Value *> &Visited) const;

  bool rewriteIntrinsicOperands(IntrinsicInst *II, Value *OldV,
                                Value *NewV) const;
  void collectRewritableIntrinsicOperands(IntrinsicInst *II,
                                          PostorderStackTy &PostorderStack,
                                          DenseSet<Value *> &Visited) const;
  std::vector<WeakTrackingVH> collectFlatAddressExpressions(Function &F) const;

  Value *cloneValueWithNewAddressSpace(
      Value *V, unsigned NewAddrSpace,
      const ValueToValueMapTy &ValueWithNewAddrSpace,
      const PredicatedAddrSpaceMapTy &PredicatedAS,
      SmallVectorImpl<const Use *> *PoisonUsesToFix) const;
  unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) const;

  unsigned getPredicatedAddrSpace(const Value &PtrV,
                                  const Value *UserCtx) const;

public:
  InferAddressSpacesImpl(AssumptionCache &AC, const DominatorTree *DT,
                         const TargetTransformInfo *TTI, unsigned FlatAddrSpace)
      : AC(AC), DT(DT), TTI(TTI), FlatAddrSpace(FlatAddrSpace) {}
  bool run(Function &F);
};

} // end anonymous namespace
char InferAddressSpaces::ID = 0;

INITIALIZE_PASS_BEGIN(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_END(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces",
                    false, false)
static Type *getPtrOrVecOfPtrsWithNewAS(Type *Ty, unsigned NewAddrSpace) {
  assert(Ty->isPtrOrPtrVectorTy());
  PointerType *NPT = PointerType::get(Ty->getContext(), NewAddrSpace);
  return Ty->getWithNewType(NPT);
}
// Check whether that's a no-op pointer bitcast using a pair of
// `ptrtoint`/`inttoptr` due to the missing no-op pointer bitcast over
// different address spaces.
static bool isNoopPtrIntCastPair(const Operator *I2P, const DataLayout &DL,
                                 const TargetTransformInfo *TTI) {
  assert(I2P->getOpcode() == Instruction::IntToPtr);
  auto *P2I = dyn_cast<Operator>(I2P->getOperand(0));
  if (!P2I || P2I->getOpcode() != Instruction::PtrToInt)
    return false;
  // Check it's really safe to treat that pair of `ptrtoint`/`inttoptr` as a
  // no-op cast. Besides checking both of them are no-op casts, as the
  // reinterpreted pointer may be used in other pointer arithmetic, we also
  // need to double-check that through the target-specific hook. That ensures
  // the underlying target also agrees that's a no-op address space cast and
  // pointer bits are preserved.
  // The current IR spec doesn't have clear rules on address space casts,
  // especially a clear definition for pointer bits in non-default address
  // spaces. It would be undefined if that pointer is dereferenced after an
  // invalid reinterpret cast. Also, because the meaning of bits in non-default
  // address spaces is unclear in the current spec, pointer arithmetic may also
  // be undefined after an invalid pointer reinterpret cast.
  // However, as we confirm through the target hooks that it's a no-op
  // addrspacecast, it doesn't matter since the bits should be the same.
  unsigned P2IOp0AS = P2I->getOperand(0)->getType()->getPointerAddressSpace();
  unsigned I2PAS = I2P->getType()->getPointerAddressSpace();
  return CastInst::isNoopCast(Instruction::CastOps(I2P->getOpcode()),
                              I2P->getOperand(0)->getType(), I2P->getType(),
                              DL) &&
         CastInst::isNoopCast(Instruction::CastOps(P2I->getOpcode()),
                              P2I->getOperand(0)->getType(), P2I->getType(),
                              DL) &&
         (P2IOp0AS == I2PAS || TTI->isNoopAddrSpaceCast(P2IOp0AS, I2PAS));
}
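// As an illustrative (hypothetical) example, a pair such as
//   %i = ptrtoint ptr addrspace(1) %p to i64
//   %q = inttoptr i64 %i to ptr
// only qualifies when both casts are no-op casts under the data layout and the
// target confirms via isNoopAddrSpaceCast that the pointer bits are preserved
// across the two address spaces; the address spaces used here are just an
// example, not a statement about any particular target.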
// Returns true if V is an address expression.
// TODO: Currently, we consider only phi, bitcast, addrspacecast, and
// getelementptr operators.
static bool isAddressExpression(const Value &V, const DataLayout &DL,
                                const TargetTransformInfo *TTI) {
  const Operator *Op = dyn_cast<Operator>(&V);
  if (!Op)
    return false;

  switch (Op->getOpcode()) {
  case Instruction::PHI:
    assert(Op->getType()->isPtrOrPtrVectorTy());
    return true;
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return true;
  case Instruction::Select:
    return Op->getType()->isPtrOrPtrVectorTy();
  case Instruction::Call: {
    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(&V);
    return II && II->getIntrinsicID() == Intrinsic::ptrmask;
  }
  case Instruction::IntToPtr:
    return isNoopPtrIntCastPair(Op, DL, TTI);
  default:
    // That value is an address expression if it has an assumed address space.
    return TTI->getAssumedAddrSpace(&V) != UninitializedAddressSpace;
  }
}
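// For illustration (hypothetical IR), both of the following are address
// expressions:
//   %p = getelementptr float, ptr %base, i64 %i
//   %q = select i1 %c, ptr %p, ptr %base
// while a pointer-typed load result is not, because loads are not in the list
// above.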
// Returns the pointer operands of V.
//
// Precondition: V is an address expression.
static SmallVector<Value *, 2>
getPointerOperands(const Value &V, const DataLayout &DL,
                   const TargetTransformInfo *TTI) {
  const Operator &Op = cast<Operator>(V);
  switch (Op.getOpcode()) {
  case Instruction::PHI: {
    auto IncomingValues = cast<PHINode>(Op).incoming_values();
    return {IncomingValues.begin(), IncomingValues.end()};
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return {Op.getOperand(0)};
  case Instruction::Select:
    return {Op.getOperand(1), Op.getOperand(2)};
  case Instruction::Call: {
    const IntrinsicInst &II = cast<IntrinsicInst>(Op);
    assert(II.getIntrinsicID() == Intrinsic::ptrmask &&
           "unexpected intrinsic call");
    return {II.getArgOperand(0)};
  }
  case Instruction::IntToPtr: {
    assert(isNoopPtrIntCastPair(&Op, DL, TTI));
    auto *P2I = cast<Operator>(Op.getOperand(0));
    return {P2I->getOperand(0)};
  }
  default:
    llvm_unreachable("Unexpected instruction type.");
  }
}
bool InferAddressSpacesImpl::rewriteIntrinsicOperands(IntrinsicInst *II,
                                                      Value *OldV,
                                                      Value *NewV) const {
  Module *M = II->getParent()->getParent()->getParent();
  Intrinsic::ID IID = II->getIntrinsicID();
  switch (IID) {
  case Intrinsic::objectsize:
  case Intrinsic::masked_load: {
    Type *DestTy = II->getType();
    Type *SrcTy = NewV->getType();
    Function *NewDecl =
        Intrinsic::getOrInsertDeclaration(M, IID, {DestTy, SrcTy});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return true;
  }
  case Intrinsic::ptrmask:
    // This is handled as an address expression, not as a memory-use operation.
    return false;
  case Intrinsic::masked_gather: {
    Type *RetTy = II->getType();
    Type *NewPtrTy = NewV->getType();
    Function *NewDecl =
        Intrinsic::getOrInsertDeclaration(M, IID, {RetTy, NewPtrTy});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return true;
  }
  case Intrinsic::masked_store:
  case Intrinsic::masked_scatter: {
    Type *ValueTy = II->getOperand(0)->getType();
    Type *NewPtrTy = NewV->getType();
    Function *NewDecl = Intrinsic::getOrInsertDeclaration(
        M, II->getIntrinsicID(), {ValueTy, NewPtrTy});
    II->setArgOperand(1, NewV);
    II->setCalledFunction(NewDecl);
    return true;
  }
  case Intrinsic::prefetch:
  case Intrinsic::is_constant: {
    Function *NewDecl = Intrinsic::getOrInsertDeclaration(
        M, II->getIntrinsicID(), {NewV->getType()});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return true;
  }
  case Intrinsic::fake_use: {
    II->replaceUsesOfWith(OldV, NewV);
    return true;
  }
  default: {
    Value *Rewrite = TTI->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
    if (!Rewrite)
      return false;
    if (Rewrite != II)
      II->replaceAllUsesWith(Rewrite);
    return true;
  }
  }
}
void InferAddressSpacesImpl::collectRewritableIntrinsicOperands(
    IntrinsicInst *II, PostorderStackTy &PostorderStack,
    DenseSet<Value *> &Visited) const {
  auto IID = II->getIntrinsicID();
  switch (IID) {
  case Intrinsic::ptrmask:
  case Intrinsic::objectsize:
    appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(0),
                                                 PostorderStack, Visited);
    break;
  case Intrinsic::is_constant: {
    Value *Ptr = II->getArgOperand(0);
    if (Ptr->getType()->isPtrOrPtrVectorTy()) {
      appendsFlatAddressExpressionToPostorderStack(Ptr, PostorderStack,
                                                   Visited);
    }

    break;
  }
  case Intrinsic::masked_load:
  case Intrinsic::masked_gather:
  case Intrinsic::prefetch:
    appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(0),
                                                 PostorderStack, Visited);
    break;
  case Intrinsic::masked_store:
  case Intrinsic::masked_scatter:
    appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(1),
                                                 PostorderStack, Visited);
    break;
  case Intrinsic::fake_use: {
    for (Value *Op : II->operands()) {
      if (Op->getType()->isPtrOrPtrVectorTy()) {
        appendsFlatAddressExpressionToPostorderStack(Op, PostorderStack,
                                                     Visited);
      }
    }

    break;
  }
  default:
    SmallVector<int, 2> OpIndexes;
    if (TTI->collectFlatAddressOperands(OpIndexes, IID)) {
      for (int Idx : OpIndexes) {
        appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(Idx),
                                                     PostorderStack, Visited);
      }
    }
    break;
  }
}
// If V is an unvisited flat address expression, appends V to PostorderStack
// and marks it as visited.
void InferAddressSpacesImpl::appendsFlatAddressExpressionToPostorderStack(
    Value *V, PostorderStackTy &PostorderStack,
    DenseSet<Value *> &Visited) const {
  assert(V->getType()->isPtrOrPtrVectorTy());

  // Generic addressing expressions may be hidden in nested constant
  // expressions.
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    // TODO: Look in non-address parts, like icmp operands.
    if (isAddressExpression(*CE, *DL, TTI) && Visited.insert(CE).second)
      PostorderStack.emplace_back(CE, false);

    return;
  }

  if (V->getType()->getPointerAddressSpace() == FlatAddrSpace &&
      isAddressExpression(*V, *DL, TTI)) {
    if (Visited.insert(V).second) {
      PostorderStack.emplace_back(V, false);

      Operator *Op = cast<Operator>(V);
      for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I) {
        if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op->getOperand(I))) {
          if (isAddressExpression(*CE, *DL, TTI) && Visited.insert(CE).second)
            PostorderStack.emplace_back(CE, false);
        }
      }
    }
  }
}
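// As an illustrative (hypothetical) example, a flat address expression can be
// buried inside a constant-expression operand of an instruction:
//   %v = load float, ptr getelementptr inbounds ([10 x float],
//            ptr addrspacecast (ptr addrspace(3) @a to ptr), i64 0, i64 1)
// The nested addrspacecast constant expression is pushed here so the data-flow
// analysis can later refine it to addrspace(3).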
// Returns all flat address expressions in function F. The elements are ordered
// in postorder.
std::vector<WeakTrackingVH>
InferAddressSpacesImpl::collectFlatAddressExpressions(Function &F) const {
  // This function implements a non-recursive postorder traversal of a partial
  // use-def graph of function F.
  PostorderStackTy PostorderStack;
  // The set of visited expressions.
  DenseSet<Value *> Visited;

  auto PushPtrOperand = [&](Value *Ptr) {
    appendsFlatAddressExpressionToPostorderStack(Ptr, PostorderStack, Visited);
  };

  // Look at operations that may be interesting to accelerate by moving to a
  // known address space. We primarily aim at loads and stores, but pure
  // addressing calculations may also be faster.
  for (Instruction &I : instructions(F)) {
    if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
      PushPtrOperand(GEP->getPointerOperand());
    } else if (auto *LI = dyn_cast<LoadInst>(&I))
      PushPtrOperand(LI->getPointerOperand());
    else if (auto *SI = dyn_cast<StoreInst>(&I))
      PushPtrOperand(SI->getPointerOperand());
    else if (auto *RMW = dyn_cast<AtomicRMWInst>(&I))
      PushPtrOperand(RMW->getPointerOperand());
    else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I))
      PushPtrOperand(CmpX->getPointerOperand());
    else if (auto *MI = dyn_cast<MemIntrinsic>(&I)) {
      // For memset/memcpy/memmove, any pointer operand can be replaced.
      PushPtrOperand(MI->getRawDest());

      // Handle 2nd operand for memcpy/memmove.
      if (auto *MTI = dyn_cast<MemTransferInst>(MI))
        PushPtrOperand(MTI->getRawSource());
    } else if (auto *II = dyn_cast<IntrinsicInst>(&I))
      collectRewritableIntrinsicOperands(II, PostorderStack, Visited);
    else if (ICmpInst *Cmp = dyn_cast<ICmpInst>(&I)) {
      if (Cmp->getOperand(0)->getType()->isPtrOrPtrVectorTy()) {
        PushPtrOperand(Cmp->getOperand(0));
        PushPtrOperand(Cmp->getOperand(1));
      }
    } else if (auto *ASC = dyn_cast<AddrSpaceCastInst>(&I)) {
      PushPtrOperand(ASC->getPointerOperand());
    } else if (auto *I2P = dyn_cast<IntToPtrInst>(&I)) {
      if (isNoopPtrIntCastPair(cast<Operator>(I2P), *DL, TTI))
        PushPtrOperand(cast<Operator>(I2P->getOperand(0))->getOperand(0));
    } else if (auto *RI = dyn_cast<ReturnInst>(&I)) {
      if (auto *RV = RI->getReturnValue();
          RV && RV->getType()->isPtrOrPtrVectorTy())
        PushPtrOperand(RV);
    }
  }
  std::vector<WeakTrackingVH> Postorder; // The resultant postorder.
  while (!PostorderStack.empty()) {
    Value *TopVal = PostorderStack.back().getPointer();
    // If the operands of the expression on the top are already explored,
    // adds that expression to the resultant postorder.
    if (PostorderStack.back().getInt()) {
      if (TopVal->getType()->getPointerAddressSpace() == FlatAddrSpace)
        Postorder.push_back(TopVal);
      PostorderStack.pop_back();
      continue;
    }

    // Otherwise, adds its operands to the stack and explores them.
    PostorderStack.back().setInt(true);
    // Skip values with an assumed address space.
    if (TTI->getAssumedAddrSpace(TopVal) == UninitializedAddressSpace) {
      for (Value *PtrOperand : getPointerOperands(*TopVal, *DL, TTI)) {
        appendsFlatAddressExpressionToPostorderStack(PtrOperand,
                                                     PostorderStack, Visited);
      }
    }
  }
  return Postorder;
}
// A helper function for cloneInstructionWithNewAddressSpace. Returns the clone
// of OperandUse.get() in the new address space. If the clone is not ready yet,
// returns poison in the new address space as a placeholder.
static Value *operandWithNewAddressSpaceOrCreatePoison(
    const Use &OperandUse, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    const PredicatedAddrSpaceMapTy &PredicatedAS,
    SmallVectorImpl<const Use *> *PoisonUsesToFix) {
  Value *Operand = OperandUse.get();

  Type *NewPtrTy = getPtrOrVecOfPtrsWithNewAS(Operand->getType(), NewAddrSpace);

  if (Constant *C = dyn_cast<Constant>(Operand))
    return ConstantExpr::getAddrSpaceCast(C, NewPtrTy);

  if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand))
    return NewOperand;

  Instruction *Inst = cast<Instruction>(OperandUse.getUser());
  auto I = PredicatedAS.find(std::make_pair(Inst, Operand));
  if (I != PredicatedAS.end()) {
    // Insert an addrspacecast on that operand before the user.
    unsigned NewAS = I->second;
    Type *NewPtrTy = getPtrOrVecOfPtrsWithNewAS(Operand->getType(), NewAS);
    auto *NewI = new AddrSpaceCastInst(Operand, NewPtrTy);
    NewI->insertBefore(Inst);
    NewI->setDebugLoc(Inst->getDebugLoc());
    return NewI;
  }

  PoisonUsesToFix->push_back(&OperandUse);
  return PoisonValue::get(NewPtrTy);
}
// Returns a clone of `I` with its operands converted to those specified in
// ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an
// operand whose address space needs to be modified might not exist in
// ValueWithNewAddrSpace. In that case, uses poison as a placeholder operand and
// adds that operand use to PoisonUsesToFix so that the caller can fix them
// later.
//
// Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast
// from a pointer whose type already matches. Therefore, this function returns a
// Value* instead of an Instruction*.
//
// This may also return nullptr in the case the instruction could not be
// rewritten.
Value *InferAddressSpacesImpl::cloneInstructionWithNewAddressSpace(
    Instruction *I, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    const PredicatedAddrSpaceMapTy &PredicatedAS,
    SmallVectorImpl<const Use *> *PoisonUsesToFix) const {
  Type *NewPtrType = getPtrOrVecOfPtrsWithNewAS(I->getType(), NewAddrSpace);

  if (I->getOpcode() == Instruction::AddrSpaceCast) {
    Value *Src = I->getOperand(0);
    // Because `I` is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space,
    // according to our algorithm.
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    if (Src->getType() != NewPtrType)
      return new BitCastInst(Src, NewPtrType);
    return Src;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // Technically the intrinsic ID is a pointer typed argument, so specially
    // handle calls early.
    assert(II->getIntrinsicID() == Intrinsic::ptrmask);
    Value *NewPtr = operandWithNewAddressSpaceOrCreatePoison(
        II->getArgOperandUse(0), NewAddrSpace, ValueWithNewAddrSpace,
        PredicatedAS, PoisonUsesToFix);
    Value *Rewrite =
        TTI->rewriteIntrinsicWithAddressSpace(II, II->getArgOperand(0), NewPtr);
    if (Rewrite) {
      assert(Rewrite != II && "cannot modify this pointer operation in place");
      return Rewrite;
    }

    return nullptr;
  }

  unsigned AS = TTI->getAssumedAddrSpace(I);
  if (AS != UninitializedAddressSpace) {
    // For the assumed address space, insert an `addrspacecast` to make that
    // explicit.
    Type *NewPtrTy = getPtrOrVecOfPtrsWithNewAS(I->getType(), AS);
    auto *NewI = new AddrSpaceCastInst(I, NewPtrTy);
    NewI->insertAfter(I);
    NewI->setDebugLoc(I->getDebugLoc());
    return NewI;
  }

  // Computes the converted pointer operands.
  SmallVector<Value *, 4> NewPointerOperands;
  for (const Use &OperandUse : I->operands()) {
    if (!OperandUse.get()->getType()->isPtrOrPtrVectorTy())
      NewPointerOperands.push_back(nullptr);
    else
      NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreatePoison(
          OperandUse, NewAddrSpace, ValueWithNewAddrSpace, PredicatedAS,
          PoisonUsesToFix));
  }

  switch (I->getOpcode()) {
  case Instruction::BitCast:
    return new BitCastInst(NewPointerOperands[0], NewPtrType);
  case Instruction::PHI: {
    assert(I->getType()->isPtrOrPtrVectorTy());
    PHINode *PHI = cast<PHINode>(I);
    PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues());
    for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) {
      unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index);
      NewPHI->addIncoming(NewPointerOperands[OperandNo],
                          PHI->getIncomingBlock(Index));
    }
    return NewPHI;
  }
  case Instruction::GetElementPtr: {
    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
    GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
        GEP->getSourceElementType(), NewPointerOperands[0],
        SmallVector<Value *, 4>(GEP->indices()));
    NewGEP->setIsInBounds(GEP->isInBounds());
    return NewGEP;
  }
  case Instruction::Select:
    assert(I->getType()->isPtrOrPtrVectorTy());
    return SelectInst::Create(I->getOperand(0), NewPointerOperands[1],
                              NewPointerOperands[2], "", nullptr, I);
  case Instruction::IntToPtr: {
    assert(isNoopPtrIntCastPair(cast<Operator>(I), *DL, TTI));
    Value *Src = cast<Operator>(I->getOperand(0))->getOperand(0);
    if (Src->getType() == NewPtrType)
      return Src;

    // If we had a no-op inttoptr/ptrtoint pair, we may still have inferred a
    // source address space from a generic pointer source, and need to insert
    // a cast back.
    return CastInst::CreatePointerBitCastOrAddrSpaceCast(Src, NewPtrType);
  }
  default:
    llvm_unreachable("Unexpected opcode");
  }
}
// Similar to cloneInstructionWithNewAddressSpace, returns a clone of the
// constant expression `CE` with its operands replaced as specified in
// ValueWithNewAddrSpace.
static Value *cloneConstantExprWithNewAddressSpace(
    ConstantExpr *CE, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace, const DataLayout *DL,
    const TargetTransformInfo *TTI) {
  Type *TargetType =
      CE->getType()->isPtrOrPtrVectorTy()
          ? getPtrOrVecOfPtrsWithNewAS(CE->getType(), NewAddrSpace)
          : CE->getType();

  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
    // Because CE is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space according
    // to our algorithm.
    assert(CE->getOperand(0)->getType()->getPointerAddressSpace() ==
           NewAddrSpace);
    return ConstantExpr::getBitCast(CE->getOperand(0), TargetType);
  }

  if (CE->getOpcode() == Instruction::BitCast) {
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(CE->getOperand(0)))
      return ConstantExpr::getBitCast(cast<Constant>(NewOperand), TargetType);
    return ConstantExpr::getAddrSpaceCast(CE, TargetType);
  }

  if (CE->getOpcode() == Instruction::IntToPtr) {
    assert(isNoopPtrIntCastPair(cast<Operator>(CE), *DL, TTI));
    Constant *Src = cast<ConstantExpr>(CE->getOperand(0))->getOperand(0);
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    return ConstantExpr::getBitCast(Src, TargetType);
  }

  // Computes the operands of the new constant expression.
  bool IsNew = false;
  SmallVector<Constant *, 4> NewOperands;
  for (unsigned Index = 0; Index < CE->getNumOperands(); ++Index) {
    Constant *Operand = CE->getOperand(Index);
    // If the address space of `Operand` needs to be modified, the new operand
    // with the new address space should already be in ValueWithNewAddrSpace
    // because (1) the constant expressions we consider (i.e. addrspacecast,
    // bitcast, and getelementptr) do not incur cycles in the data flow graph
    // and (2) this function is called on constant expressions in postorder.
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) {
      IsNew = true;
      NewOperands.push_back(cast<Constant>(NewOperand));
      continue;
    }
    if (auto *CExpr = dyn_cast<ConstantExpr>(Operand))
      if (Value *NewOperand = cloneConstantExprWithNewAddressSpace(
              CExpr, NewAddrSpace, ValueWithNewAddrSpace, DL, TTI)) {
        IsNew = true;
        NewOperands.push_back(cast<Constant>(NewOperand));
        continue;
      }
    // Otherwise, reuses the old operand.
    NewOperands.push_back(Operand);
  }

  // If !IsNew, we will replace the Value with itself. However, replaced values
  // are assumed to be wrapped in an addrspacecast later, so drop it now.
  if (!IsNew)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    // Needs to specify the source type while constructing a getelementptr
    // constant expression.
    return CE->getWithOperands(NewOperands, TargetType, /*OnlyIfReduced=*/false,
                               cast<GEPOperator>(CE)->getSourceElementType());
  }

  return CE->getWithOperands(NewOperands, TargetType);
}
// Returns a clone of the value `V`, with its operands replaced as specified in
// ValueWithNewAddrSpace. This function is called on every flat address
// expression whose address space needs to be modified, in postorder.
//
// See cloneInstructionWithNewAddressSpace for the meaning of PoisonUsesToFix.
Value *InferAddressSpacesImpl::cloneValueWithNewAddressSpace(
    Value *V, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    const PredicatedAddrSpaceMapTy &PredicatedAS,
    SmallVectorImpl<const Use *> *PoisonUsesToFix) const {
  // All values in Postorder are flat address expressions.
  assert(V->getType()->getPointerAddressSpace() == FlatAddrSpace &&
         isAddressExpression(*V, *DL, TTI));

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Value *NewV = cloneInstructionWithNewAddressSpace(
        I, NewAddrSpace, ValueWithNewAddrSpace, PredicatedAS, PoisonUsesToFix);
    if (Instruction *NewI = dyn_cast_or_null<Instruction>(NewV)) {
      if (NewI->getParent() == nullptr) {
        NewI->insertBefore(I);
        NewI->setDebugLoc(I->getDebugLoc());
      }
    }
    return NewV;
  }

  return cloneConstantExprWithNewAddressSpace(
      cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace, DL, TTI);
}
// Defines the join operation on the address space lattice (see the file header
// comments).
unsigned InferAddressSpacesImpl::joinAddressSpaces(unsigned AS1,
                                                   unsigned AS2) const {
  if (AS1 == FlatAddrSpace || AS2 == FlatAddrSpace)
    return FlatAddrSpace;

  if (AS1 == UninitializedAddressSpace)
    return AS2;
  if (AS2 == UninitializedAddressSpace)
    return AS1;

  // The join of two different specific address spaces is flat.
  return (AS1 == AS2) ? AS1 : FlatAddrSpace;
}
bool InferAddressSpacesImpl::run(Function &CurFn) {
  F = &CurFn;
  DL = &F->getDataLayout();

  if (AssumeDefaultIsFlatAddressSpace)
    FlatAddrSpace = 0;

  if (FlatAddrSpace == UninitializedAddressSpace) {
    FlatAddrSpace = TTI->getFlatAddressSpace();
    if (FlatAddrSpace == UninitializedAddressSpace)
      return false;
  }

  // Collects all flat address expressions in postorder.
  std::vector<WeakTrackingVH> Postorder = collectFlatAddressExpressions(*F);

  // Runs a data-flow analysis to refine the address spaces of every expression
  // in Postorder.
  ValueToAddrSpaceMapTy InferredAddrSpace;
  PredicatedAddrSpaceMapTy PredicatedAS;
  inferAddressSpaces(Postorder, InferredAddrSpace, PredicatedAS);

  // Changes the address spaces of the flat address expressions that are
  // inferred to point to a specific address space.
  return rewriteWithNewAddressSpaces(Postorder, InferredAddrSpace,
                                     PredicatedAS);
}
// Constants need to be tracked through RAUW to handle cases with nested
// constant expressions, so wrap values in WeakTrackingVH.
void InferAddressSpacesImpl::inferAddressSpaces(
    ArrayRef<WeakTrackingVH> Postorder,
    ValueToAddrSpaceMapTy &InferredAddrSpace,
    PredicatedAddrSpaceMapTy &PredicatedAS) const {
  SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
  // Initially, all expressions are in the uninitialized address space.
  for (Value *V : Postorder)
    InferredAddrSpace[V] = UninitializedAddressSpace;

  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();

    // Try to update the address space of the stack top according to the
    // address spaces of its operands.
    if (!updateAddressSpace(*V, InferredAddrSpace, PredicatedAS))
      continue;

    for (Value *User : V->users()) {
      // Skip if User is already in the worklist.
      if (Worklist.count(User))
        continue;

      auto Pos = InferredAddrSpace.find(User);
      // Our algorithm only updates the address spaces of flat address
      // expressions, which are those in InferredAddrSpace.
      if (Pos == InferredAddrSpace.end())
        continue;

      // Function updateAddressSpace moves the address space down a lattice
      // path. Therefore, nothing to do if User is already inferred as flat (the
      // bottom element in the lattice).
      if (Pos->second == FlatAddrSpace)
        continue;

      Worklist.insert(User);
    }
  }
}
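// To see the fixed point on the loop example from the file header comment:
// %y and %y2 start uninitialized; %generic.input contributes addrspace(3) to
// %y, the transfer function then moves %y2 down to addrspace(3), and
// re-visiting %y no longer changes it, so the worklist drains and the
// analysis stops.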
unsigned
InferAddressSpacesImpl::getPredicatedAddrSpace(const Value &Ptr,
                                               const Value *UserCtx) const {
  const Instruction *UserCtxI = dyn_cast<Instruction>(UserCtx);
  if (!UserCtxI)
    return UninitializedAddressSpace;

  const Value *StrippedPtr = Ptr.stripInBoundsOffsets();
  for (auto &AssumeVH : AC.assumptionsFor(StrippedPtr)) {
    if (!AssumeVH)
      continue;
    CallInst *CI = cast<CallInst>(AssumeVH);
    if (!isValidAssumeForContext(CI, UserCtxI, DT))
      continue;

    const Value *Ptr;
    unsigned AS;
    std::tie(Ptr, AS) = TTI->getPredicatedAddrSpace(CI->getArgOperand(0));
    if (Ptr)
      return AS;
  }

  return UninitializedAddressSpace;
}
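// Illustrative (hypothetical) example of the assume-based path; the predicate
// intrinsics recognized are target-specific and reported through
// TTI->getPredicatedAddrSpace (AMDGPU, for instance, understands
// llvm.amdgcn.is.shared):
//   %pred = call i1 @llvm.amdgcn.is.shared(ptr %p)
//   call void @llvm.assume(i1 %pred)
//   ; uses of %p dominated by the assume may be inferred as addrspace(3)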
bool InferAddressSpacesImpl::updateAddressSpace(
    const Value &V, ValueToAddrSpaceMapTy &InferredAddrSpace,
    PredicatedAddrSpaceMapTy &PredicatedAS) const {
  assert(InferredAddrSpace.count(&V));

  LLVM_DEBUG(dbgs() << "Updating the address space of\n  " << V << '\n');

  // The new inferred address space equals the join of the address spaces
  // of all its pointer operands.
  unsigned NewAS = UninitializedAddressSpace;

  const Operator &Op = cast<Operator>(V);
  if (Op.getOpcode() == Instruction::Select) {
    Value *Src0 = Op.getOperand(1);
    Value *Src1 = Op.getOperand(2);

    auto I = InferredAddrSpace.find(Src0);
    unsigned Src0AS = (I != InferredAddrSpace.end())
                          ? I->second
                          : Src0->getType()->getPointerAddressSpace();

    auto J = InferredAddrSpace.find(Src1);
    unsigned Src1AS = (J != InferredAddrSpace.end())
                          ? J->second
                          : Src1->getType()->getPointerAddressSpace();

    auto *C0 = dyn_cast<Constant>(Src0);
    auto *C1 = dyn_cast<Constant>(Src1);

    // If one of the inputs is a constant, we may be able to do a constant
    // addrspacecast of it. Defer inferring the address space until the input
    // address space is known.
    if ((C1 && Src0AS == UninitializedAddressSpace) ||
        (C0 && Src1AS == UninitializedAddressSpace))
      return false;

    if (C0 && isSafeToCastConstAddrSpace(C0, Src1AS))
      NewAS = Src1AS;
    else if (C1 && isSafeToCastConstAddrSpace(C1, Src0AS))
      NewAS = Src0AS;
    else
      NewAS = joinAddressSpaces(Src0AS, Src1AS);
  } else {
    unsigned AS = TTI->getAssumedAddrSpace(&V);
    if (AS != UninitializedAddressSpace) {
      // Use the assumed address space directly.
      NewAS = AS;
    } else {
      // Otherwise, infer the address space from its pointer operands.
      for (Value *PtrOperand : getPointerOperands(V, *DL, TTI)) {
        auto I = InferredAddrSpace.find(PtrOperand);
        unsigned OperandAS;
        if (I == InferredAddrSpace.end()) {
          OperandAS = PtrOperand->getType()->getPointerAddressSpace();
          if (OperandAS == FlatAddrSpace) {
            // Check AC for assumption dominating V.
            unsigned AS = getPredicatedAddrSpace(*PtrOperand, &V);
            if (AS != UninitializedAddressSpace) {
              LLVM_DEBUG(dbgs()
                         << "  deduce operand AS from the predicate addrspace "
                         << AS << '\n');
              OperandAS = AS;
              // Record this use with the predicated AS.
              PredicatedAS[std::make_pair(&V, PtrOperand)] = OperandAS;
            }
          }
        } else
          OperandAS = I->second;

        // join(flat, *) = flat. So we can break if NewAS is already flat.
        NewAS = joinAddressSpaces(NewAS, OperandAS);
        if (NewAS == FlatAddrSpace)
          break;
      }
    }
  }

  unsigned OldAS = InferredAddrSpace.lookup(&V);
  assert(OldAS != FlatAddrSpace);
  if (OldAS == NewAS)
    return false;

  // If any updates are made, add its users to the worklist because their
  // address spaces can also possibly be updated.
  LLVM_DEBUG(dbgs() << "  to " << NewAS << '\n');
  InferredAddrSpace[&V] = NewAS;
  return true;
}
/// Replace operand \p OpIdx in \p Inst, if the value is the same as \p OldVal,
/// with \p NewVal.
static bool replaceOperandIfSame(Instruction *Inst, unsigned OpIdx,
                                 Value *OldVal, Value *NewVal) {
  Use &U = Inst->getOperandUse(OpIdx);
  if (U.get() == OldVal) {
    U.set(NewVal);
    return true;
  }

  return false;
}
template <typename InstrType>
static bool replaceSimplePointerUse(const TargetTransformInfo &TTI,
                                    InstrType *MemInstr, unsigned AddrSpace,
                                    Value *OldV, Value *NewV) {
  if (!MemInstr->isVolatile() || TTI.hasVolatileVariant(MemInstr, AddrSpace)) {
    return replaceOperandIfSame(MemInstr, InstrType::getPointerOperandIndex(),
                                OldV, NewV);
  }
  return false;
}
/// If \p OldV is used as the pointer operand of a compatible memory operation
/// \p Inst, replaces the pointer operand with \p NewV.
///
/// This covers memory instructions with a single pointer operand that can have
/// its address space changed by simply mutating the use to a new value.
///
/// Returns true if the replacement was made.
static bool replaceIfSimplePointerUse(const TargetTransformInfo &TTI,
                                      User *Inst, unsigned AddrSpace,
                                      Value *OldV, Value *NewV) {
  if (auto *LI = dyn_cast<LoadInst>(Inst))
    return replaceSimplePointerUse(TTI, LI, AddrSpace, OldV, NewV);

  if (auto *SI = dyn_cast<StoreInst>(Inst))
    return replaceSimplePointerUse(TTI, SI, AddrSpace, OldV, NewV);

  if (auto *RMW = dyn_cast<AtomicRMWInst>(Inst))
    return replaceSimplePointerUse(TTI, RMW, AddrSpace, OldV, NewV);

  if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst))
    return replaceSimplePointerUse(TTI, CmpX, AddrSpace, OldV, NewV);

  return false;
}
/// Update memory intrinsic uses that require more complex processing than
/// simple memory instructions. These require re-mangling and may have multiple
/// pointer operands.
static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI, Value *OldV,
                                     Value *NewV) {
  IRBuilder<> B(MI);
  MDNode *TBAA = MI->getMetadata(LLVMContext::MD_tbaa);
  MDNode *ScopeMD = MI->getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAliasMD = MI->getMetadata(LLVMContext::MD_noalias);

  if (auto *MSI = dyn_cast<MemSetInst>(MI)) {
    B.CreateMemSet(NewV, MSI->getValue(), MSI->getLength(), MSI->getDestAlign(),
                   false, // isVolatile
                   TBAA, ScopeMD, NoAliasMD);
  } else if (auto *MTI = dyn_cast<MemTransferInst>(MI)) {
    Value *Src = MTI->getRawSource();
    Value *Dest = MTI->getRawDest();

    // Be careful in case this is a self-to-self copy.
    if (Src == OldV)
      Src = NewV;

    if (Dest == OldV)
      Dest = NewV;

    if (isa<MemCpyInlineInst>(MTI)) {
      MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
      B.CreateMemCpyInline(Dest, MTI->getDestAlign(), Src,
                           MTI->getSourceAlign(), MTI->getLength(),
                           false, // isVolatile
                           TBAA, TBAAStruct, ScopeMD, NoAliasMD);
    } else if (isa<MemCpyInst>(MTI)) {
      MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
      B.CreateMemCpy(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
                     MTI->getLength(),
                     false, // isVolatile
                     TBAA, TBAAStruct, ScopeMD, NoAliasMD);
    } else {
      assert(isa<MemMoveInst>(MTI));
      B.CreateMemMove(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
                      MTI->getLength(),
                      false, // isVolatile
                      TBAA, ScopeMD, NoAliasMD);
    }
  } else
    llvm_unreachable("unhandled MemIntrinsic");

  MI->eraseFromParent();
  return true;
}
// Returns true if it is OK to change the address space of constant \p C with
// a ConstantExpr addrspacecast.
bool InferAddressSpacesImpl::isSafeToCastConstAddrSpace(Constant *C,
                                                        unsigned NewAS) const {
  assert(NewAS != UninitializedAddressSpace);

  unsigned SrcAS = C->getType()->getPointerAddressSpace();
  if (SrcAS == NewAS || isa<UndefValue>(C))
    return true;

  // Prevent illegal casts between different non-flat address spaces.
  if (SrcAS != FlatAddrSpace && NewAS != FlatAddrSpace)
    return false;

  if (isa<ConstantPointerNull>(C))
    return true;

  if (auto *Op = dyn_cast<Operator>(C)) {
    // If we already have a constant addrspacecast, it should be safe to cast it
    // off.
    if (Op->getOpcode() == Instruction::AddrSpaceCast)
      return isSafeToCastConstAddrSpace(cast<Constant>(Op->getOperand(0)),
                                        NewAS);

    if (Op->getOpcode() == Instruction::IntToPtr &&
        Op->getType()->getPointerAddressSpace() == FlatAddrSpace)
      return true;
  }

  return false;
}
static Value::use_iterator skipToNextUser(Value::use_iterator I,
                                          Value::use_iterator End) {
  User *CurUser = I->getUser();
  ++I;

  while (I != End && I->getUser() == CurUser)
    ++I;

  return I;
}
void InferAddressSpacesImpl::performPointerReplacement(
    Value *V, Value *NewV, Use &U, ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<Instruction *> &DeadInstructions) const {
  User *CurUser = U.getUser();

  unsigned AddrSpace = V->getType()->getPointerAddressSpace();
  if (replaceIfSimplePointerUse(*TTI, CurUser, AddrSpace, V, NewV))
    return;

  // Skip if the current user is the new value itself.
  if (CurUser == NewV)
    return;

  auto *CurUserI = dyn_cast<Instruction>(CurUser);
  if (!CurUserI || CurUserI->getFunction() != F)
    return;

  // Handle more complex cases like intrinsics that need to be remangled.
  if (auto *MI = dyn_cast<MemIntrinsic>(CurUser)) {
    if (!MI->isVolatile() && handleMemIntrinsicPtrUse(MI, V, NewV))
      return;
  }

  if (auto *II = dyn_cast<IntrinsicInst>(CurUser)) {
    if (rewriteIntrinsicOperands(II, V, NewV))
      return;
  }

  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(CurUserI)) {
    // If we can infer that both pointers are in the same addrspace,
    // transform e.g.
    //   %cmp = icmp eq float* %p, %q
    // into
    //   %cmp = icmp eq float addrspace(3)* %new_p, %new_q

    unsigned NewAS = NewV->getType()->getPointerAddressSpace();
    int SrcIdx = U.getOperandNo();
    int OtherIdx = (SrcIdx == 0) ? 1 : 0;
    Value *OtherSrc = Cmp->getOperand(OtherIdx);

    if (Value *OtherNewV = ValueWithNewAddrSpace.lookup(OtherSrc)) {
      if (OtherNewV->getType()->getPointerAddressSpace() == NewAS) {
        Cmp->setOperand(OtherIdx, OtherNewV);
        Cmp->setOperand(SrcIdx, NewV);
        return;
      }
    }

    // Even if the type mismatches, we can cast the constant.
    if (auto *KOtherSrc = dyn_cast<Constant>(OtherSrc)) {
      if (isSafeToCastConstAddrSpace(KOtherSrc, NewAS)) {
        Cmp->setOperand(SrcIdx, NewV);
        Cmp->setOperand(OtherIdx, ConstantExpr::getAddrSpaceCast(
                                      KOtherSrc, NewV->getType()));
        return;
      }
    }
  }

  if (AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(CurUserI)) {
    unsigned NewAS = NewV->getType()->getPointerAddressSpace();
    if (ASC->getDestAddressSpace() == NewAS) {
      ASC->replaceAllUsesWith(NewV);
      DeadInstructions.push_back(ASC);
      return;
    }
  }

  // Otherwise, replaces the use with flat(NewV).
  if (Instruction *VInst = dyn_cast<Instruction>(V)) {
    // Don't create a copy of the original addrspacecast.
    if (U == V && isa<AddrSpaceCastInst>(V))
      return;

    // Insert the addrspacecast after NewV.
    BasicBlock::iterator InsertPos;
    if (Instruction *NewVInst = dyn_cast<Instruction>(NewV))
      InsertPos = std::next(NewVInst->getIterator());
    else
      InsertPos = std::next(VInst->getIterator());

    while (isa<PHINode>(InsertPos))
      ++InsertPos;

    // This instruction may contain multiple uses of V, update them all.
    CurUser->replaceUsesOfWith(
        V, new AddrSpaceCastInst(NewV, V->getType(), "", InsertPos));
  } else {
    CurUserI->replaceUsesOfWith(
        V, ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV), V->getType()));
  }
}
bool InferAddressSpacesImpl::rewriteWithNewAddressSpaces(
    ArrayRef<WeakTrackingVH> Postorder,
    const ValueToAddrSpaceMapTy &InferredAddrSpace,
    const PredicatedAddrSpaceMapTy &PredicatedAS) const {
  // For each address expression to be modified, creates a clone of it with its
  // pointer operands converted to the new address space. Since the pointer
  // operands are converted, the clone is naturally in the new address space by
  // construction.
  ValueToValueMapTy ValueWithNewAddrSpace;
  SmallVector<const Use *, 32> PoisonUsesToFix;
  for (Value *V : Postorder) {
    unsigned NewAddrSpace = InferredAddrSpace.lookup(V);

    // In some degenerate cases (e.g. invalid IR in unreachable code), we may
    // not even infer the value to have its original address space.
    if (NewAddrSpace == UninitializedAddressSpace)
      continue;

    if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
      Value *New =
          cloneValueWithNewAddressSpace(V, NewAddrSpace, ValueWithNewAddrSpace,
                                        PredicatedAS, &PoisonUsesToFix);
      if (New)
        ValueWithNewAddrSpace[V] = New;
    }
  }

  if (ValueWithNewAddrSpace.empty())
    return false;

  // Fixes all the poison uses generated by cloneInstructionWithNewAddressSpace.
  for (const Use *PoisonUse : PoisonUsesToFix) {
    User *V = PoisonUse->getUser();
    User *NewV = cast_or_null<User>(ValueWithNewAddrSpace.lookup(V));
    if (!NewV)
      continue;

    unsigned OperandNo = PoisonUse->getOperandNo();
    assert(isa<PoisonValue>(NewV->getOperand(OperandNo)));
    NewV->setOperand(OperandNo, ValueWithNewAddrSpace.lookup(PoisonUse->get()));
  }

  SmallVector<Instruction *, 16> DeadInstructions;
  ValueToValueMapTy VMap;
  ValueMapper VMapper(VMap, RF_NoModuleLevelChanges | RF_IgnoreMissingLocals);

  // Replaces the uses of the old address expressions with the new ones.
  for (const WeakTrackingVH &WVH : Postorder) {
    assert(WVH && "value was unexpectedly deleted");
    Value *V = WVH;
    Value *NewV = ValueWithNewAddrSpace.lookup(V);
    if (NewV == nullptr)
      continue;

    LLVM_DEBUG(dbgs() << "Replacing the uses of " << *V << "\n  with\n  "
                      << *NewV << '\n');

    if (Constant *C = dyn_cast<Constant>(V)) {
      Constant *Replace =
          ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV), C->getType());
      if (C != Replace) {
        LLVM_DEBUG(dbgs() << "Inserting replacement const cast: " << Replace
                          << ": " << *Replace << '\n');
        SmallVector<User *, 16> WorkList;
        for (User *U : make_early_inc_range(C->users())) {
          if (auto *I = dyn_cast<Instruction>(U)) {
            if (I->getFunction() == F)
              I->replaceUsesOfWith(C, Replace);
          } else {
            WorkList.append(U->user_begin(), U->user_end());
          }
        }
        if (!WorkList.empty()) {
          VMap[C] = Replace;
          DenseSet<User *> Visited{WorkList.begin(), WorkList.end()};
          while (!WorkList.empty()) {
            User *U = WorkList.pop_back_val();
            if (auto *I = dyn_cast<Instruction>(U)) {
              if (I->getFunction() == F)
                VMapper.remapInstruction(*I);
              continue;
            }
            for (User *U2 : U->users())
              if (Visited.insert(U2).second)
                WorkList.push_back(U2);
          }
        }
        V = Replace;
      }
    }

    Value::use_iterator I, E, Next;
    for (I = V->use_begin(), E = V->use_end(); I != E;) {
      Use &U = *I;

      // Some users may see the same pointer operand in multiple operands. Skip
      // to the next instruction.
      I = skipToNextUser(I, E);

      performPointerReplacement(V, NewV, U, ValueWithNewAddrSpace,
                                DeadInstructions);
    }

    if (V->use_empty()) {
      if (Instruction *I = dyn_cast<Instruction>(V))
        DeadInstructions.push_back(I);
    }
  }

  for (Instruction *I : DeadInstructions)
    RecursivelyDeleteTriviallyDeadInstructions(I);

  return true;
}
bool InferAddressSpaces::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *DTWP = getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  DominatorTree *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  return InferAddressSpacesImpl(
             getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F), DT,
             &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F),
             FlatAddrSpace)
      .run(F);
}
FunctionPass *llvm::createInferAddressSpacesPass(unsigned AddressSpace) {
  return new InferAddressSpaces(AddressSpace);
}
InferAddressSpacesPass::InferAddressSpacesPass()
    : FlatAddrSpace(UninitializedAddressSpace) {}
InferAddressSpacesPass::InferAddressSpacesPass(unsigned AddressSpace)
    : FlatAddrSpace(AddressSpace) {}
PreservedAnalyses InferAddressSpacesPass::run(Function &F,
                                              FunctionAnalysisManager &AM) {
  bool Changed =
      InferAddressSpacesImpl(AM.getResult<AssumptionAnalysis>(F),
                             AM.getCachedResult<DominatorTreeAnalysis>(F),
                             &AM.getResult<TargetIRAnalysis>(F), FlatAddrSpace)
          .run(F);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    PA.preserve<DominatorTreeAnalysis>();
    return PA;
  }
  return PreservedAnalyses::all();
}