//===- InferAddressSpace.cpp - --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// CUDA C/C++ includes memory space designation as variable type qualifiers
// (such as __global__ and __shared__). Knowing the space of a memory access
// allows CUDA compilers to emit faster PTX loads and stores. For example, a
// load from shared memory can be translated to `ld.shared`, which is roughly
// 10% faster than a generic `ld` on an NVIDIA Tesla K40c.
//
// Unfortunately, type qualifiers only apply to variable declarations, so CUDA
// compilers must infer the memory space of an address expression from
// type-qualified variables.
//
// LLVM IR uses non-zero (so-called specific) address spaces to represent
// memory spaces (e.g. addrspace(3) means shared memory). The Clang frontend
// places only type-qualified variables in specific address spaces, and then
// conservatively `addrspacecast`s each type-qualified variable to addrspace(0)
// (the so-called generic address space) for other instructions to use.
//
// For example, Clang translates the following CUDA code
//   __shared__ float a[10];
//   float v = a[i];
// to
//   %0 = addrspacecast [10 x float] addrspace(3)* @a to [10 x float]*
//   %1 = gep [10 x float], [10 x float]* %0, i64 0, i64 %i
//   %v = load float, float* %1 ; emits ld.f32
// @a is in addrspace(3) since it's type-qualified, but its use from %1 is
// redirected to %0 (the generic version of @a).
//
// The optimization implemented in this file propagates specific address
// spaces from type-qualified variable declarations to their users. For
// example, it optimizes the above IR to
//   %1 = gep [10 x float] addrspace(3)* @a, i64 0, i64 %i
//   %v = load float addrspace(3)* %1 ; emits ld.shared.f32
// propagating the addrspace(3) from @a to %1. As a result, the NVPTX codegen
// is able to emit ld.shared.f32 for %v.
//
// Address space inference works in two steps. First, it uses a data-flow
// analysis to infer as many generic pointers as possible to point to only one
// specific address space. In the above example, it can prove that %1 only
// points to addrspace(3). This algorithm was published in
//   CUDA: Compiling and optimizing for a GPU platform
//   Chakrabarti, Grover, Aarts, Kong, Kudlur, Lin, Marathe, Murphy, Wang
//   ICCS 2012
//
// Then, address space inference replaces all refinable generic pointers with
// equivalent specific pointers.
//
// The major challenge of implementing this optimization is handling PHINodes,
// which may create loops in the data flow graph. This brings two complications.
//
// First, the data flow analysis in Step 1 needs to be circular. For example,
//     %generic.input = addrspacecast float addrspace(3)* %input to float*
//   loop:
//     %y = phi [ %generic.input, %y2 ]
//     %y2 = getelementptr %y, 1
//     %v = load %y2
//     br ..., label %loop, ...
// proving %y specific requires proving both %generic.input and %y2 specific,
// but proving %y2 specific circles back to %y. To address this complication,
// the data flow analysis operates on a lattice:
//   uninitialized > specific address spaces > generic.
// All address expressions (our implementation only considers phi, bitcast,
// addrspacecast, and getelementptr) start with the uninitialized address
// space. The monotone transfer function moves the address space of a pointer
// down a lattice path from uninitialized to specific and then to generic. A
// join operation of two different specific address spaces pushes the
// expression down to the generic address space. The analysis completes once
// it reaches a fixed point.
//
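// For illustration only (hypothetical address-space numbering, not taken from
// the paper above): if shared memory is addrspace(3) and some other specific
// space is addrspace(5), the join operation behaves as
//   join(uninitialized, AS(3)) = AS(3)
//   join(AS(3), AS(3))         = AS(3)
//   join(AS(3), AS(5))         = generic
// so %y above settles at AS(3) only if all its incoming values agree on AS(3).
//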
// Second, IR rewriting in Step 2 also needs to be circular. For example,
// converting %y to addrspace(3) requires the compiler to know the converted
// %y2, but converting %y2 needs the converted %y. To address this complication,
// we break these cycles using "undef" placeholders. When converting an
// instruction `I` to a new address space, if its operand `Op` is not converted
// yet, we let `I` temporarily use `undef` and fix all the uses of undef later.
// For instance, our algorithm first converts %y to
//   %y' = phi float addrspace(3)* [ %input, undef ]
// Then, it converts %y2 to
//   %y2' = getelementptr %y', 1
// Finally, it fixes the undef in %y' so that
//   %y' = phi float addrspace(3)* [ %input, %y2' ]
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/InferAddressSpaces.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ValueMapper.h"

#include <cassert>
#include <iterator>
#include <limits>
#include <utility>
#include <vector>
#define DEBUG_TYPE "infer-address-spaces"

using namespace llvm;
static cl::opt<bool> AssumeDefaultIsFlatAddressSpace(
    "assume-default-is-flat-addrspace", cl::init(false), cl::ReallyHidden,
    cl::desc("The default address space is assumed as the flat address space. "
             "This is mainly for testing purposes."));
static const unsigned UninitializedAddressSpace =
    std::numeric_limits<unsigned>::max();
namespace {

using ValueToAddrSpaceMapTy = DenseMap<const Value *, unsigned>;
using PostorderStackTy = llvm::SmallVector<PointerIntPair<Value *, 1, bool>, 4>;

class InferAddressSpaces : public FunctionPass {
  unsigned FlatAddrSpace = 0;
public:
  static char ID;

  InferAddressSpaces()
      : FunctionPass(ID), FlatAddrSpace(UninitializedAddressSpace) {}
  InferAddressSpaces(unsigned AS) : FunctionPass(ID), FlatAddrSpace(AS) {}

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<TargetTransformInfoWrapperPass>();
  }

  bool runOnFunction(Function &F) override;
};
class InferAddressSpacesImpl {
  const TargetTransformInfo *TTI = nullptr;
  const DataLayout *DL = nullptr;

  /// Target-specific address space whose uses should be replaced if possible.
  unsigned FlatAddrSpace = 0;
  // Returns the new address space of V if updated; otherwise, returns None.
  Optional<unsigned>
  updateAddressSpace(const Value &V,
                     const ValueToAddrSpaceMapTy &InferredAddrSpace) const;

  // Tries to infer the specific address space of each address expression in
  // Postorder.
  void inferAddressSpaces(ArrayRef<WeakTrackingVH> Postorder,
                          ValueToAddrSpaceMapTy *InferredAddrSpace) const;
  bool isSafeToCastConstAddrSpace(Constant *C, unsigned NewAS) const;

  Value *cloneInstructionWithNewAddressSpace(
      Instruction *I, unsigned NewAddrSpace,
      const ValueToValueMapTy &ValueWithNewAddrSpace,
      SmallVectorImpl<const Use *> *UndefUsesToFix) const;
  // Changes the flat address expressions in function F to point to specific
  // address spaces if InferredAddrSpace says so. Postorder is the postorder of
  // all flat expressions in the use-def graph of function F.
  bool rewriteWithNewAddressSpaces(
      const TargetTransformInfo &TTI, ArrayRef<WeakTrackingVH> Postorder,
      const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const;

  void appendsFlatAddressExpressionToPostorderStack(
      Value *V, PostorderStackTy &PostorderStack,
      DenseSet<Value *> &Visited) const;
  bool rewriteIntrinsicOperands(IntrinsicInst *II, Value *OldV,
                                Value *NewV) const;
  void collectRewritableIntrinsicOperands(IntrinsicInst *II,
                                          PostorderStackTy &PostorderStack,
                                          DenseSet<Value *> &Visited) const;

  std::vector<WeakTrackingVH> collectFlatAddressExpressions(Function &F) const;
  Value *cloneValueWithNewAddressSpace(
      Value *V, unsigned NewAddrSpace,
      const ValueToValueMapTy &ValueWithNewAddrSpace,
      SmallVectorImpl<const Use *> *UndefUsesToFix) const;
  unsigned joinAddressSpaces(unsigned AS1, unsigned AS2) const;

public:
  InferAddressSpacesImpl(const TargetTransformInfo *TTI, unsigned FlatAddrSpace)
      : TTI(TTI), FlatAddrSpace(FlatAddrSpace) {}
  bool run(Function &F);
};

} // end anonymous namespace
char InferAddressSpaces::ID = 0;

namespace llvm {

void initializeInferAddressSpacesPass(PassRegistry &);

} // end namespace llvm

INITIALIZE_PASS(InferAddressSpaces, DEBUG_TYPE, "Infer address spaces",
                false, false)
// Check whether this is a no-op pointer bitcast expressed as a pair of
// `ptrtoint`/`inttoptr`, which is needed because LLVM has no direct no-op
// pointer bitcast over different address spaces.
static bool isNoopPtrIntCastPair(const Operator *I2P, const DataLayout &DL,
                                 const TargetTransformInfo *TTI) {
  assert(I2P->getOpcode() == Instruction::IntToPtr);
  auto *P2I = dyn_cast<Operator>(I2P->getOperand(0));
  if (!P2I || P2I->getOpcode() != Instruction::PtrToInt)
    return false;
  // Check that it's really safe to treat that pair of `ptrtoint`/`inttoptr`
  // as a no-op cast. Besides checking that both casts are no-ops on their
  // own, we also double-check through the target-specific hook that the
  // underlying target agrees this is a no-op address space cast and that the
  // pointer bits are preserved, since the reinterpreted pointer may be used
  // in other pointer arithmetic.
  // The current IR spec doesn't have clear rules on address space casts,
  // especially a clear definition for pointer bits in non-default address
  // spaces. It would be undefined if that pointer is dereferenced after an
  // invalid reinterpret cast. Also, because the meaning of pointer bits in
  // non-default address spaces is unclear in the current spec, pointer
  // arithmetic may be undefined after an invalid pointer reinterpret cast.
  // However, as we confirm through the target hooks that it's a no-op
  // addrspacecast, it doesn't matter since the bits should be the same.
  return CastInst::isNoopCast(Instruction::CastOps(I2P->getOpcode()),
                              I2P->getOperand(0)->getType(), I2P->getType(),
                              DL) &&
         CastInst::isNoopCast(Instruction::CastOps(P2I->getOpcode()),
                              P2I->getOperand(0)->getType(), P2I->getType(),
                              DL) &&
         TTI->isNoopAddrSpaceCast(
             P2I->getOperand(0)->getType()->getPointerAddressSpace(),
             I2P->getType()->getPointerAddressSpace());
}
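// For illustration, a hypothetical pair that the predicate above accepts,
// assuming the target reports the addrspace(3) -> addrspace(0) cast as a
// no-op and both pointer types are 64 bits wide:
//   %i = ptrtoint i8 addrspace(3)* %p to i64
//   %q = inttoptr i64 %i to i8*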
// Returns true if V is an address expression.
// TODO: Currently, we consider only phi, bitcast, addrspacecast, and
// getelementptr operators.
static bool isAddressExpression(const Value &V, const DataLayout &DL,
                                const TargetTransformInfo *TTI) {
  const Operator *Op = dyn_cast<Operator>(&V);
  if (!Op)
    return false;

  switch (Op->getOpcode()) {
  case Instruction::PHI:
    assert(Op->getType()->isPointerTy());
    return true;
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return true;
  case Instruction::Select:
    return Op->getType()->isPointerTy();
  case Instruction::Call: {
    const IntrinsicInst *II = dyn_cast<IntrinsicInst>(&V);
    return II && II->getIntrinsicID() == Intrinsic::ptrmask;
  }
  case Instruction::IntToPtr:
    return isNoopPtrIntCastPair(Op, DL, TTI);
  default:
    // That value is an address expression if it has an assumed address space.
    return TTI->getAssumedAddrSpace(&V) != UninitializedAddressSpace;
  }
}
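// For illustration (hypothetical IR), each of the following is recognized by
// isAddressExpression as an address expression:
//   %g1 = getelementptr i8, i8* %p, i64 4
//   %s1 = select i1 %c, i8* %p, i8* %q
//   %m1 = call i8* @llvm.ptrmask.p0i8.i64(i8* %p, i64 -16)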
// Returns the pointer operands of V.
//
// Precondition: V is an address expression.
static SmallVector<Value *, 2>
getPointerOperands(const Value &V, const DataLayout &DL,
                   const TargetTransformInfo *TTI) {
  const Operator &Op = cast<Operator>(V);
  switch (Op.getOpcode()) {
  case Instruction::PHI: {
    auto IncomingValues = cast<PHINode>(Op).incoming_values();
    return SmallVector<Value *, 2>(IncomingValues.begin(),
                                   IncomingValues.end());
  }
  case Instruction::BitCast:
  case Instruction::AddrSpaceCast:
  case Instruction::GetElementPtr:
    return {Op.getOperand(0)};
  case Instruction::Select:
    return {Op.getOperand(1), Op.getOperand(2)};
  case Instruction::Call: {
    const IntrinsicInst &II = cast<IntrinsicInst>(Op);
    assert(II.getIntrinsicID() == Intrinsic::ptrmask &&
           "unexpected intrinsic call");
    return {II.getArgOperand(0)};
  }
  case Instruction::IntToPtr: {
    assert(isNoopPtrIntCastPair(&Op, DL, TTI));
    auto *P2I = cast<Operator>(Op.getOperand(0));
    return {P2I->getOperand(0)};
  }
  default:
    llvm_unreachable("Unexpected instruction type.");
  }
}
bool InferAddressSpacesImpl::rewriteIntrinsicOperands(IntrinsicInst *II,
                                                      Value *OldV,
                                                      Value *NewV) const {
  Module *M = II->getParent()->getParent()->getParent();

  switch (II->getIntrinsicID()) {
  case Intrinsic::objectsize: {
    Type *DestTy = II->getType();
    Type *SrcTy = NewV->getType();
    Function *NewDecl =
        Intrinsic::getDeclaration(M, II->getIntrinsicID(), {DestTy, SrcTy});
    II->setArgOperand(0, NewV);
    II->setCalledFunction(NewDecl);
    return true;
  }
  case Intrinsic::ptrmask:
    // This is handled as an address expression, not as a use memory operation.
    return false;
  default: {
    Value *Rewrite = TTI->rewriteIntrinsicWithAddressSpace(II, OldV, NewV);
    if (!Rewrite)
      return false;
    if (Rewrite != II)
      II->replaceAllUsesWith(Rewrite);
    return true;
  }
  }
}
void InferAddressSpacesImpl::collectRewritableIntrinsicOperands(
    IntrinsicInst *II, PostorderStackTy &PostorderStack,
    DenseSet<Value *> &Visited) const {
  auto IID = II->getIntrinsicID();
  switch (IID) {
  case Intrinsic::ptrmask:
  case Intrinsic::objectsize:
    appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(0),
                                                 PostorderStack, Visited);
    break;
  default:
    SmallVector<int, 2> OpIndexes;
    if (TTI->collectFlatAddressOperands(OpIndexes, IID)) {
      for (int Idx : OpIndexes) {
        appendsFlatAddressExpressionToPostorderStack(II->getArgOperand(Idx),
                                                     PostorderStack, Visited);
      }
    }
    break;
  }
}
// If V is an unvisited flat address expression, appends V to PostorderStack
// and marks it as visited.
void InferAddressSpacesImpl::appendsFlatAddressExpressionToPostorderStack(
    Value *V, PostorderStackTy &PostorderStack,
    DenseSet<Value *> &Visited) const {
  assert(V->getType()->isPointerTy());

  // Generic addressing expressions may be hidden in nested constant
  // expressions.
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
    // TODO: Look in non-address parts, like icmp operands.
    if (isAddressExpression(*CE, *DL, TTI) && Visited.insert(CE).second)
      PostorderStack.emplace_back(CE, false);

    return;
  }

  if (V->getType()->getPointerAddressSpace() == FlatAddrSpace &&
      isAddressExpression(*V, *DL, TTI)) {
    if (Visited.insert(V).second) {
      PostorderStack.emplace_back(V, false);

      Operator *Op = cast<Operator>(V);
      for (unsigned I = 0, E = Op->getNumOperands(); I != E; ++I) {
        if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op->getOperand(I))) {
          if (isAddressExpression(*CE, *DL, TTI) && Visited.insert(CE).second)
            PostorderStack.emplace_back(CE, false);
        }
      }
    }
  }
}
// Returns all flat address expressions in function F. The elements are ordered
// in postorder.
std::vector<WeakTrackingVH>
InferAddressSpacesImpl::collectFlatAddressExpressions(Function &F) const {
  // This function implements a non-recursive postorder traversal of a partial
  // use-def graph of function F.
  PostorderStackTy PostorderStack;
  // The set of visited expressions.
  DenseSet<Value *> Visited;

  auto PushPtrOperand = [&](Value *Ptr) {
    appendsFlatAddressExpressionToPostorderStack(Ptr, PostorderStack, Visited);
  };

  // Look at operations that may be interesting to accelerate by moving to a
  // known address space. We mainly aim at loads and stores, but pure
  // addressing calculations may also be faster.
  for (Instruction &I : instructions(F)) {
    if (auto *GEP = dyn_cast<GetElementPtrInst>(&I)) {
      if (!GEP->getType()->isVectorTy())
        PushPtrOperand(GEP->getPointerOperand());
    } else if (auto *LI = dyn_cast<LoadInst>(&I))
      PushPtrOperand(LI->getPointerOperand());
    else if (auto *SI = dyn_cast<StoreInst>(&I))
      PushPtrOperand(SI->getPointerOperand());
    else if (auto *RMW = dyn_cast<AtomicRMWInst>(&I))
      PushPtrOperand(RMW->getPointerOperand());
    else if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(&I))
      PushPtrOperand(CmpX->getPointerOperand());
    else if (auto *MI = dyn_cast<MemIntrinsic>(&I)) {
      // For memset/memcpy/memmove, any pointer operand can be replaced.
      PushPtrOperand(MI->getRawDest());

      // Handle 2nd operand for memcpy/memmove.
      if (auto *MTI = dyn_cast<MemTransferInst>(MI))
        PushPtrOperand(MTI->getRawSource());
    } else if (auto *II = dyn_cast<IntrinsicInst>(&I))
      collectRewritableIntrinsicOperands(II, PostorderStack, Visited);
    else if (ICmpInst *Cmp = dyn_cast<ICmpInst>(&I)) {
      // FIXME: Handle vectors of pointers.
      if (Cmp->getOperand(0)->getType()->isPointerTy()) {
        PushPtrOperand(Cmp->getOperand(0));
        PushPtrOperand(Cmp->getOperand(1));
      }
    } else if (auto *ASC = dyn_cast<AddrSpaceCastInst>(&I)) {
      if (!ASC->getType()->isVectorTy())
        PushPtrOperand(ASC->getPointerOperand());
    } else if (auto *I2P = dyn_cast<IntToPtrInst>(&I)) {
      if (isNoopPtrIntCastPair(cast<Operator>(I2P), *DL, TTI))
        PushPtrOperand(
            cast<Operator>(I2P->getOperand(0))->getOperand(0));
    }
  }

  std::vector<WeakTrackingVH> Postorder; // The resultant postorder.
  while (!PostorderStack.empty()) {
    Value *TopVal = PostorderStack.back().getPointer();
    // If the operands of the expression on the top are already explored,
    // adds that expression to the resultant postorder.
    if (PostorderStack.back().getInt()) {
      if (TopVal->getType()->getPointerAddressSpace() == FlatAddrSpace)
        Postorder.push_back(TopVal);
      PostorderStack.pop_back();
      continue;
    }

    // Otherwise, adds its operands to the stack and explores them.
    PostorderStack.back().setInt(true);
    // Skip values with an assumed address space.
    if (TTI->getAssumedAddrSpace(TopVal) == UninitializedAddressSpace) {
      for (Value *PtrOperand : getPointerOperands(*TopVal, *DL, TTI)) {
        appendsFlatAddressExpressionToPostorderStack(PtrOperand,
                                                     PostorderStack, Visited);
      }
    }
  }
  return Postorder;
}
// A helper function for cloneInstructionWithNewAddressSpace. Returns the clone
// of OperandUse.get() in the new address space. If the clone is not ready yet,
// returns an undef in the new address space as a placeholder.
static Value *operandWithNewAddressSpaceOrCreateUndef(
    const Use &OperandUse, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) {
  Value *Operand = OperandUse.get();

  Type *NewPtrTy = PointerType::getWithSamePointeeType(
      cast<PointerType>(Operand->getType()), NewAddrSpace);

  if (Constant *C = dyn_cast<Constant>(Operand))
    return ConstantExpr::getAddrSpaceCast(C, NewPtrTy);

  if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand))
    return NewOperand;

  UndefUsesToFix->push_back(&OperandUse);
  return UndefValue::get(NewPtrTy);
}
// Returns a clone of `I` with its operands converted to those specified in
// ValueWithNewAddrSpace. Due to potential cycles in the data flow graph, an
// operand whose address space needs to be modified might not exist in
// ValueWithNewAddrSpace. In that case, uses undef as a placeholder operand and
// adds that operand use to UndefUsesToFix so that the caller can fix them
// later.
//
// Note that we do not necessarily clone `I`, e.g., if it is an addrspacecast
// from a pointer whose type already matches. Therefore, this function returns
// a Value* instead of an Instruction*.
//
// This may also return nullptr in the case the instruction could not be
// rewritten.
Value *InferAddressSpacesImpl::cloneInstructionWithNewAddressSpace(
    Instruction *I, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) const {
  Type *NewPtrType = PointerType::getWithSamePointeeType(
      cast<PointerType>(I->getType()), NewAddrSpace);

  if (I->getOpcode() == Instruction::AddrSpaceCast) {
    Value *Src = I->getOperand(0);
    // Because `I` is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space,
    // according to our algorithm.
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    if (Src->getType() != NewPtrType)
      return new BitCastInst(Src, NewPtrType);
    return Src;
  }
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    // Technically the intrinsic ID is a pointer typed argument, so specially
    // handle calls early.
    assert(II->getIntrinsicID() == Intrinsic::ptrmask);
    Value *NewPtr = operandWithNewAddressSpaceOrCreateUndef(
        II->getArgOperandUse(0), NewAddrSpace, ValueWithNewAddrSpace,
        UndefUsesToFix);
    Value *Rewrite =
        TTI->rewriteIntrinsicWithAddressSpace(II, II->getArgOperand(0), NewPtr);
    if (Rewrite) {
      assert(Rewrite != II && "cannot modify this pointer operation in place");
      return Rewrite;
    }

    return nullptr;
  }

  unsigned AS = TTI->getAssumedAddrSpace(I);
  if (AS != UninitializedAddressSpace) {
    // For the assumed address space, insert an `addrspacecast` to make that
    // explicit.
    Type *NewPtrTy = PointerType::getWithSamePointeeType(
        cast<PointerType>(I->getType()), AS);
    auto *NewI = new AddrSpaceCastInst(I, NewPtrTy);
    NewI->insertAfter(I);
    return NewI;
  }
  // Computes the converted pointer operands.
  SmallVector<Value *, 4> NewPointerOperands;
  for (const Use &OperandUse : I->operands()) {
    if (!OperandUse.get()->getType()->isPointerTy())
      NewPointerOperands.push_back(nullptr);
    else
      NewPointerOperands.push_back(operandWithNewAddressSpaceOrCreateUndef(
          OperandUse, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix));
  }

  switch (I->getOpcode()) {
  case Instruction::BitCast:
    return new BitCastInst(NewPointerOperands[0], NewPtrType);
  case Instruction::PHI: {
    assert(I->getType()->isPointerTy());
    PHINode *PHI = cast<PHINode>(I);
    PHINode *NewPHI = PHINode::Create(NewPtrType, PHI->getNumIncomingValues());
    for (unsigned Index = 0; Index < PHI->getNumIncomingValues(); ++Index) {
      unsigned OperandNo = PHINode::getOperandNumForIncomingValue(Index);
      NewPHI->addIncoming(NewPointerOperands[OperandNo],
                          PHI->getIncomingBlock(Index));
    }
    return NewPHI;
  }
  case Instruction::GetElementPtr: {
    GetElementPtrInst *GEP = cast<GetElementPtrInst>(I);
    GetElementPtrInst *NewGEP = GetElementPtrInst::Create(
        GEP->getSourceElementType(), NewPointerOperands[0],
        SmallVector<Value *, 4>(GEP->indices()));
    NewGEP->setIsInBounds(GEP->isInBounds());
    return NewGEP;
  }
  case Instruction::Select:
    assert(I->getType()->isPointerTy());
    return SelectInst::Create(I->getOperand(0), NewPointerOperands[1],
                              NewPointerOperands[2], "", nullptr, I);
  case Instruction::IntToPtr: {
    assert(isNoopPtrIntCastPair(cast<Operator>(I), *DL, TTI));
    Value *Src = cast<Operator>(I->getOperand(0))->getOperand(0);
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    if (Src->getType() != NewPtrType)
      return new BitCastInst(Src, NewPtrType);
    return Src;
  }
  default:
    llvm_unreachable("Unexpected opcode");
  }
}
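// For illustration (hypothetical IR): given a mapping of %x to %x' in
// addrspace(3), cloning
//   %y = getelementptr float, float* %x, i64 %i
// with NewAddrSpace == 3 produces
//   %y' = getelementptr float, float addrspace(3)* %x', i64 %i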
// Similar to cloneInstructionWithNewAddressSpace, returns a clone of the
// constant expression `CE` with its operands replaced as specified in
// ValueWithNewAddrSpace.
static Value *cloneConstantExprWithNewAddressSpace(
    ConstantExpr *CE, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace, const DataLayout *DL,
    const TargetTransformInfo *TTI) {
  Type *TargetType = CE->getType()->isPointerTy()
                         ? PointerType::getWithSamePointeeType(
                               cast<PointerType>(CE->getType()), NewAddrSpace)
                         : CE->getType();

  if (CE->getOpcode() == Instruction::AddrSpaceCast) {
    // Because CE is flat, the source address space must be specific.
    // Therefore, the inferred address space must be the source space,
    // according to our algorithm.
    assert(CE->getOperand(0)->getType()->getPointerAddressSpace() ==
           NewAddrSpace);
    return ConstantExpr::getBitCast(CE->getOperand(0), TargetType);
  }

  if (CE->getOpcode() == Instruction::BitCast) {
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(CE->getOperand(0)))
      return ConstantExpr::getBitCast(cast<Constant>(NewOperand), TargetType);
    return ConstantExpr::getAddrSpaceCast(CE, TargetType);
  }

  if (CE->getOpcode() == Instruction::Select) {
    Constant *Src0 = CE->getOperand(1);
    Constant *Src1 = CE->getOperand(2);
    if (Src0->getType()->getPointerAddressSpace() ==
        Src1->getType()->getPointerAddressSpace()) {
      return ConstantExpr::getSelect(
          CE->getOperand(0), ConstantExpr::getAddrSpaceCast(Src0, TargetType),
          ConstantExpr::getAddrSpaceCast(Src1, TargetType));
    }
  }

  if (CE->getOpcode() == Instruction::IntToPtr) {
    assert(isNoopPtrIntCastPair(cast<Operator>(CE), *DL, TTI));
    Constant *Src = cast<ConstantExpr>(CE->getOperand(0))->getOperand(0);
    assert(Src->getType()->getPointerAddressSpace() == NewAddrSpace);
    return ConstantExpr::getBitCast(Src, TargetType);
  }
  // Computes the operands of the new constant expression.
  bool IsNew = false;
  SmallVector<Constant *, 4> NewOperands;
  for (unsigned Index = 0; Index < CE->getNumOperands(); ++Index) {
    Constant *Operand = CE->getOperand(Index);
    // If the address space of `Operand` needs to be modified, the new operand
    // with the new address space should already be in ValueWithNewAddrSpace
    // because (1) the constant expressions we consider (i.e. addrspacecast,
    // bitcast, and getelementptr) do not incur cycles in the data flow graph
    // and (2) this function is called on constant expressions in postorder.
    if (Value *NewOperand = ValueWithNewAddrSpace.lookup(Operand)) {
      IsNew = true;
      NewOperands.push_back(cast<Constant>(NewOperand));
      continue;
    }
    if (auto CExpr = dyn_cast<ConstantExpr>(Operand))
      if (Value *NewOperand = cloneConstantExprWithNewAddressSpace(
              CExpr, NewAddrSpace, ValueWithNewAddrSpace, DL, TTI)) {
        IsNew = true;
        NewOperands.push_back(cast<Constant>(NewOperand));
        continue;
      }
    // Otherwise, reuses the old operand.
    NewOperands.push_back(Operand);
  }

  // If !IsNew, we will replace the Value with itself. However, replaced values
  // are assumed to be wrapped in an addrspacecast later, so drop it now.
  if (!IsNew)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    // Needs to specify the source type while constructing a getelementptr
    // constant expression.
    return CE->getWithOperands(
        NewOperands, TargetType, /*OnlyIfReduced=*/false,
        NewOperands[0]->getType()->getPointerElementType());
  }

  return CE->getWithOperands(NewOperands, TargetType);
}

// Returns a clone of the value `V`, with its operands replaced as specified in
// ValueWithNewAddrSpace. This function is called on every flat address
// expression whose address space needs to be modified, in postorder.
//
// See cloneInstructionWithNewAddressSpace for the meaning of UndefUsesToFix.
Value *InferAddressSpacesImpl::cloneValueWithNewAddressSpace(
    Value *V, unsigned NewAddrSpace,
    const ValueToValueMapTy &ValueWithNewAddrSpace,
    SmallVectorImpl<const Use *> *UndefUsesToFix) const {
  // All values in Postorder are flat address expressions.
  assert(V->getType()->getPointerAddressSpace() == FlatAddrSpace &&
         isAddressExpression(*V, *DL, TTI));

  if (Instruction *I = dyn_cast<Instruction>(V)) {
    Value *NewV = cloneInstructionWithNewAddressSpace(
        I, NewAddrSpace, ValueWithNewAddrSpace, UndefUsesToFix);
    if (Instruction *NewI = dyn_cast_or_null<Instruction>(NewV)) {
      if (NewI->getParent() == nullptr) {
        NewI->insertBefore(I);
        NewI->takeName(I);
      }
    }
    return NewV;
  }

  return cloneConstantExprWithNewAddressSpace(
      cast<ConstantExpr>(V), NewAddrSpace, ValueWithNewAddrSpace, DL, TTI);
}
// Defines the join operation on the address space lattice (see the file header
// comments).
unsigned InferAddressSpacesImpl::joinAddressSpaces(unsigned AS1,
                                                   unsigned AS2) const {
  if (AS1 == FlatAddrSpace || AS2 == FlatAddrSpace)
    return FlatAddrSpace;

  if (AS1 == UninitializedAddressSpace)
    return AS2;
  if (AS2 == UninitializedAddressSpace)
    return AS1;

  // The join of two different specific address spaces is flat.
  return (AS1 == AS2) ? AS1 : FlatAddrSpace;
}
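// For illustration, assuming a target where FlatAddrSpace == 0 (hypothetical
// numbering):
//   joinAddressSpaces(UninitializedAddressSpace, 3) == 3
//   joinAddressSpaces(3, 3) == 3
//   joinAddressSpaces(3, 1) == 0  // two different specific spaces -> flat
//   joinAddressSpaces(0, 3) == 0  // join(flat, *) == flat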
bool InferAddressSpacesImpl::run(Function &F) {
  DL = &F.getParent()->getDataLayout();

  if (AssumeDefaultIsFlatAddressSpace)
    FlatAddrSpace = 0;

  if (FlatAddrSpace == UninitializedAddressSpace) {
    FlatAddrSpace = TTI->getFlatAddressSpace();
    if (FlatAddrSpace == UninitializedAddressSpace)
      return false;
  }

  // Collects all flat address expressions in postorder.
  std::vector<WeakTrackingVH> Postorder = collectFlatAddressExpressions(F);

  // Runs a data-flow analysis to refine the address spaces of every expression
  // in Postorder.
  ValueToAddrSpaceMapTy InferredAddrSpace;
  inferAddressSpaces(Postorder, &InferredAddrSpace);

  // Changes the address spaces of the flat address expressions that are
  // inferred to point to a specific address space.
  return rewriteWithNewAddressSpaces(*TTI, Postorder, InferredAddrSpace, &F);
}
// Constants need to be tracked through RAUW to handle cases with nested
// constant expressions, so wrap values in WeakTrackingVH.
void InferAddressSpacesImpl::inferAddressSpaces(
    ArrayRef<WeakTrackingVH> Postorder,
    ValueToAddrSpaceMapTy *InferredAddrSpace) const {
  SetVector<Value *> Worklist(Postorder.begin(), Postorder.end());
  // Initially, all expressions are in the uninitialized address space.
  for (Value *V : Postorder)
    (*InferredAddrSpace)[V] = UninitializedAddressSpace;

  while (!Worklist.empty()) {
    Value *V = Worklist.pop_back_val();

    // Tries to update the address space of V according to the address spaces
    // of its operands.
    LLVM_DEBUG(dbgs() << "Updating the address space of\n  " << *V << '\n');
    Optional<unsigned> NewAS = updateAddressSpace(*V, *InferredAddrSpace);
    if (!NewAS.hasValue())
      continue;
    // If any updates are made, grabs its users to the worklist because
    // their address spaces can also be possibly updated.
    LLVM_DEBUG(dbgs() << "  to " << NewAS.getValue() << '\n');
    (*InferredAddrSpace)[V] = NewAS.getValue();

    for (Value *User : V->users()) {
      // Skip if User is already in the worklist.
      if (Worklist.count(User))
        continue;

      auto Pos = InferredAddrSpace->find(User);
      // Our algorithm only updates the address spaces of flat address
      // expressions, which are those in InferredAddrSpace.
      if (Pos == InferredAddrSpace->end())
        continue;

      // Function updateAddressSpace moves the address space down a lattice
      // path. Therefore, nothing to do if User is already inferred as flat
      // (the bottom element in the lattice).
      if (Pos->second == FlatAddrSpace)
        continue;

      Worklist.insert(User);
    }
  }
}
Optional<unsigned> InferAddressSpacesImpl::updateAddressSpace(
    const Value &V, const ValueToAddrSpaceMapTy &InferredAddrSpace) const {
  assert(InferredAddrSpace.count(&V));

  // The new inferred address space equals the join of the address spaces
  // of all its pointer operands.
  unsigned NewAS = UninitializedAddressSpace;

  const Operator &Op = cast<Operator>(V);
  if (Op.getOpcode() == Instruction::Select) {
    Value *Src0 = Op.getOperand(1);
    Value *Src1 = Op.getOperand(2);

    auto I = InferredAddrSpace.find(Src0);
    unsigned Src0AS = (I != InferredAddrSpace.end())
                          ? I->second
                          : Src0->getType()->getPointerAddressSpace();

    auto J = InferredAddrSpace.find(Src1);
    unsigned Src1AS = (J != InferredAddrSpace.end())
                          ? J->second
                          : Src1->getType()->getPointerAddressSpace();

    auto *C0 = dyn_cast<Constant>(Src0);
    auto *C1 = dyn_cast<Constant>(Src1);

    // If one of the inputs is a constant, we may be able to do a constant
    // addrspacecast of it. Defer inferring the address space until the input
    // address space is known.
    if ((C1 && Src0AS == UninitializedAddressSpace) ||
        (C0 && Src1AS == UninitializedAddressSpace))
      return None;

    if (C0 && isSafeToCastConstAddrSpace(C0, Src1AS))
      NewAS = Src1AS;
    else if (C1 && isSafeToCastConstAddrSpace(C1, Src0AS))
      NewAS = Src0AS;
    else
      NewAS = joinAddressSpaces(Src0AS, Src1AS);
  } else {
    unsigned AS = TTI->getAssumedAddrSpace(&V);
    if (AS != UninitializedAddressSpace) {
      // Use the assumed address space directly.
      NewAS = AS;
    } else {
      // Otherwise, infer the address space from its pointer operands.
      for (Value *PtrOperand : getPointerOperands(V, *DL, TTI)) {
        auto I = InferredAddrSpace.find(PtrOperand);
        unsigned OperandAS =
            I != InferredAddrSpace.end()
                ? I->second
                : PtrOperand->getType()->getPointerAddressSpace();

        // join(flat, *) = flat. So we can break if NewAS is already flat.
        NewAS = joinAddressSpaces(NewAS, OperandAS);
        if (NewAS == FlatAddrSpace)
          break;
      }
    }
  }

  unsigned OldAS = InferredAddrSpace.lookup(&V);
  assert(OldAS != FlatAddrSpace);
  if (OldAS == NewAS)
    return None;

  return NewAS;
}
/// Returns true if \p U is the pointer operand of a memory instruction with
/// a single pointer operand that can have its address space changed by simply
/// mutating the use to a new value. If the memory instruction is volatile,
/// return true only if the target allows the memory instruction to be volatile
/// in the new address space.
static bool isSimplePointerUseValidToReplace(const TargetTransformInfo &TTI,
                                             Use &U, unsigned AddrSpace) {
  User *Inst = U.getUser();
  unsigned OpNo = U.getOperandNo();
  bool VolatileIsAllowed = false;
  if (auto *I = dyn_cast<Instruction>(Inst))
    VolatileIsAllowed = TTI.hasVolatileVariant(I, AddrSpace);

  if (auto *LI = dyn_cast<LoadInst>(Inst))
    return OpNo == LoadInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !LI->isVolatile());

  if (auto *SI = dyn_cast<StoreInst>(Inst))
    return OpNo == StoreInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !SI->isVolatile());

  if (auto *RMW = dyn_cast<AtomicRMWInst>(Inst))
    return OpNo == AtomicRMWInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !RMW->isVolatile());

  if (auto *CmpX = dyn_cast<AtomicCmpXchgInst>(Inst))
    return OpNo == AtomicCmpXchgInst::getPointerOperandIndex() &&
           (VolatileIsAllowed || !CmpX->isVolatile());

  return false;
}
/// Update memory intrinsic uses that require more complex processing than
/// simple memory instructions. These require re-mangling and may have multiple
/// pointer operands.
static bool handleMemIntrinsicPtrUse(MemIntrinsic *MI, Value *OldV,
                                     Value *NewV) {
  IRBuilder<> B(MI);
  MDNode *TBAA = MI->getMetadata(LLVMContext::MD_tbaa);
  MDNode *ScopeMD = MI->getMetadata(LLVMContext::MD_alias_scope);
  MDNode *NoAliasMD = MI->getMetadata(LLVMContext::MD_noalias);

  if (auto *MSI = dyn_cast<MemSetInst>(MI)) {
    B.CreateMemSet(NewV, MSI->getValue(), MSI->getLength(),
                   MaybeAlign(MSI->getDestAlignment()),
                   false, // isVolatile
                   TBAA, ScopeMD, NoAliasMD);
  } else if (auto *MTI = dyn_cast<MemTransferInst>(MI)) {
    Value *Src = MTI->getRawSource();
    Value *Dest = MTI->getRawDest();

    // Be careful in case this is a self-to-self copy.
    if (Src == OldV)
      Src = NewV;

    if (Dest == OldV)
      Dest = NewV;

    if (isa<MemCpyInlineInst>(MTI)) {
      MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
      B.CreateMemCpyInline(Dest, MTI->getDestAlign(), Src,
                           MTI->getSourceAlign(), MTI->getLength(),
                           false, // isVolatile
                           TBAA, TBAAStruct, ScopeMD, NoAliasMD);
    } else if (isa<MemCpyInst>(MTI)) {
      MDNode *TBAAStruct = MTI->getMetadata(LLVMContext::MD_tbaa_struct);
      B.CreateMemCpy(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
                     MTI->getLength(),
                     false, // isVolatile
                     TBAA, TBAAStruct, ScopeMD, NoAliasMD);
    } else {
      assert(isa<MemMoveInst>(MTI));
      B.CreateMemMove(Dest, MTI->getDestAlign(), Src, MTI->getSourceAlign(),
                      MTI->getLength(),
                      false, // isVolatile
                      TBAA, ScopeMD, NoAliasMD);
    }
  } else
    llvm_unreachable("unhandled MemIntrinsic");

  MI->eraseFromParent();
  return true;
}
// Returns true if it is OK to change the address space of constant \p C with
// a ConstantExpr addrspacecast.
bool InferAddressSpacesImpl::isSafeToCastConstAddrSpace(Constant *C,
                                                        unsigned NewAS) const {
  assert(NewAS != UninitializedAddressSpace);

  unsigned SrcAS = C->getType()->getPointerAddressSpace();
  if (SrcAS == NewAS || isa<UndefValue>(C))
    return true;

  // Prevent illegal casts between different non-flat address spaces.
  if (SrcAS != FlatAddrSpace && NewAS != FlatAddrSpace)
    return false;

  if (isa<ConstantPointerNull>(C))
    return true;

  if (auto *Op = dyn_cast<Operator>(C)) {
    // If we already have a constant addrspacecast, it should be safe to cast
    // it off.
    if (Op->getOpcode() == Instruction::AddrSpaceCast)
      return isSafeToCastConstAddrSpace(cast<Constant>(Op->getOperand(0)),
                                        NewAS);

    if (Op->getOpcode() == Instruction::IntToPtr &&
        Op->getType()->getPointerAddressSpace() == FlatAddrSpace)
      return true;
  }

  return false;
}
static Value::use_iterator skipToNextUser(Value::use_iterator I,
                                          Value::use_iterator End) {
  User *CurUser = I->getUser();
  ++I;

  while (I != End && I->getUser() == CurUser)
    ++I;

  return I;
}
bool InferAddressSpacesImpl::rewriteWithNewAddressSpaces(
    const TargetTransformInfo &TTI, ArrayRef<WeakTrackingVH> Postorder,
    const ValueToAddrSpaceMapTy &InferredAddrSpace, Function *F) const {
  // For each address expression to be modified, creates a clone of it with its
  // pointer operands converted to the new address space. Since the pointer
  // operands are converted, the clone is naturally in the new address space by
  // construction.
  ValueToValueMapTy ValueWithNewAddrSpace;
  SmallVector<const Use *, 32> UndefUsesToFix;
  for (Value *V : Postorder) {
    unsigned NewAddrSpace = InferredAddrSpace.lookup(V);

    // In some degenerate cases (e.g. invalid IR in unreachable code), we may
    // not even infer the value to have its original address space.
    if (NewAddrSpace == UninitializedAddressSpace)
      continue;

    if (V->getType()->getPointerAddressSpace() != NewAddrSpace) {
      Value *New = cloneValueWithNewAddressSpace(
          V, NewAddrSpace, ValueWithNewAddrSpace, &UndefUsesToFix);
      if (New)
        ValueWithNewAddrSpace[V] = New;
    }
  }

  if (ValueWithNewAddrSpace.empty())
    return false;

  // Fixes all the undef uses generated by cloneInstructionWithNewAddressSpace.
  for (const Use *UndefUse : UndefUsesToFix) {
    User *V = UndefUse->getUser();
    User *NewV = cast_or_null<User>(ValueWithNewAddrSpace.lookup(V));
    if (!NewV)
      continue;

    unsigned OperandNo = UndefUse->getOperandNo();
    assert(isa<UndefValue>(NewV->getOperand(OperandNo)));
    NewV->setOperand(OperandNo, ValueWithNewAddrSpace.lookup(UndefUse->get()));
  }

  SmallVector<Instruction *, 16> DeadInstructions;
  // Replaces the uses of the old address expressions with the new ones.
  for (const WeakTrackingVH &WVH : Postorder) {
    assert(WVH && "value was unexpectedly deleted");
    Value *V = WVH;
    Value *NewV = ValueWithNewAddrSpace.lookup(V);
    if (NewV == nullptr)
      continue;

    LLVM_DEBUG(dbgs() << "Replacing the uses of " << *V << "\n  with\n  "
                      << *NewV << '\n');

    if (Constant *C = dyn_cast<Constant>(V)) {
      Constant *Replace = ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
                                                         C->getType());
      if (C != Replace) {
        LLVM_DEBUG(dbgs() << "Inserting replacement const cast: " << Replace
                          << ": " << *Replace << '\n');
        C->replaceAllUsesWith(Replace);
        V = Replace;
      }
    }

    Value::use_iterator I, E, Next;
    for (I = V->use_begin(), E = V->use_end(); I != E; ) {
      Use &U = *I;

      // Some users may see the same pointer operand in multiple operands. Skip
      // to the next instruction.
      I = skipToNextUser(I, E);

      if (isSimplePointerUseValidToReplace(
              TTI, U, V->getType()->getPointerAddressSpace())) {
        // If V is used as the pointer operand of a compatible memory
        // operation, sets the pointer operand to NewV. This replacement does
        // not change the element type, so the resultant load/store is still
        // valid.
        U.set(NewV);
        continue;
      }

      User *CurUser = U.getUser();
      // Skip if the current user is the new value itself.
      if (CurUser == NewV)
        continue;
      // Handle more complex cases like intrinsics that need to be remangled.
      if (auto *MI = dyn_cast<MemIntrinsic>(CurUser)) {
        if (!MI->isVolatile() && handleMemIntrinsicPtrUse(MI, V, NewV))
          continue;
      }

      if (auto *II = dyn_cast<IntrinsicInst>(CurUser)) {
        if (rewriteIntrinsicOperands(II, V, NewV))
          continue;
      }

      if (isa<Instruction>(CurUser)) {
        if (ICmpInst *Cmp = dyn_cast<ICmpInst>(CurUser)) {
          // If we can infer that both pointers are in the same addrspace,
          // transform e.g.
          //   %cmp = icmp eq float* %p, %q
          // into
          //   %cmp = icmp eq float addrspace(3)* %new_p, %new_q

          unsigned NewAS = NewV->getType()->getPointerAddressSpace();
          int SrcIdx = U.getOperandNo();
          int OtherIdx = (SrcIdx == 0) ? 1 : 0;
          Value *OtherSrc = Cmp->getOperand(OtherIdx);

          if (Value *OtherNewV = ValueWithNewAddrSpace.lookup(OtherSrc)) {
            if (OtherNewV->getType()->getPointerAddressSpace() == NewAS) {
              Cmp->setOperand(OtherIdx, OtherNewV);
              Cmp->setOperand(SrcIdx, NewV);
              continue;
            }
          }

          // Even if the type mismatches, we can cast the constant.
          if (auto *KOtherSrc = dyn_cast<Constant>(OtherSrc)) {
            if (isSafeToCastConstAddrSpace(KOtherSrc, NewAS)) {
              Cmp->setOperand(SrcIdx, NewV);
              Cmp->setOperand(OtherIdx, ConstantExpr::getAddrSpaceCast(
                                            KOtherSrc, NewV->getType()));
              continue;
            }
          }
        }
        if (AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(CurUser)) {
          unsigned NewAS = NewV->getType()->getPointerAddressSpace();
          if (ASC->getDestAddressSpace() == NewAS) {
            if (ASC->getType()->getPointerElementType() !=
                NewV->getType()->getPointerElementType()) {
              NewV = CastInst::Create(Instruction::BitCast, NewV,
                                      ASC->getType(), "", ASC);
            }
            ASC->replaceAllUsesWith(NewV);
            DeadInstructions.push_back(ASC);
            continue;
          }
        }

        // Otherwise, replaces the use with flat(NewV).
        if (Instruction *Inst = dyn_cast<Instruction>(V)) {
          // Don't create a copy of the original addrspacecast.
          if (U == V && isa<AddrSpaceCastInst>(V))
            continue;

          BasicBlock::iterator InsertPos = std::next(Inst->getIterator());
          while (isa<PHINode>(InsertPos))
            ++InsertPos;
          U.set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos));
        } else {
          U.set(ConstantExpr::getAddrSpaceCast(cast<Constant>(NewV),
                                               V->getType()));
        }
      }
    }

    if (V->use_empty()) {
      if (Instruction *I = dyn_cast<Instruction>(V))
        DeadInstructions.push_back(I);
    }
  }

  for (Instruction *I : DeadInstructions)
    RecursivelyDeleteTriviallyDeadInstructions(I);

  return true;
}
bool InferAddressSpaces::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  return InferAddressSpacesImpl(
             &getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F),
             FlatAddrSpace)
      .run(F);
}

FunctionPass *llvm::createInferAddressSpacesPass(unsigned AddressSpace) {
  return new InferAddressSpaces(AddressSpace);
}
InferAddressSpacesPass::InferAddressSpacesPass()
    : FlatAddrSpace(UninitializedAddressSpace) {}
InferAddressSpacesPass::InferAddressSpacesPass(unsigned AddressSpace)
    : FlatAddrSpace(AddressSpace) {}

PreservedAnalyses InferAddressSpacesPass::run(Function &F,
                                              FunctionAnalysisManager &AM) {
  bool Changed =
      InferAddressSpacesImpl(&AM.getResult<TargetIRAnalysis>(F), FlatAddrSpace)
          .run(F);
  if (Changed) {
    PreservedAnalyses PA;
    PA.preserveSet<CFGAnalyses>();
    return PA;
  }
  return PreservedAnalyses::all();
}