//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well-known scalar replacement of
// aggregates transformation. This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible). Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
// they often interact, especially for C++ programs. As such, iterating between
// SRoA and Mem2Reg until we run out of things to promote works well.
//
//===----------------------------------------------------------------------===//
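
// For illustration only (a sketch of the intended end-to-end effect, not the
// output of any particular run): given
//
//   %p = alloca { i32, float }
//   %f = getelementptr { i32, float }* %p, i32 0, i32 1
//   store float 1.0, float* %f
//
// SRoA first splits %p into one alloca per field; mem2reg then promotes the
// surviving scalar allocas directly into SSA values, deleting the remaining
// loads and stores.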

#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/DebugInfo.h"
#include "llvm/Analysis/DIBuilder.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/SSAUpdater.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumAdjusted,  "Number of scalar allocas adjusted to allow promotion");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
STATISTIC(NumGlobals,   "Number of allocas copied from constant global");

namespace {
struct SROA : public FunctionPass {
  SROA(int T, bool hasDT, char &ID)
    : FunctionPass(ID), HasDomTree(hasDT) {
    if (T == -1)
      SRThreshold = 128;
    else
      SRThreshold = T;
  }

  bool runOnFunction(Function &F);

  bool performScalarRepl(Function &F);
  bool performPromotion(Function &F);

private:
  bool HasDomTree;
  TargetData *TD;

  /// DeadInsts - Keep track of instructions we have made dead, so that
  /// we can remove them after we are done working.
  SmallVector<Value*, 32> DeadInsts;

  /// AllocaInfo - When analyzing uses of an alloca instruction, this captures
  /// information about the uses. All these fields are initialized to false
  /// and set to true when something is learned.
  struct AllocaInfo {
    /// The alloca to promote.
    AllocaInst *AI;

    /// CheckedPHIs - This is a set of verified PHI nodes, to prevent infinite
    /// looping and avoid redundant work.
    SmallPtrSet<PHINode*, 8> CheckedPHIs;

    /// isUnsafe - This is set to true if the alloca cannot be SROA'd.
    bool isUnsafe : 1;

    /// isMemCpySrc - This is true if this aggregate is memcpy'd from.
    bool isMemCpySrc : 1;

    /// isMemCpyDst - This is true if this aggregate is memcpy'd into.
    bool isMemCpyDst : 1;

    /// hasSubelementAccess - This is true if a subelement of the alloca is
    /// ever accessed, or false if the alloca is only accessed with mem
    /// intrinsics or load/store that only access the entire alloca at once.
    bool hasSubelementAccess : 1;

    /// hasALoadOrStore - This is true if there are any loads or stores to it.
    /// The alloca may just be accessed with memcpy, for example, which would
    /// not set this.
    bool hasALoadOrStore : 1;

    explicit AllocaInfo(AllocaInst *ai)
      : AI(ai), isUnsafe(false), isMemCpySrc(false), isMemCpyDst(false),
        hasSubelementAccess(false), hasALoadOrStore(false) {}
  };

  unsigned SRThreshold;

  void MarkUnsafe(AllocaInfo &I, Instruction *User) {
    I.isUnsafe = true;
    DEBUG(dbgs() << "  Transformation preventing inst: " << *User << '\n');
  }

  bool isSafeAllocaToScalarRepl(AllocaInst *AI);

  void isSafeForScalarRepl(Instruction *I, uint64_t Offset, AllocaInfo &Info);
  void isSafePHISelectUseForScalarRepl(Instruction *User, uint64_t Offset,
                                       AllocaInfo &Info);
  void isSafeGEP(GetElementPtrInst *GEPI, uint64_t &Offset, AllocaInfo &Info);
  void isSafeMemAccess(uint64_t Offset, uint64_t MemSize,
                       const Type *MemOpType, bool isStore, AllocaInfo &Info,
                       Instruction *TheAccess, bool AllowWholeAccess);
  bool TypeHasComponent(const Type *T, uint64_t Offset, uint64_t Size);
  uint64_t FindElementAndOffset(const Type *&T, uint64_t &Offset,
                                const Type *&IdxTy);

  void DoScalarReplacement(AllocaInst *AI,
                           std::vector<AllocaInst*> &WorkList);
  void DeleteDeadInstructions();

  void RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
                            SmallVector<AllocaInst*, 32> &NewElts);
  void RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
                      SmallVector<AllocaInst*, 32> &NewElts);
  void RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
                  SmallVector<AllocaInst*, 32> &NewElts);
  void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
                                    AllocaInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);
  void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
                                     SmallVector<AllocaInst*, 32> &NewElts);
  void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);

  static MemTransferInst *isOnlyCopiedFromConstantGlobal(
      AllocaInst *AI, SmallVector<Instruction*, 4> &ToDelete);
};

// SROA_DT - SROA that uses DominatorTree.
struct SROA_DT : public SROA {
  static char ID;
public:
  SROA_DT(int T = -1) : SROA(T, true, ID) {
    initializeSROA_DTPass(*PassRegistry::getPassRegistry());
  }

  // getAnalysisUsage - This pass does not require any passes, but we know it
  // will not alter the CFG, so say so.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<DominatorTree>();
    AU.setPreservesCFG();
  }
};

// SROA_SSAUp - SROA that uses SSAUpdater.
struct SROA_SSAUp : public SROA {
  static char ID;
public:
  SROA_SSAUp(int T = -1) : SROA(T, false, ID) {
    initializeSROA_SSAUpPass(*PassRegistry::getPassRegistry());
  }

  // getAnalysisUsage - This pass does not require any passes, but we know it
  // will not alter the CFG, so say so.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.setPreservesCFG();
  }
};

}

char SROA_DT::ID = 0;
char SROA_SSAUp::ID = 0;

INITIALIZE_PASS_BEGIN(SROA_DT, "scalarrepl",
                      "Scalar Replacement of Aggregates (DT)", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_END(SROA_DT, "scalarrepl",
                    "Scalar Replacement of Aggregates (DT)", false, false)

INITIALIZE_PASS_BEGIN(SROA_SSAUp, "scalarrepl-ssa",
                      "Scalar Replacement of Aggregates (SSAUp)", false, false)
INITIALIZE_PASS_END(SROA_SSAUp, "scalarrepl-ssa",
                    "Scalar Replacement of Aggregates (SSAUp)", false, false)

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass(int Threshold,
                                                   bool UseDomTree) {
  if (UseDomTree)
    return new SROA_DT(Threshold);
  return new SROA_SSAUp(Threshold);
}
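
// Example client (a sketch, not part of this file; "M" is a hypothetical
// Module): the pass can be scheduled through the legacy PassManager with the
// same parameters as the factory function above.
//
//   PassManager PM;
//   PM.add(createScalarReplAggregatesPass(/*Threshold=*/128,
//                                         /*UseDomTree=*/true));
//   PM.run(M);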

//===----------------------------------------------------------------------===//
// Convert To Scalar Optimization.
//===----------------------------------------------------------------------===//

namespace {
/// ConvertToScalarInfo - This class implements the "Convert To Scalar"
/// optimization, which scans the uses of an alloca and determines if it can
/// rewrite it in terms of a single new alloca that can be mem2reg'd.
class ConvertToScalarInfo {
  /// AllocaSize - The size of the alloca being considered in bytes.
  unsigned AllocaSize;
  const TargetData &TD;

  /// IsNotTrivial - This is set to true if there is some access to the object
  /// which means that mem2reg can't promote it.
  bool IsNotTrivial;

  /// ScalarKind - Tracks the kind of alloca being considered for promotion,
  /// computed based on the uses of the alloca rather than the LLVM type
  /// system.
  enum {
    Unknown,

    // Accesses via GEPs that are consistent with element access of a vector
    // type. This will not be converted into a vector unless there is a later
    // access using an actual vector type.
    ImplicitVector,

    // Accesses via vector operations and GEPs that are consistent with the
    // layout of a vector type.
    Vector,

    // An integer bag-of-bits with bitwise operations for insertion and
    // extraction. Any combination of types can be converted into this kind
    // of scalar.
    Integer
  } ScalarKind;

  /// VectorTy - This tracks the type that we should promote the vector to if
  /// it is possible to turn it into a vector. This starts out null, and if it
  /// isn't possible to turn into a vector type, it gets set to VoidTy.
  const VectorType *VectorTy;

  /// HadNonMemTransferAccess - True if there is at least one access to the
  /// alloca that is not a MemTransferInst. We don't want to turn structs into
  /// large integers unless there is some potential for optimization.
  bool HadNonMemTransferAccess;

public:
  explicit ConvertToScalarInfo(unsigned Size, const TargetData &td)
    : AllocaSize(Size), TD(td), IsNotTrivial(false), ScalarKind(Unknown),
      VectorTy(0), HadNonMemTransferAccess(false) { }

  AllocaInst *TryConvert(AllocaInst *AI);

private:
  bool CanConvertToScalar(Value *V, uint64_t Offset);
  void MergeInTypeForLoadOrStore(const Type *In, uint64_t Offset);
  bool MergeInVectorType(const VectorType *VInTy, uint64_t Offset);
  void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset);

  Value *ConvertScalar_ExtractValue(Value *NV, const Type *ToType,
                                    uint64_t Offset, IRBuilder<> &Builder);
  Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
                                   uint64_t Offset, IRBuilder<> &Builder);
};
} // end anonymous namespace.


/// TryConvert - Analyze the specified alloca, and if it is safe to do so,
/// rewrite it to be a new alloca which is mem2reg'able. This returns the new
/// alloca if possible or null if not.
AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
  // If we can't convert this scalar, or if mem2reg can trivially do it, bail
  // out.
  if (!CanConvertToScalar(AI, 0) || !IsNotTrivial)
    return 0;

  // If an alloca has only memset / memcpy uses, it may still have an Unknown
  // ScalarKind. Treat it as an Integer below.
  if (ScalarKind == Unknown)
    ScalarKind = Integer;

  // FIXME: It should be possible to promote the vector type up to the alloca's
  // size.
  if (ScalarKind == Vector && VectorTy->getBitWidth() != AllocaSize * 8)
    ScalarKind = Integer;

  // If we were able to find a vector type that can handle this with
  // insert/extract elements, and if there was at least one use that had
  // a vector type, promote this to a vector. We don't want to promote
  // random stuff that doesn't use vectors (e.g. <9 x double>) because then
  // we just get a lot of insert/extracts. If at least one vector is
  // involved, then we probably really do have a union of vector/array.
  const Type *NewTy;
  if (ScalarKind == Vector) {
    assert(VectorTy && "Missing type for vector scalar.");
    DEBUG(dbgs() << "CONVERT TO VECTOR: " << *AI << "\n  TYPE = "
                 << *VectorTy << '\n');
    NewTy = VectorTy;  // Use the vector type.
  } else {
    unsigned BitWidth = AllocaSize * 8;
    if ((ScalarKind == ImplicitVector || ScalarKind == Integer) &&
        !HadNonMemTransferAccess && !TD.fitsInLegalInteger(BitWidth))
      return 0;

    DEBUG(dbgs() << "CONVERT TO SCALAR INTEGER: " << *AI << "\n");
    // Create and insert the integer alloca.
    NewTy = IntegerType::get(AI->getContext(), BitWidth);
  }
  AllocaInst *NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin());
  ConvertUsesToScalar(AI, NewAI, 0);
  return NewAI;
}

/// MergeInTypeForLoadOrStore - Add the 'In' type to the accumulated vector
/// type (VectorTy) so far at the offset specified by Offset (which is
/// specified in bytes).
///
/// There are three cases we handle here:
///   1) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///      This promotes a <4 x float> with a store of float to the third element
///      into a <4 x float> that uses insert element.
///   2) A union of vector types with power-of-2 size differences, e.g. a
///      float, <2 x float> and <4 x float>. Here we turn element accesses into
///      insert and extract element operations, and <2 x float> accesses into a
///      cast to <2 x double>, an extract, and a cast back to <2 x float>.
///   3) A fully general blob of memory, which we turn into some (potentially
///      large) integer type with extract and insert operations where the loads
///      and stores would mutate the memory. We mark this by setting VectorTy
///      to VoidTy.
void ConvertToScalarInfo::MergeInTypeForLoadOrStore(const Type *In,
                                                    uint64_t Offset) {
  // If we already decided to turn this into a blob of integer memory, there is
  // nothing to be done.
  if (ScalarKind == Integer)
    return;

  // If this could be contributing to a vector, analyze it.

  // If the In type is a vector that is the same size as the alloca, see if it
  // matches the existing VecTy.
  if (const VectorType *VInTy = dyn_cast<VectorType>(In)) {
    if (MergeInVectorType(VInTy, Offset))
      return;
  } else if (In->isFloatTy() || In->isDoubleTy() ||
             (In->isIntegerTy() && In->getPrimitiveSizeInBits() >= 8 &&
              isPowerOf2_32(In->getPrimitiveSizeInBits()))) {
    // Full width accesses can be ignored, because they can always be turned
    // into bitcasts.
    unsigned EltSize = In->getPrimitiveSizeInBits()/8;
    if (EltSize == AllocaSize)
      return;

    // If we're accessing something that could be an element of a vector, see
    // if the implied vector agrees with what we already have and if Offset is
    // compatible with it.
    if (Offset % EltSize == 0 && AllocaSize % EltSize == 0 &&
        (!VectorTy || Offset * 8 < VectorTy->getPrimitiveSizeInBits())) {
      if (!VectorTy) {
        ScalarKind = ImplicitVector;
        VectorTy = VectorType::get(In, AllocaSize/EltSize);
        return;
      }

      unsigned CurrentEltSize = VectorTy->getElementType()
                                ->getPrimitiveSizeInBits()/8;
      if (EltSize == CurrentEltSize)
        return;

      if (In->isIntegerTy() && isPowerOf2_32(AllocaSize / EltSize))
        return;
    }
  }

  // Otherwise, we have a case that we can't handle with an optimized vector
  // form. We can still turn this into a large integer.
  ScalarKind = Integer;
}
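
// Illustrative sketch (hypothetical values, not taken from any run): for a
// 16-byte alloca, a load of 'float' at byte offset 8 satisfies the element
// checks above (EltSize == 4, 8 % 4 == 0, 16 % 4 == 0), so VectorTy becomes
// the implied <4 x float> and ScalarKind stays ImplicitVector until an
// access with a real vector type confirms (or contradicts) the guess.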

/// MergeInVectorType - Handles the vector case of MergeInTypeForLoadOrStore,
/// returning true if the type was successfully merged and false otherwise.
bool ConvertToScalarInfo::MergeInVectorType(const VectorType *VInTy,
                                            uint64_t Offset) {
  // TODO: Support nonzero offsets?
  if (Offset != 0)
    return false;

  // Only allow vectors that are a power-of-2 away from the size of the alloca.
  if (!isPowerOf2_64(AllocaSize / (VInTy->getBitWidth() / 8)))
    return false;

  // If this is the first vector we see, remember the type so that we know the
  // element size.
  if (!VectorTy) {
    ScalarKind = Vector;
    VectorTy = VInTy;
    return true;
  }

  unsigned BitWidth = VectorTy->getBitWidth();
  unsigned InBitWidth = VInTy->getBitWidth();

  // Vectors of the same size can be converted using a simple bitcast.
  if (InBitWidth == BitWidth && AllocaSize == (InBitWidth / 8)) {
    ScalarKind = Vector;
    return true;
  }

  const Type *ElementTy = VectorTy->getElementType();
  const Type *InElementTy = VInTy->getElementType();

  // Do not allow mixed integer and floating-point accesses from vectors of
  // different sizes.
  if (ElementTy->isFloatingPointTy() != InElementTy->isFloatingPointTy())
    return false;

  if (ElementTy->isFloatingPointTy()) {
    // Only allow floating-point vectors of different sizes if they have the
    // same element type.
    // TODO: This could be loosened a bit, but would anything benefit?
    if (ElementTy != InElementTy)
      return false;

    // There are no arbitrary-precision floating-point types, which limits the
    // number of legal vector types with larger element types that we can form
    // to bitcast and extract a subvector.
    // TODO: We could support some more cases with mixed fp128 and double here.
    if (!(BitWidth == 64 || BitWidth == 128) ||
        !(InBitWidth == 64 || InBitWidth == 128))
      return false;
  } else {
    assert(ElementTy->isIntegerTy() && "Vector elements must be either integer "
                                       "or floating-point.");
    unsigned BitWidth = ElementTy->getPrimitiveSizeInBits();
    unsigned InBitWidth = InElementTy->getPrimitiveSizeInBits();

    // Do not allow integer types smaller than a byte or types whose widths are
    // not a multiple of a byte.
    if (BitWidth < 8 || InBitWidth < 8 ||
        BitWidth % 8 != 0 || InBitWidth % 8 != 0)
      return false;
  }

  // Pick the largest of the two vector types.
  ScalarKind = Vector;
  if (InBitWidth > BitWidth)
    VectorTy = VInTy;

  return true;
}
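
// Illustrative sketch (hypothetical case): with a 16-byte alloca accessed as
// both <2 x float> and <4 x float>, the first access records <2 x float>;
// the second passes the power-of-2 and same-element-type checks (64- and
// 128-bit widths), so the larger <4 x float> wins and the narrower accesses
// are later rewritten as a cast plus subvector extract.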

/// CanConvertToScalar - V is a pointer. If we can convert the pointee and all
/// its accesses to a single vector type, return true and set VecTy to
/// the new type. If we could convert the alloca into a single promotable
/// integer, return true but set VecTy to VoidTy. Further, if the use is not a
/// completely trivial use that mem2reg could promote, set IsNotTrivial. Offset
/// is the current offset from the base of the alloca being analyzed.
///
/// If we see at least one access to the value that is of a vector type, set
/// the SawVec flag.
bool ConvertToScalarInfo::CanConvertToScalar(Value *V, uint64_t Offset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // Don't break volatile loads.
      if (LI->isVolatile())
        return false;
      // Don't touch MMX operations.
      if (LI->getType()->isX86_MMXTy())
        return false;
      HadNonMemTransferAccess = true;
      MergeInTypeForLoadOrStore(LI->getType(), Offset);
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V || SI->isVolatile()) return false;
      // Don't touch MMX operations.
      if (SI->getOperand(0)->getType()->isX86_MMXTy())
        return false;
      HadNonMemTransferAccess = true;
      MergeInTypeForLoadOrStore(SI->getOperand(0)->getType(), Offset);
      continue;
    }

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      IsNotTrivial = true;  // Can't be mem2reg'd.
      if (!CanConvertToScalar(BCI, Offset))
        return false;
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // If this is a GEP with variable indices, we can't handle it.
      if (!GEP->hasAllConstantIndices())
        return false;

      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD.getIndexedOffset(GEP->getPointerOperandType(),
                                               &Indices[0], Indices.size());
      // See if all uses can be converted.
      if (!CanConvertToScalar(GEP, Offset+GEPOffset))
        return false;
      IsNotTrivial = true;  // Can't be mem2reg'd.
      HadNonMemTransferAccess = true;
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // handle it.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      // Store of constant value.
      if (!isa<ConstantInt>(MSI->getValue()))
        return false;

      // Store of constant size.
      ConstantInt *Len = dyn_cast<ConstantInt>(MSI->getLength());
      if (!Len)
        return false;

      // If the size differs from the alloca, we can only convert the alloca to
      // an integer bag-of-bits.
      // FIXME: This should handle all of the cases that are currently accepted
      // as vector element insertions.
      if (Len->getZExtValue() != AllocaSize || Offset != 0)
        ScalarKind = Integer;

      IsNotTrivial = true;  // Can't be mem2reg'd.
      HadNonMemTransferAccess = true;
      continue;
    }

    // If this is a memcpy or memmove into or out of the whole allocation, we
    // can handle it like a load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
      ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength());
      if (Len == 0 || Len->getZExtValue() != AllocaSize || Offset != 0)
        return false;

      IsNotTrivial = true;  // Can't be mem2reg'd.
      continue;
    }

    // Otherwise, we cannot handle this!
    return false;
  }

  return true;
}

/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly. This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right. By the end of this, there should be no uses of Ptr.
void ConvertToScalarInfo::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI,
                                              uint64_t Offset) {
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD.getIndexedOffset(GEP->getPointerOperandType(),
                                               &Indices[0], Indices.size());
      ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8);
      GEP->eraseFromParent();
      continue;
    }

    IRBuilder<> Builder(User);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *LoadedVal = Builder.CreateLoad(NewAI, "tmp");
      Value *NewLoadVal
        = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset, Builder);
      LI->replaceAllUsesWith(NewLoadVal);
      LI->eraseFromParent();
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");
      Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
      Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset,
                                             Builder);
      Builder.CreateStore(New, NewAI);
      SI->eraseFromParent();

      // If the load we just inserted is now dead, then the inserted store
      // overwrote the entire thing.
      if (Old->use_empty())
        Old->eraseFromParent();
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // transform it into a store of the expanded constant value.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      assert(MSI->getRawDest() == Ptr && "Consistency error!");
      unsigned NumBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
      if (NumBytes != 0) {
        unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue();

        // Compute the value replicated the right number of times.
        APInt APVal(NumBytes*8, Val);

        // Splat the value if non-zero.
        if (Val)
          for (unsigned i = 1; i != NumBytes; ++i)
            APVal |= APVal << 8;

        Instruction *Old = Builder.CreateLoad(NewAI, NewAI->getName()+".in");
        Value *New = ConvertScalar_InsertValue(
            ConstantInt::get(User->getContext(), APVal), Old, Offset, Builder);
        Builder.CreateStore(New, NewAI);

        // If the load we just inserted is now dead, then the memset overwrote
        // the entire thing.
        if (Old->use_empty())
          Old->eraseFromParent();
      }
      MSI->eraseFromParent();
      continue;
    }
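
    // Worked example of the splat above (illustrative values): for a 4-byte
    // alloca fully covered by memset(p, 0xAB, 4), Val is 0xAB and the loop
    // ors in byte-shifted copies, giving APVal = 0xABABABAB; the memset thus
    // becomes a plain store of that i32 constant into the new alloca.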

    // If this is a memcpy or memmove into or out of the whole allocation, we
    // can handle it like a load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
      assert(Offset == 0 && "must be store to start of alloca");

      // If the source and destination are both the same alloca, then this is
      // a noop copy-to-self, just delete it. Otherwise, emit a load and store
      // as appropriate.
      AllocaInst *OrigAI = cast<AllocaInst>(GetUnderlyingObject(Ptr, &TD, 0));

      if (GetUnderlyingObject(MTI->getSource(), &TD, 0) != OrigAI) {
        // Dest must be OrigAI, change this to be a load from the original
        // pointer (bitcasted), then a store to our new alloca.
        assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
        Value *SrcPtr = MTI->getSource();
        const PointerType* SPTy = cast<PointerType>(SrcPtr->getType());
        const PointerType* AIPTy = cast<PointerType>(NewAI->getType());
        if (SPTy->getAddressSpace() != AIPTy->getAddressSpace()) {
          AIPTy = PointerType::get(AIPTy->getElementType(),
                                   SPTy->getAddressSpace());
        }
        SrcPtr = Builder.CreateBitCast(SrcPtr, AIPTy);

        LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
        SrcVal->setAlignment(MTI->getAlignment());
        Builder.CreateStore(SrcVal, NewAI);
      } else if (GetUnderlyingObject(MTI->getDest(), &TD, 0) != OrigAI) {
        // Src must be OrigAI, change this to be a load from NewAI then a store
        // through the original dest pointer (bitcasted).
        assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
        LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval");

        const PointerType* DPTy = cast<PointerType>(MTI->getDest()->getType());
        const PointerType* AIPTy = cast<PointerType>(NewAI->getType());
        if (DPTy->getAddressSpace() != AIPTy->getAddressSpace()) {
          AIPTy = PointerType::get(AIPTy->getElementType(),
                                   DPTy->getAddressSpace());
        }
        Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), AIPTy);

        StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr);
        NewStore->setAlignment(MTI->getAlignment());
      } else {
        // Noop transfer. Src == Dst
      }

      MTI->eraseFromParent();
      continue;
    }

    llvm_unreachable("Unsupported operation!");
  }
}

/// getScaledElementType - Gets a scaled element type for a partial vector
/// access of an alloca. The input types must be integer or floating-point
/// scalar or vector types, and the resulting type is an integer, float or
/// double.
static const Type *getScaledElementType(const Type *Ty1, const Type *Ty2,
                                        unsigned NewBitWidth) {
  bool IsFP1 = Ty1->isFloatingPointTy() ||
               (Ty1->isVectorTy() &&
                cast<VectorType>(Ty1)->getElementType()->isFloatingPointTy());
  bool IsFP2 = Ty2->isFloatingPointTy() ||
               (Ty2->isVectorTy() &&
                cast<VectorType>(Ty2)->getElementType()->isFloatingPointTy());

  LLVMContext &Context = Ty1->getContext();

  // Prefer floating-point types over integer types, as integer types may have
  // been created by earlier scalar replacement.
  if (IsFP1 || IsFP2) {
    if (NewBitWidth == 32)
      return Type::getFloatTy(Context);
    if (NewBitWidth == 64)
      return Type::getDoubleTy(Context);
  }

  return Type::getIntNTy(Context, NewBitWidth);
}
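
// Illustrative sketch (hypothetical inputs): for Ty1 = <4 x float>,
// Ty2 = i64 and NewBitWidth = 64, one side is floating-point, so the helper
// returns double; with two integer inputs of the same width it would return
// i64 instead.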

/// CreateShuffleVectorCast - Creates a shuffle vector to convert one vector
/// to another vector of the same element type which has the same allocation
/// size but different primitive sizes (e.g. <3 x i32> and <4 x i32>).
static Value *CreateShuffleVectorCast(Value *FromVal, const Type *ToType,
                                      IRBuilder<> &Builder) {
  const Type *FromType = FromVal->getType();
  const VectorType *FromVTy = cast<VectorType>(FromType);
  const VectorType *ToVTy = cast<VectorType>(ToType);
  assert((ToVTy->getElementType() == FromVTy->getElementType()) &&
         "Vectors must have the same element type");
  Value *UnV = UndefValue::get(FromType);
  unsigned numEltsFrom = FromVTy->getNumElements();
  unsigned numEltsTo = ToVTy->getNumElements();

  SmallVector<Constant*, 3> Args;
  const Type* Int32Ty = Builder.getInt32Ty();
  unsigned minNumElts = std::min(numEltsFrom, numEltsTo);
  unsigned i;
  for (i=0; i != minNumElts; ++i)
    Args.push_back(ConstantInt::get(Int32Ty, i));

  if (i < numEltsTo) {
    Constant* UnC = UndefValue::get(Int32Ty);
    for (; i != numEltsTo; ++i)
      Args.push_back(UnC);
  }
  Constant *Mask = ConstantVector::get(Args);
  return Builder.CreateShuffleVector(FromVal, UnV, Mask, "tmpV");
}
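
// Illustrative sketch of the mask built above: widening <3 x i32> to
// <4 x i32> produces the mask <0, 1, 2, undef>, i.e.
//   %tmpV = shufflevector <3 x i32> %v, <3 x i32> undef,
//                         <4 x i32> <i32 0, i32 1, i32 2, i32 undef>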

/// ConvertScalar_ExtractValue - Extract a value of type ToType from an integer
/// or vector value FromVal, extracting the bits from the offset specified by
/// Offset. This returns the value, which is of type ToType.
///
/// This happens when we are converting an "integer union" to a single
/// integer scalar, or when we are converting a "vector union" to a vector with
/// insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.
Value *ConvertToScalarInfo::
ConvertScalar_ExtractValue(Value *FromVal, const Type *ToType,
                           uint64_t Offset, IRBuilder<> &Builder) {
  // If the load is of the whole new alloca, no conversion is needed.
  const Type *FromType = FromVal->getType();
  if (FromType == ToType && Offset == 0)
    return FromVal;

  // If the result alloca is a vector type, this is either an element
  // access or a bitcast to another vector type of the same size.
  if (const VectorType *VTy = dyn_cast<VectorType>(FromType)) {
    unsigned FromTypeSize = TD.getTypeAllocSize(FromType);
    unsigned ToTypeSize = TD.getTypeAllocSize(ToType);
    if (FromTypeSize == ToTypeSize) {
      // If the two types have the same primitive size, use a bit cast.
      // Otherwise, it is two vectors with the same element type that has
      // the same allocation size but different number of elements so use
      // a shuffle vector.
      if (FromType->getPrimitiveSizeInBits() ==
          ToType->getPrimitiveSizeInBits())
        return Builder.CreateBitCast(FromVal, ToType, "tmp");
      else
        return CreateShuffleVectorCast(FromVal, ToType, Builder);
    }

    if (isPowerOf2_64(FromTypeSize / ToTypeSize)) {
      assert(!(ToType->isVectorTy() && Offset != 0) && "Can't extract a value "
             "of a smaller vector type at a nonzero offset.");

      const Type *CastElementTy = getScaledElementType(FromType, ToType,
                                                       ToTypeSize * 8);
      unsigned NumCastVectorElements = FromTypeSize / ToTypeSize;

      LLVMContext &Context = FromVal->getContext();
      const Type *CastTy = VectorType::get(CastElementTy,
                                           NumCastVectorElements);
      Value *Cast = Builder.CreateBitCast(FromVal, CastTy, "tmp");

      unsigned EltSize = TD.getTypeAllocSizeInBits(CastElementTy);
      unsigned Elt = Offset/EltSize;
      assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
      Value *Extract = Builder.CreateExtractElement(Cast, ConstantInt::get(
                                       Type::getInt32Ty(Context), Elt), "tmp");
      return Builder.CreateBitCast(Extract, ToType, "tmp");
    }

    // Otherwise it must be an element access.
    unsigned Elt = 0;
    if (Offset) {
      unsigned EltSize = TD.getTypeAllocSizeInBits(VTy->getElementType());
      Elt = Offset/EltSize;
      assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
    }
    // Return the element extracted out of it.
    Value *V = Builder.CreateExtractElement(FromVal, ConstantInt::get(
                    Type::getInt32Ty(FromVal->getContext()), Elt), "tmp");
    if (V->getType() != ToType)
      V = Builder.CreateBitCast(V, ToType, "tmp");
    return V;
  }

  // If ToType is a first class aggregate, extract out each of the pieces and
  // use insertvalue's to form the FCA.
  if (const StructType *ST = dyn_cast<StructType>(ToType)) {
    const StructLayout &Layout = *TD.getStructLayout(ST);
    Value *Res = UndefValue::get(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
                                       Offset+Layout.getElementOffsetInBits(i),
                                              Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
    }
    return Res;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
    uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());
    Value *Res = UndefValue::get(AT);
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
                                              Offset+i*EltSize, Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
    }
    return Res;
  }

  // Otherwise, this must be a union that was converted to an integer value.
  const IntegerType *NTy = cast<IntegerType>(FromVal->getType());

  // If this is a big-endian system and the load is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD.isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits. This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = TD.getTypeStoreSizeInBits(NTy) -
            TD.getTypeStoreSizeInBits(ToType) - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative shift amounts (handled with shl), which are not
  // otherwise defined. We do this to support (f.e.) loads off the end of a
  // structure where only some bits are used.
  if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateLShr(FromVal,
                                 ConstantInt::get(FromVal->getType(),
                                                  ShAmt), "tmp");
  else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateShl(FromVal,
                                ConstantInt::get(FromVal->getType(),
                                                 -ShAmt), "tmp");
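
  // Worked example (illustrative): extracting an i16 at bit Offset 16 from a
  // 32-bit integer alloca gives ShAmt = 16 on little-endian (lshr by 16, then
  // truncate), while big-endian gives ShAmt = 32 - 16 - 16 = 0, since the
  // bytes already occupy the high-order position and only the truncate below
  // is needed.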

  // Finally, unconditionally truncate the integer to the right width.
  unsigned LIBitWidth = TD.getTypeSizeInBits(ToType);
  if (LIBitWidth < NTy->getBitWidth())
    FromVal =
      Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(),
                                                    LIBitWidth), "tmp");
  else if (LIBitWidth > NTy->getBitWidth())
    FromVal =
      Builder.CreateZExt(FromVal, IntegerType::get(FromVal->getContext(),
                                                   LIBitWidth), "tmp");

  // If the result is an integer, this is a trunc or bitcast.
  if (ToType->isIntegerTy()) {
    // Should be done.
  } else if (ToType->isFloatingPointTy() || ToType->isVectorTy()) {
    // Just do a bitcast, we know the sizes match up.
    FromVal = Builder.CreateBitCast(FromVal, ToType, "tmp");
  } else {
    // Otherwise must be a pointer.
    FromVal = Builder.CreateIntToPtr(FromVal, ToType, "tmp");
  }
  assert(FromVal->getType() == ToType && "Didn't convert right?");
  return FromVal;
}

/// ConvertScalar_InsertValue - Insert the value "SV" into the existing integer
/// or vector value "Old" at the offset specified by Offset.
///
/// This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.
Value *ConvertToScalarInfo::
ConvertScalar_InsertValue(Value *SV, Value *Old,
                          uint64_t Offset, IRBuilder<> &Builder) {
  // Convert the stored type to the actual type, shift it left into place,
  // then 'or' it in.
  const Type *AllocaType = Old->getType();
  LLVMContext &Context = Old->getContext();

  if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
    uint64_t VecSize = TD.getTypeAllocSizeInBits(VTy);
    uint64_t ValSize = TD.getTypeAllocSizeInBits(SV->getType());

    // Changing the whole vector with memset or with an access of a different
    // vector type?
    if (ValSize == VecSize) {
      // If the two types have the same primitive size, use a bit cast.
      // Otherwise, it is two vectors with the same element type that has
      // the same allocation size but different number of elements so use
      // a shuffle vector.
      if (VTy->getPrimitiveSizeInBits() ==
          SV->getType()->getPrimitiveSizeInBits())
        return Builder.CreateBitCast(SV, AllocaType, "tmp");
      else
        return CreateShuffleVectorCast(SV, VTy, Builder);
    }

    if (isPowerOf2_64(VecSize / ValSize)) {
      assert(!(SV->getType()->isVectorTy() && Offset != 0) && "Can't insert a "
             "value of a smaller vector type at a nonzero offset.");

      const Type *CastElementTy = getScaledElementType(VTy, SV->getType(),
                                                       ValSize);
      unsigned NumCastVectorElements = VecSize / ValSize;

      LLVMContext &Context = SV->getContext();
      const Type *OldCastTy = VectorType::get(CastElementTy,
                                              NumCastVectorElements);
      Value *OldCast = Builder.CreateBitCast(Old, OldCastTy, "tmp");

      Value *SVCast = Builder.CreateBitCast(SV, CastElementTy, "tmp");

      unsigned EltSize = TD.getTypeAllocSizeInBits(CastElementTy);
      unsigned Elt = Offset/EltSize;
      assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
      Value *Insert =
        Builder.CreateInsertElement(OldCast, SVCast, ConstantInt::get(
                                       Type::getInt32Ty(Context), Elt), "tmp");
      return Builder.CreateBitCast(Insert, AllocaType, "tmp");
    }

    // Must be an element insertion.
    assert(SV->getType() == VTy->getElementType());
    uint64_t EltSize = TD.getTypeAllocSizeInBits(VTy->getElementType());
    unsigned Elt = Offset/EltSize;
    return Builder.CreateInsertElement(Old, SV,
                    ConstantInt::get(Type::getInt32Ty(SV->getContext()), Elt),
                                       "tmp");
  }

  // If SV is a first-class aggregate value, insert each value recursively.
  if (const StructType *ST = dyn_cast<StructType>(SV->getType())) {
    const StructLayout &Layout = *TD.getStructLayout(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      Old = ConvertScalar_InsertValue(Elt, Old,
                                      Offset+Layout.getElementOffsetInBits(i),
                                      Builder);
    }
    return Old;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
    uint64_t EltSize = TD.getTypeAllocSizeInBits(AT->getElementType());
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, Builder);
    }
    return Old;
  }

  // If SV is a float, convert it to the appropriate integer type.
  // If it is a pointer, do the same.
  unsigned SrcWidth = TD.getTypeSizeInBits(SV->getType());
  unsigned DestWidth = TD.getTypeSizeInBits(AllocaType);
  unsigned SrcStoreWidth = TD.getTypeStoreSizeInBits(SV->getType());
  unsigned DestStoreWidth = TD.getTypeStoreSizeInBits(AllocaType);
  if (SV->getType()->isFloatingPointTy() || SV->getType()->isVectorTy())
    SV = Builder.CreateBitCast(SV,
                           IntegerType::get(SV->getContext(),SrcWidth), "tmp");
  else if (SV->getType()->isPointerTy())
    SV = Builder.CreatePtrToInt(SV, TD.getIntPtrType(SV->getContext()), "tmp");

  // Zero extend or truncate the value if needed.
  if (SV->getType() != AllocaType) {
    if (SV->getType()->getPrimitiveSizeInBits() <
        AllocaType->getPrimitiveSizeInBits())
      SV = Builder.CreateZExt(SV, AllocaType, "tmp");
    else {
      // Truncation may be needed if storing more than the alloca can hold
      // (undefined behavior).
      SV = Builder.CreateTrunc(SV, AllocaType, "tmp");
      SrcWidth = DestWidth;
      SrcStoreWidth = DestStoreWidth;
    }
  }

  // If this is a big-endian system and the store is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD.isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits. This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative shift amounts (handled with lshr), which are
  // not otherwise defined. We do this to support (f.e.) stores off the end of
  // a structure where only some bits in the structure are set.
  APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
  if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
    SV = Builder.CreateShl(SV, ConstantInt::get(SV->getType(),
                                                ShAmt), "tmp");
    Mask <<= ShAmt;
  } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
    SV = Builder.CreateLShr(SV, ConstantInt::get(SV->getType(),
                                                 -ShAmt), "tmp");
    Mask = Mask.lshr(-ShAmt);
  }
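
  // Worked example (illustrative, little-endian): inserting an i8 at bit
  // Offset 8 into an i32 alloca gives SrcWidth = 8, DestWidth = 32 and
  // Mask = 0x000000FF; the shl moves the byte to bits 8..15 and shifts Mask
  // to 0x0000FF00, so the 'and'/'or' below clear exactly that byte of Old
  // and drop the new value in.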

  // Mask out the bits we are about to insert from the old value, and or
  // in the new bits.
  if (SrcWidth != DestWidth) {
    assert(DestWidth > SrcWidth);
    Old = Builder.CreateAnd(Old, ConstantInt::get(Context, ~Mask), "mask");
    SV = Builder.CreateOr(Old, SV, "ins");
  }
  return SV;
}


//===----------------------------------------------------------------------===//
// SRoA Driver
//===----------------------------------------------------------------------===//


bool SROA::runOnFunction(Function &F) {
  TD = getAnalysisIfAvailable<TargetData>();

  bool Changed = performPromotion(F);

  // FIXME: ScalarRepl currently depends on TargetData more than it
  // theoretically needs to. It should be refactored in order to support
  // target-independent IR. Until this is done, just skip the actual
  // scalar-replacement portion of this pass.
  if (!TD) return Changed;

  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}

namespace {
class AllocaPromoter : public LoadAndStorePromoter {
  AllocaInst *AI;
  DIBuilder *DIB;
  SmallVector<DbgDeclareInst *, 4> DDIs;
  SmallVector<DbgValueInst *, 4> DVIs;
public:
  AllocaPromoter(const SmallVectorImpl<Instruction*> &Insts, SSAUpdater &S,
                 DIBuilder *DB)
    : LoadAndStorePromoter(Insts, S), AI(0), DIB(DB) {}

  void run(AllocaInst *AI, const SmallVectorImpl<Instruction*> &Insts) {
    // Remember which alloca we're promoting (for isInstInList).
    this->AI = AI;
    if (MDNode *DebugNode = MDNode::getIfExists(AI->getContext(), AI))
      for (Value::use_iterator UI = DebugNode->use_begin(),
             E = DebugNode->use_end(); UI != E; ++UI)
        if (DbgDeclareInst *DDI = dyn_cast<DbgDeclareInst>(*UI))
          DDIs.push_back(DDI);
        else if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(*UI))
          DVIs.push_back(DVI);

    LoadAndStorePromoter::run(Insts);
    AI->eraseFromParent();
    for (SmallVector<DbgDeclareInst *, 4>::iterator I = DDIs.begin(),
           E = DDIs.end(); I != E; ++I) {
      DbgDeclareInst *DDI = *I;
      DDI->eraseFromParent();
    }
    for (SmallVector<DbgValueInst *, 4>::iterator I = DVIs.begin(),
           E = DVIs.end(); I != E; ++I) {
      DbgValueInst *DVI = *I;
      DVI->eraseFromParent();
    }
  }

  virtual bool isInstInList(Instruction *I,
                            const SmallVectorImpl<Instruction*> &Insts) const {
    if (LoadInst *LI = dyn_cast<LoadInst>(I))
      return LI->getOperand(0) == AI;
    return cast<StoreInst>(I)->getPointerOperand() == AI;
  }

  virtual void updateDebugInfo(Instruction *Inst) const {
    for (SmallVector<DbgDeclareInst *, 4>::const_iterator I = DDIs.begin(),
           E = DDIs.end(); I != E; ++I) {
      DbgDeclareInst *DDI = *I;
      if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
        ConvertDebugDeclareToDebugValue(DDI, SI, *DIB);
      else if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
        ConvertDebugDeclareToDebugValue(DDI, LI, *DIB);
    }
    for (SmallVector<DbgValueInst *, 4>::const_iterator I = DVIs.begin(),
           E = DVIs.end(); I != E; ++I) {
      DbgValueInst *DVI = *I;
      if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        Instruction *DbgVal = NULL;
        // If an argument is zero extended then use the argument directly. The
        // ZExt may be zapped by an optimization pass in the future.
        Argument *ExtendedArg = NULL;
        if (ZExtInst *ZExt = dyn_cast<ZExtInst>(SI->getOperand(0)))
          ExtendedArg = dyn_cast<Argument>(ZExt->getOperand(0));
        if (SExtInst *SExt = dyn_cast<SExtInst>(SI->getOperand(0)))
          ExtendedArg = dyn_cast<Argument>(SExt->getOperand(0));
        if (ExtendedArg)
          DbgVal = DIB->insertDbgValueIntrinsic(ExtendedArg, 0,
                                                DIVariable(DVI->getVariable()),
                                                SI);
        else
          DbgVal = DIB->insertDbgValueIntrinsic(SI->getOperand(0), 0,
                                                DIVariable(DVI->getVariable()),
                                                SI);
        DbgVal->setDebugLoc(DVI->getDebugLoc());
      } else if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        Instruction *DbgVal =
          DIB->insertDbgValueIntrinsic(LI->getOperand(0), 0,
                                       DIVariable(DVI->getVariable()), LI);
        DbgVal->setDebugLoc(DVI->getDebugLoc());
      }
    }
  }
};
} // end anon namespace

/// isSafeSelectToSpeculate - Select instructions that use an alloca and are
/// subsequently loaded can be rewritten to load both input pointers and then
/// select between the result, allowing the load of the alloca to be promoted.
/// From this:
///   %P2 = select i1 %cond, i32* %Alloca, i32* %Other
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   %V2 = load i32* %Other
///   %V = select i1 %cond, i32 %V1, i32 %V2
///
/// We can do this to a select if its only uses are loads and if the operands
/// of the select can be loaded unconditionally.
static bool isSafeSelectToSpeculate(SelectInst *SI, const TargetData *TD) {
  bool TDerefable = SI->getTrueValue()->isDereferenceablePointer();
  bool FDerefable = SI->getFalseValue()->isDereferenceablePointer();

  for (Value::use_iterator UI = SI->use_begin(), UE = SI->use_end();
       UI != UE; ++UI) {
    LoadInst *LI = dyn_cast<LoadInst>(*UI);
    if (LI == 0 || LI->isVolatile()) return false;

    // Both operands of the select need to be dereferenceable, either
    // absolutely (e.g. allocas) or at this point because we can see other
    // accesses to it.
    if (!TDerefable && !isSafeToLoadUnconditionally(SI->getTrueValue(), LI,
                                                    LI->getAlignment(), TD))
      return false;
    if (!FDerefable && !isSafeToLoadUnconditionally(SI->getFalseValue(), LI,
                                                    LI->getAlignment(), TD))
      return false;
  }

  return true;
}

/// isSafePHIToSpeculate - PHI instructions that use an alloca and are
/// subsequently loaded can be rewritten to load both input pointers in the
/// pred blocks and then PHI the results, allowing the load of the alloca to
/// be promoted.
/// From this:
///   %P2 = phi [i32* %Alloca, i32* %Other]
///   %V = load i32* %P2
/// to:
///   %V1 = load i32* %Alloca      -> will be mem2reg'd
///   ...
///   %V2 = load i32* %Other
///   ...
///   %V = phi [i32 %V1, i32 %V2]
///
/// We can do this to a PHI if its only uses are loads and if the operands of
/// the PHI can be loaded unconditionally.
static bool isSafePHIToSpeculate(PHINode *PN, const TargetData *TD) {
  // For now, we can only do this promotion if the load is in the same block as
  // the PHI, and if there are no stores between the phi and load.
  // TODO: Allow recursive phi users.
  // TODO: Allow stores.
  BasicBlock *BB = PN->getParent();
  unsigned MaxAlign = 0;
  for (Value::use_iterator UI = PN->use_begin(), UE = PN->use_end();
       UI != UE; ++UI) {
    LoadInst *LI = dyn_cast<LoadInst>(*UI);
    if (LI == 0 || LI->isVolatile()) return false;

    // For now we only allow loads in the same block as the PHI. This is a
    // common case that happens when instcombine merges two loads through a
    // PHI.
    if (LI->getParent() != BB) return false;

    // Ensure that there are no instructions between the PHI and the load that
    // could store.
    for (BasicBlock::iterator BBI = PN; &*BBI != LI; ++BBI)
      if (BBI->mayWriteToMemory())
        return false;

    MaxAlign = std::max(MaxAlign, LI->getAlignment());
  }

  // Okay, we know that we have one or more loads in the same block as the PHI.
  // We can transform this if it is safe to push the loads into the predecessor
  // blocks. The only thing to watch out for is that we can't put a possibly
  // trapping load in the predecessor if it is a critical edge.
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
    BasicBlock *Pred = PN->getIncomingBlock(i);

    // If the predecessor has a single successor, then the edge isn't critical.
    if (Pred->getTerminator()->getNumSuccessors() == 1)
      continue;

    Value *InVal = PN->getIncomingValue(i);

    // If the InVal is an invoke in the pred, we can't put a load on the edge.
    if (InvokeInst *II = dyn_cast<InvokeInst>(InVal))
      if (II->getParent() == Pred)
        return false;

    // If this pointer is always safe to load, or if we can prove that there
    // is already a load in the block, then we can move the load to the pred
    // block.
    if (InVal->isDereferenceablePointer() ||
        isSafeToLoadUnconditionally(InVal, Pred->getTerminator(), MaxAlign, TD))
      continue;

    return false;
  }

  return true;
}


/// tryToMakeAllocaBePromotable - This returns true if the alloca only has
/// direct (non-volatile) loads and stores to it. If the alloca is close but
/// not quite there, this will transform the code to allow promotion. As such,
/// it is a non-pure predicate.
static bool tryToMakeAllocaBePromotable(AllocaInst *AI, const TargetData *TD) {
  SetVector<Instruction*, SmallVector<Instruction*, 4>,
            SmallPtrSet<Instruction*, 4> > InstsToRewrite;

  for (Value::use_iterator UI = AI->use_begin(), UE = AI->use_end();
       UI != UE; ++UI) {
    User *U = *UI;
    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (LI->isVolatile())
        return false;
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == AI || SI->isVolatile())
        return false;   // Don't allow a store OF the AI, only INTO the AI.
      continue;
    }

    if (SelectInst *SI = dyn_cast<SelectInst>(U)) {
      // If the condition being selected on is a constant, fold the select;
      // this does (rarely) happen early on.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(SI->getCondition())) {
        Value *Result = SI->getOperand(1+CI->isZero());
        SI->replaceAllUsesWith(Result);
        SI->eraseFromParent();

        // This is very rare and we just scrambled the use list of AI, start
        // over completely.
        return tryToMakeAllocaBePromotable(AI, TD);
      }

      // If it is safe to turn "load (select c, AI, ptr)" into a select of two
      // loads, then we can transform this by rewriting the select.
      if (!isSafeSelectToSpeculate(SI, TD))
        return false;

      InstsToRewrite.insert(SI);
      continue;
    }

    if (PHINode *PN = dyn_cast<PHINode>(U)) {
      if (PN->use_empty()) {  // Dead PHIs can be stripped.
        InstsToRewrite.insert(PN);
        continue;
      }

      // If it is safe to turn "load (phi [AI, ptr, ...])" into a PHI of loads
      // in the pred blocks, then we can transform this by rewriting the PHI.
      if (!isSafePHIToSpeculate(PN, TD))
        return false;

      InstsToRewrite.insert(PN);
      continue;
    }

    return false;
  }

  // If there are no instructions to rewrite, then all uses are load/stores and
  // we're done!
  if (InstsToRewrite.empty())
    return true;

  // If we have instructions that need to be rewritten for this to be
  // promotable, take care of it now.
  for (unsigned i = 0, e = InstsToRewrite.size(); i != e; ++i) {
    if (SelectInst *SI = dyn_cast<SelectInst>(InstsToRewrite[i])) {
      // Selects in InstsToRewrite only have load uses. Rewrite each as two
      // loads with a new select.
      while (!SI->use_empty()) {
        LoadInst *LI = cast<LoadInst>(SI->use_back());

        IRBuilder<> Builder(LI);
        LoadInst *TrueLoad =
          Builder.CreateLoad(SI->getTrueValue(), LI->getName()+".t");
        LoadInst *FalseLoad =
          Builder.CreateLoad(SI->getFalseValue(), LI->getName()+".f");

        // Transfer alignment and TBAA info if present.
        TrueLoad->setAlignment(LI->getAlignment());
        FalseLoad->setAlignment(LI->getAlignment());
        if (MDNode *Tag = LI->getMetadata(LLVMContext::MD_tbaa)) {
          TrueLoad->setMetadata(LLVMContext::MD_tbaa, Tag);
          FalseLoad->setMetadata(LLVMContext::MD_tbaa, Tag);
        }

        Value *V = Builder.CreateSelect(SI->getCondition(), TrueLoad,
                                        FalseLoad);
        V->takeName(LI);
        LI->replaceAllUsesWith(V);
        LI->eraseFromParent();
      }

      // Now that all the loads are gone, the select is gone too.
      SI->eraseFromParent();
      continue;
    }

    // Otherwise, we have a PHI node which allows us to push the loads into the
    // predecessors.
    PHINode *PN = cast<PHINode>(InstsToRewrite[i]);
    if (PN->use_empty()) {
      PN->eraseFromParent();
      continue;
    }

    const Type *LoadTy = cast<PointerType>(PN->getType())->getElementType();
    PHINode *NewPN = PHINode::Create(LoadTy, PN->getNumIncomingValues(),
                                     PN->getName()+".ld", PN);

    // Get the TBAA tag and alignment to use from one of the loads. It doesn't
    // matter which one we get; if any differ, it doesn't matter.
    LoadInst *SomeLoad = cast<LoadInst>(PN->use_back());
    MDNode *TBAATag = SomeLoad->getMetadata(LLVMContext::MD_tbaa);
    unsigned Align = SomeLoad->getAlignment();

    // Rewrite all loads of the PN to use the new PHI.
    while (!PN->use_empty()) {
      LoadInst *LI = cast<LoadInst>(PN->use_back());
      LI->replaceAllUsesWith(NewPN);
      LI->eraseFromParent();
    }

    // Inject loads into all of the pred blocks. Keep track of which blocks we
    // insert them into in case we have multiple edges from the same block.
    DenseMap<BasicBlock*, LoadInst*> InsertedLoads;

    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      BasicBlock *Pred = PN->getIncomingBlock(i);
      LoadInst *&Load = InsertedLoads[Pred];
      if (Load == 0) {
        Load = new LoadInst(PN->getIncomingValue(i),
                            PN->getName() + "." + Pred->getName(),
                            Pred->getTerminator());
        Load->setAlignment(Align);
        if (TBAATag) Load->setMetadata(LLVMContext::MD_tbaa, TBAATag);
      }

      NewPN->addIncoming(Load, Pred);
    }

    PN->eraseFromParent();
  }

  ++NumAdjusted;
  return true;
}
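
// Illustrative sketch of the select rewrite above (names follow the ".t"/".f"
// suffixes used in the code):
//   %P = select i1 %c, i32* %AI, i32* %G
//   %V = load i32* %P
// becomes
//   %V.t = load i32* %AI
//   %V.f = load i32* %G
//   %V = select i1 %c, i32 %V.t, i32 %V.f
// leaving %AI with only direct loads, and hence promotable.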

bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  DominatorTree *DT = 0;
  if (HasDomTree)
    DT = &getAnalysis<DominatorTree>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function
  DIBuilder DIB(*F.getParent());
  bool Changed = false;
  SmallVector<Instruction*, 64> Insts;
  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions in
    // the entry node
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (tryToMakeAllocaBePromotable(AI, TD))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    if (HasDomTree)
      PromoteMemToReg(Allocas, *DT);
    else {
      SSAUpdater SSA;
      for (unsigned i = 0, e = Allocas.size(); i != e; ++i) {
        AllocaInst *AI = Allocas[i];

        // Build list of instructions to promote.
        for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
             UI != E; ++UI)
          Insts.push_back(cast<Instruction>(*UI));
        AllocaPromoter(Insts, SSA, &DIB).run(AI, Insts);
        Insts.clear();
      }
    }
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}


/// ShouldAttemptScalarRepl - Decide if an alloca is a good candidate for
/// SROA. It must be a struct or array type with a small number of elements.
static bool ShouldAttemptScalarRepl(AllocaInst *AI) {
  const Type *T = AI->getAllocatedType();
  // Do not promote any struct into more than 32 separate vars.
  if (const StructType *ST = dyn_cast<StructType>(T))
    return ST->getNumElements() <= 32;
  // Arrays are much less likely to be safe for SROA; only consider
  // them if they are very small.
  if (const ArrayType *AT = dyn_cast<ArrayType>(T))
    return AT->getNumElements() <= 8;
  return false;
}
1498 // performScalarRepl - A simple worklist-driven algorithm that visits all of
1499 // the alloca instructions in the function, breaking them up into scalar
1500 // allocas or removing them when their uses allow it.
1502 bool SROA::performScalarRepl(Function &F) {
1503 std::vector<AllocaInst*> WorkList;
1505 // Scan the entry basic block, adding allocas to the worklist.
1506 BasicBlock &BB = F.getEntryBlock();
1507 for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
1508 if (AllocaInst *A = dyn_cast<AllocaInst>(I))
1509 WorkList.push_back(A);
1511 // Process the worklist
1512 bool Changed = false;
1513 while (!WorkList.empty()) {
1514 AllocaInst *AI = WorkList.back();
1515 WorkList.pop_back();
1517 // Handle dead allocas trivially. These can be formed by SROA'ing arrays
1518 // with unused elements.
1519 if (AI->use_empty()) {
1520 AI->eraseFromParent();
1521 Changed = true;
1522 continue;
1525 // If this alloca is impossible for us to promote, reject it early.
1526 if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized())
1527 continue;
1529 // Check to see if this allocation is only modified by a memcpy/memmove from
1530 // a constant global. If this is the case, we can change all users to use
1531 // the constant global instead. This is commonly produced by the C frontend
1532 // for constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if
1533 // 'A' is only subsequently read.
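// A rough sketch of that rewrite (illustrative IR only): given
//   %A = alloca [9 x i32]
// whose only modification is a memcpy from a constant global @C, and which
// is otherwise only read, all uses of %A are replaced with a bitcast of @C
// and both the alloca and the memcpy are deleted.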
1534 SmallVector<Instruction *, 4> ToDelete;
1535 if (MemTransferInst *Copy = isOnlyCopiedFromConstantGlobal(AI, ToDelete)) {
1536 DEBUG(dbgs() << "Found alloca equal to global: " << *AI << '\n');
1537 DEBUG(dbgs() << " memcpy = " << *Copy << '\n');
1538 for (unsigned i = 0, e = ToDelete.size(); i != e; ++i)
1539 ToDelete[i]->eraseFromParent();
1540 Constant *TheSrc = cast<Constant>(Copy->getSource());
1541 AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
1542 Copy->eraseFromParent(); // Don't mutate the global.
1543 AI->eraseFromParent();
1544 ++NumGlobals;
1545 Changed = true;
1546 continue;
1549 // Check to see if we can perform the core SROA transformation. We cannot
1550 // transform the allocation instruction if it is an array allocation
1551 // (allocations OF arrays are ok though), and an allocation of a scalar
1552 // value cannot be decomposed at all.
1553 uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());
1555 // Do not promote [0 x %struct].
1556 if (AllocaSize == 0) continue;
1558 // Do not promote any struct whose size is too big.
1559 if (AllocaSize > SRThreshold) continue;
1561 // If the alloca looks like a good candidate for scalar replacement, and if
1562 // all its users can be transformed, then split up the aggregate into its
1563 // separate elements.
1564 if (ShouldAttemptScalarRepl(AI) && isSafeAllocaToScalarRepl(AI)) {
1565 DoScalarReplacement(AI, WorkList);
1566 Changed = true;
1567 continue;
1570 // Try to turn this aggregate value (potentially with casts) into a
1571 // simple scalar value that can be mem2reg'd into a register.
1572 // IsNotTrivial tracks whether this is something that mem2reg could have
1573 // promoted itself. If so, we don't want to transform it needlessly. Note
1574 // that we can't just check based on the type: the alloca may be of an i32
1575 // but that has pointer arithmetic to set byte 3 of it or something.
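// A sketch of the idea (hypothetical example): an alloca of { i16, i16 }
// that is only ever loaded and stored as a whole i32 through a bitcast
// pointer can be converted into a plain "alloca i32", which mem2reg can
// then promote, even though per-element SROA would not apply.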
1576 if (AllocaInst *NewAI =
1577 ConvertToScalarInfo((unsigned)AllocaSize, *TD).TryConvert(AI)) {
1578 NewAI->takeName(AI);
1579 AI->eraseFromParent();
1580 ++NumConverted;
1581 Changed = true;
1582 continue;
1585 // Otherwise, couldn't process this alloca.
1588 return Changed;
1591 /// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
1592 /// predicate; do SROA on it now.
1593 void SROA::DoScalarReplacement(AllocaInst *AI,
1594 std::vector<AllocaInst*> &WorkList) {
1595 DEBUG(dbgs() << "Found inst to SROA: " << *AI << '\n');
1596 SmallVector<AllocaInst*, 32> ElementAllocas;
1597 if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
1598 ElementAllocas.reserve(ST->getNumContainedTypes());
1599 for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
1600 AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
1601 AI->getAlignment(),
1602 AI->getName() + "." + Twine(i), AI);
1603 ElementAllocas.push_back(NA);
1604 WorkList.push_back(NA); // Add to worklist for recursive processing
1606 } else {
1607 const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
1608 ElementAllocas.reserve(AT->getNumElements());
1609 const Type *ElTy = AT->getElementType();
1610 for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
1611 AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
1612 AI->getName() + "." + Twine(i), AI);
1613 ElementAllocas.push_back(NA);
1614 WorkList.push_back(NA); // Add to worklist for recursive processing
1618 // Now that we have created the new alloca instructions, rewrite all the
1619 // uses of the old alloca.
1620 RewriteForScalarRepl(AI, AI, 0, ElementAllocas);
1622 // Now erase any instructions that were made dead while rewriting the alloca.
1623 DeleteDeadInstructions();
1624 AI->eraseFromParent();
1626 ++NumReplaced;
1629 /// DeleteDeadInstructions - Erase instructions on the DeadInsts list,
1630 /// recursively including all their operands that become trivially dead.
1631 void SROA::DeleteDeadInstructions() {
1632 while (!DeadInsts.empty()) {
1633 Instruction *I = cast<Instruction>(DeadInsts.pop_back_val());
1635 for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
1636 if (Instruction *U = dyn_cast<Instruction>(*OI)) {
1637 // Zero out the operand and see if it becomes trivially dead.
1638 // (But, don't add allocas to the dead instruction list -- they are
1639 // already on the worklist and will be deleted separately.)
1640 *OI = 0;
1641 if (isInstructionTriviallyDead(U) && !isa<AllocaInst>(U))
1642 DeadInsts.push_back(U);
1645 I->eraseFromParent();
1649 /// isSafeForScalarRepl - Check if instruction I is a safe use with regard to
1650 /// performing scalar replacement of alloca AI. The results are flagged in
1651 /// the Info parameter. Offset indicates the position within AI that is
1652 /// referenced by this instruction.
1653 void SROA::isSafeForScalarRepl(Instruction *I, uint64_t Offset,
1654 AllocaInfo &Info) {
1655 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) {
1656 Instruction *User = cast<Instruction>(*UI);
1658 if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
1659 isSafeForScalarRepl(BC, Offset, Info);
1660 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
1661 uint64_t GEPOffset = Offset;
1662 isSafeGEP(GEPI, GEPOffset, Info);
1663 if (!Info.isUnsafe)
1664 isSafeForScalarRepl(GEPI, GEPOffset, Info);
1665 } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
1666 ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1667 if (Length == 0)
1668 return MarkUnsafe(Info, User);
1669 isSafeMemAccess(Offset, Length->getZExtValue(), 0,
1670 UI.getOperandNo() == 0, Info, MI,
1671 true /*AllowWholeAccess*/);
1672 } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
1673 if (LI->isVolatile())
1674 return MarkUnsafe(Info, User);
1675 const Type *LIType = LI->getType();
1676 isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),
1677 LIType, false, Info, LI, true /*AllowWholeAccess*/);
1678 Info.hasALoadOrStore = true;
1680 } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
1681 // Store is ok if storing INTO the pointer, not storing the pointer
1682 if (SI->isVolatile() || SI->getOperand(0) == I)
1683 return MarkUnsafe(Info, User);
1685 const Type *SIType = SI->getOperand(0)->getType();
1686 isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType),
1687 SIType, true, Info, SI, true /*AllowWholeAccess*/);
1688 Info.hasALoadOrStore = true;
1689 } else if (isa<PHINode>(User) || isa<SelectInst>(User)) {
1690 isSafePHISelectUseForScalarRepl(User, Offset, Info);
1691 } else {
1692 return MarkUnsafe(Info, User);
1694 if (Info.isUnsafe) return;
1699 /// isSafePHISelectUseForScalarRepl - If we see a PHI or select using a pointer
1700 /// derived from the alloca, we can often still split the alloca into elements.
1701 /// This is useful if we have a large alloca where one element is phi'd
1702 /// together somewhere: we can SRoA and promote all the other elements even if
1703 /// we end up not being able to promote this one.
1705 /// All we require is that the uses of the PHI do not index into other parts of
1706 /// the alloca. The most important use case for this is single loads and stores
1707 /// that are PHI'd together, which can happen due to code sinking.
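/// A hypothetical sketch of the kind of use this accepts:
///   %elt = getelementptr { i32, i32 }* %A, i32 0, i32 1
///   %p = phi i32* [ %elt, %then ], [ %elt2, %else ]
///   %v = load i32* %p
/// The PHI'd pointers stay within single elements, so the rest of the
/// aggregate can still be split even if this element cannot be promoted.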
1708 void SROA::isSafePHISelectUseForScalarRepl(Instruction *I, uint64_t Offset,
1709 AllocaInfo &Info) {
1710 // If we've already checked this PHI, don't do it again.
1711 if (PHINode *PN = dyn_cast<PHINode>(I))
1712 if (!Info.CheckedPHIs.insert(PN))
1713 return;
1715 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E; ++UI) {
1716 Instruction *User = cast<Instruction>(*UI);
1718 if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
1719 isSafePHISelectUseForScalarRepl(BC, Offset, Info);
1720 } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
1721 // Only allow "bitcast" GEPs for simplicity. We could generalize this,
1722 // but would have to prove that we're staying inside of an element being
1723 // promoted.
1724 if (!GEPI->hasAllZeroIndices())
1725 return MarkUnsafe(Info, User);
1726 isSafePHISelectUseForScalarRepl(GEPI, Offset, Info);
1727 } else if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
1728 if (LI->isVolatile())
1729 return MarkUnsafe(Info, User);
1730 const Type *LIType = LI->getType();
1731 isSafeMemAccess(Offset, TD->getTypeAllocSize(LIType),
1732 LIType, false, Info, LI, false /*AllowWholeAccess*/);
1733 Info.hasALoadOrStore = true;
1735 } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
1736 // Store is ok if storing INTO the pointer, not storing the pointer
1737 if (SI->isVolatile() || SI->getOperand(0) == I)
1738 return MarkUnsafe(Info, User);
1740 const Type *SIType = SI->getOperand(0)->getType();
1741 isSafeMemAccess(Offset, TD->getTypeAllocSize(SIType),
1742 SIType, true, Info, SI, false /*AllowWholeAccess*/);
1743 Info.hasALoadOrStore = true;
1744 } else if (isa<PHINode>(User) || isa<SelectInst>(User)) {
1745 isSafePHISelectUseForScalarRepl(User, Offset, Info);
1746 } else {
1747 return MarkUnsafe(Info, User);
1749 if (Info.isUnsafe) return;
1753 /// isSafeGEP - Check if a GEP instruction can be handled for scalar
1754 /// replacement. It is safe when all the indices are constant, in-bounds
1755 /// references, and when the resulting offset corresponds to an element within
1756 /// the alloca type. The results are flagged in the Info parameter. Upon
1757 /// return, Offset is adjusted as specified by the GEP indices.
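/// For example (assuming i32 is 4 bytes), the constant GEP
///   getelementptr [4 x i32]* %A, i32 0, i32 2
/// adds 8 to Offset, which is safe because offset 8 names element 2 of the
/// alloca; a variable array index would be flagged unsafe instead.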
1758 void SROA::isSafeGEP(GetElementPtrInst *GEPI,
1759 uint64_t &Offset, AllocaInfo &Info) {
1760 gep_type_iterator GEPIt = gep_type_begin(GEPI), E = gep_type_end(GEPI);
1761 if (GEPIt == E)
1762 return;
1764 // Walk through the GEP type indices, checking the types that this indexes
1765 // into.
1766 for (; GEPIt != E; ++GEPIt) {
1767 // Ignore struct elements, no extra checking needed for these.
1768 if ((*GEPIt)->isStructTy())
1769 continue;
1771 ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPIt.getOperand());
1772 if (!IdxVal)
1773 return MarkUnsafe(Info, GEPI);
1776 // Compute the offset due to this GEP and check if the alloca has a
1777 // component element at that offset.
1778 SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end());
1779 Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(),
1780 &Indices[0], Indices.size());
1781 if (!TypeHasComponent(Info.AI->getAllocatedType(), Offset, 0))
1782 MarkUnsafe(Info, GEPI);
1785 /// isHomogeneousAggregate - Check if type T is a struct or array containing
1786 /// elements of the same type (which is always true for arrays). If so,
1787 /// return true with NumElts and EltTy set to the number of elements and the
1788 /// element type, respectively.
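/// E.g. [4 x float] and { i32, i32, i32 } are homogeneous (EltTy float/i32,
/// NumElts 4/3), while { i32, float } is not.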
1789 static bool isHomogeneousAggregate(const Type *T, unsigned &NumElts,
1790 const Type *&EltTy) {
1791 if (const ArrayType *AT = dyn_cast<ArrayType>(T)) {
1792 NumElts = AT->getNumElements();
1793 EltTy = (NumElts == 0 ? 0 : AT->getElementType());
1794 return true;
1796 if (const StructType *ST = dyn_cast<StructType>(T)) {
1797 NumElts = ST->getNumContainedTypes();
1798 EltTy = (NumElts == 0 ? 0 : ST->getContainedType(0));
1799 for (unsigned n = 1; n < NumElts; ++n) {
1800 if (ST->getContainedType(n) != EltTy)
1801 return false;
1803 return true;
1805 return false;
1808 /// isCompatibleAggregate - Check if T1 and T2 are either the same type or are
1809 /// "homogeneous" aggregates with the same element type and number of elements.
1810 static bool isCompatibleAggregate(const Type *T1, const Type *T2) {
1811 if (T1 == T2)
1812 return true;
1814 unsigned NumElts1, NumElts2;
1815 const Type *EltTy1, *EltTy2;
1816 if (isHomogeneousAggregate(T1, NumElts1, EltTy1) &&
1817 isHomogeneousAggregate(T2, NumElts2, EltTy2) &&
1818 NumElts1 == NumElts2 &&
1819 EltTy1 == EltTy2)
1820 return true;
1822 return false;
1825 /// isSafeMemAccess - Check if a load/store/memcpy operates on the entire AI
1826 /// alloca or has an offset and size that corresponds to a component element
1827 /// within it. The offset checked here may have been formed from a GEP with a
1828 /// pointer bitcasted to a different type.
1830 /// If AllowWholeAccess is true, then this allows uses of the entire alloca as a
1831 /// unit. If false, it only allows accesses known to be in a single element.
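/// E.g. for "%A = alloca { i32, i32 }" (8 bytes), a whole-alloca i64 access
/// is accepted only when AllowWholeAccess is true, while an i32 access at
/// offset 4 is accepted either way because it stays inside one element.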
1832 void SROA::isSafeMemAccess(uint64_t Offset, uint64_t MemSize,
1833 const Type *MemOpType, bool isStore,
1834 AllocaInfo &Info, Instruction *TheAccess,
1835 bool AllowWholeAccess) {
1836 // Check if this is a load/store of the entire alloca.
1837 if (Offset == 0 && AllowWholeAccess &&
1838 MemSize == TD->getTypeAllocSize(Info.AI->getAllocatedType())) {
1839 // This can be safe for MemIntrinsics (where MemOpType is 0) and integer
1840 // loads/stores (which are essentially the same as the MemIntrinsics with
1841 // regard to copying padding between elements). But, if an alloca is
1842 // flagged as both a source and destination of such operations, we'll need
1843 // to check later for padding between elements.
1844 if (!MemOpType || MemOpType->isIntegerTy()) {
1845 if (isStore)
1846 Info.isMemCpyDst = true;
1847 else
1848 Info.isMemCpySrc = true;
1849 return;
1851 // This is also safe for references using a type that is compatible with
1852 // the type of the alloca, so that loads/stores can be rewritten using
1853 // insertvalue/extractvalue.
1854 if (isCompatibleAggregate(MemOpType, Info.AI->getAllocatedType())) {
1855 Info.hasSubelementAccess = true;
1856 return;
1859 // Check if the offset/size correspond to a component within the alloca type.
1860 const Type *T = Info.AI->getAllocatedType();
1861 if (TypeHasComponent(T, Offset, MemSize)) {
1862 Info.hasSubelementAccess = true;
1863 return;
1866 return MarkUnsafe(Info, TheAccess);
1869 /// TypeHasComponent - Return true if T has a component type with the
1870 /// specified offset and size. If Size is zero, do not check the size.
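/// E.g. for T = { i32, [4 x i8] } under a typical layout, Offset=6 with
/// Size=1 descends to the i8 at byte 2 of the array and returns true, while
/// Offset=6 with Size=4 would straddle elements and returns false.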
1871 bool SROA::TypeHasComponent(const Type *T, uint64_t Offset, uint64_t Size) {
1872 const Type *EltTy;
1873 uint64_t EltSize;
1874 if (const StructType *ST = dyn_cast<StructType>(T)) {
1875 const StructLayout *Layout = TD->getStructLayout(ST);
1876 unsigned EltIdx = Layout->getElementContainingOffset(Offset);
1877 EltTy = ST->getContainedType(EltIdx);
1878 EltSize = TD->getTypeAllocSize(EltTy);
1879 Offset -= Layout->getElementOffset(EltIdx);
1880 } else if (const ArrayType *AT = dyn_cast<ArrayType>(T)) {
1881 EltTy = AT->getElementType();
1882 EltSize = TD->getTypeAllocSize(EltTy);
1883 if (Offset >= AT->getNumElements() * EltSize)
1884 return false;
1885 Offset %= EltSize;
1886 } else {
1887 return false;
1889 if (Offset == 0 && (Size == 0 || EltSize == Size))
1890 return true;
1891 // Check if the component spans multiple elements.
1892 if (Offset + Size > EltSize)
1893 return false;
1894 return TypeHasComponent(EltTy, Offset, Size);
1897 /// RewriteForScalarRepl - Alloca AI is being split into NewElts, so rewrite
1898 /// the instruction I, which references it, to use the separate elements.
1899 /// Offset indicates the position within AI that is referenced by this
1900 /// instruction.
1901 void SROA::RewriteForScalarRepl(Instruction *I, AllocaInst *AI, uint64_t Offset,
1902 SmallVector<AllocaInst*, 32> &NewElts) {
1903 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI!=E;) {
1904 Use &TheUse = UI.getUse();
1905 Instruction *User = cast<Instruction>(*UI++);
1907 if (BitCastInst *BC = dyn_cast<BitCastInst>(User)) {
1908 RewriteBitCast(BC, AI, Offset, NewElts);
1909 continue;
1912 if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
1913 RewriteGEP(GEPI, AI, Offset, NewElts);
1914 continue;
1917 if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
1918 ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
1919 uint64_t MemSize = Length->getZExtValue();
1920 if (Offset == 0 &&
1921 MemSize == TD->getTypeAllocSize(AI->getAllocatedType()))
1922 RewriteMemIntrinUserOfAlloca(MI, I, AI, NewElts);
1923 // Otherwise the intrinsic can only touch a single element and the
1924 // address operand will be updated, so nothing else needs to be done.
1925 continue;
1928 if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
1929 const Type *LIType = LI->getType();
1931 if (isCompatibleAggregate(LIType, AI->getAllocatedType())) {
1932 // Replace:
1933 // %res = load { i32, i32 }* %alloc
1934 // with:
1935 // %load.0 = load i32* %alloc.0
1936 // %insert.0 = insertvalue { i32, i32 } undef, i32 %load.0, 0
1937 // %load.1 = load i32* %alloc.1
1938 // %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1
1939 // (Also works for arrays instead of structs)
1940 Value *Insert = UndefValue::get(LIType);
1941 IRBuilder<> Builder(LI);
1942 for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
1943 Value *Load = Builder.CreateLoad(NewElts[i], "load");
1944 Insert = Builder.CreateInsertValue(Insert, Load, i, "insert");
1946 LI->replaceAllUsesWith(Insert);
1947 DeadInsts.push_back(LI);
1948 } else if (LIType->isIntegerTy() &&
1949 TD->getTypeAllocSize(LIType) ==
1950 TD->getTypeAllocSize(AI->getAllocatedType())) {
1951 // If this is a load of the entire alloca to an integer, rewrite it.
1952 RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
1954 continue;
1957 if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
1958 Value *Val = SI->getOperand(0);
1959 const Type *SIType = Val->getType();
1960 if (isCompatibleAggregate(SIType, AI->getAllocatedType())) {
1961 // Replace:
1962 // store { i32, i32 } %val, { i32, i32 }* %alloc
1963 // with:
1964 // %val.0 = extractvalue { i32, i32 } %val, 0
1965 // store i32 %val.0, i32* %alloc.0
1966 // %val.1 = extractvalue { i32, i32 } %val, 1
1967 // store i32 %val.1, i32* %alloc.1
1968 // (Also works for arrays instead of structs)
1969 IRBuilder<> Builder(SI);
1970 for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
1971 Value *Extract = Builder.CreateExtractValue(Val, i, Val->getName());
1972 Builder.CreateStore(Extract, NewElts[i]);
1974 DeadInsts.push_back(SI);
1975 } else if (SIType->isIntegerTy() &&
1976 TD->getTypeAllocSize(SIType) ==
1977 TD->getTypeAllocSize(AI->getAllocatedType())) {
1978 // If this is a store of the entire alloca from an integer, rewrite it.
1979 RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
1981 continue;
1984 if (isa<SelectInst>(User) || isa<PHINode>(User)) {
1985 // If we have a PHI user of the alloca itself (as opposed to a GEP or
1986 // bitcast) we have to rewrite it. GEP and bitcast uses will be RAUW'd to
1987 // the new pointer.
1988 if (!isa<AllocaInst>(I)) continue;
1990 assert(Offset == 0 && NewElts[0] &&
1991 "Direct alloca use should have a zero offset");
1993 // If we have a use of the alloca, we know the derived uses will be
1994 // utilizing just the first element of the scalarized result. Insert a
1995 // bitcast of the first alloca before the user as required.
1996 AllocaInst *NewAI = NewElts[0];
1997 BitCastInst *BCI = new BitCastInst(NewAI, AI->getType(), "", NewAI);
1998 NewAI->moveBefore(BCI);
1999 TheUse = BCI;
2000 continue;
2005 /// RewriteBitCast - Update a bitcast reference to the alloca being replaced
2006 /// and recursively continue updating all of its uses.
2007 void SROA::RewriteBitCast(BitCastInst *BC, AllocaInst *AI, uint64_t Offset,
2008 SmallVector<AllocaInst*, 32> &NewElts) {
2009 RewriteForScalarRepl(BC, AI, Offset, NewElts);
2010 if (BC->getOperand(0) != AI)
2011 return;
2013 // The bitcast references the original alloca. Replace its uses with
2014 // references to the first new element alloca.
2015 Instruction *Val = NewElts[0];
2016 if (Val->getType() != BC->getDestTy()) {
2017 Val = new BitCastInst(Val, BC->getDestTy(), "", BC);
2018 Val->takeName(BC);
2020 BC->replaceAllUsesWith(Val);
2021 DeadInsts.push_back(BC);
2024 /// FindElementAndOffset - Return the index of the element containing Offset
2025 /// within the specified type, which must be either a struct or an array.
2026 /// Sets T to the type of the element and Offset to the offset within that
2027 /// element. IdxTy is set to the type of the index result to be used in a
2028 /// GEP instruction.
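/// Worked example (assuming i32 is 4 bytes): for T = { i32, i32 } and
/// Offset = 4, this returns Idx = 1 and sets T to i32, Offset to 0, and
/// IdxTy to i32 (the array case uses i64 for IdxTy instead).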
2029 uint64_t SROA::FindElementAndOffset(const Type *&T, uint64_t &Offset,
2030 const Type *&IdxTy) {
2031 uint64_t Idx = 0;
2032 if (const StructType *ST = dyn_cast<StructType>(T)) {
2033 const StructLayout *Layout = TD->getStructLayout(ST);
2034 Idx = Layout->getElementContainingOffset(Offset);
2035 T = ST->getContainedType(Idx);
2036 Offset -= Layout->getElementOffset(Idx);
2037 IdxTy = Type::getInt32Ty(T->getContext());
2038 return Idx;
2040 const ArrayType *AT = cast<ArrayType>(T);
2041 T = AT->getElementType();
2042 uint64_t EltSize = TD->getTypeAllocSize(T);
2043 Idx = Offset / EltSize;
2044 Offset -= Idx * EltSize;
2045 IdxTy = Type::getInt64Ty(T->getContext());
2046 return Idx;
2049 /// RewriteGEP - Check if this GEP instruction moves the pointer across
2050 /// elements of the alloca that are being split apart, and if so, rewrite
2051 /// the GEP to be relative to the new element.
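/// Sketch (hypothetical IR): with %A = alloca { i32, i32 } split into %A.0
/// and %A.1, a GEP such as
///   %p = getelementptr { i32, i32 }* %A, i32 0, i32 1
/// lands in element 1, so its uses are rewritten in terms of %A.1.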
2052 void SROA::RewriteGEP(GetElementPtrInst *GEPI, AllocaInst *AI, uint64_t Offset,
2053 SmallVector<AllocaInst*, 32> &NewElts) {
2054 uint64_t OldOffset = Offset;
2055 SmallVector<Value*, 8> Indices(GEPI->op_begin() + 1, GEPI->op_end());
2056 Offset += TD->getIndexedOffset(GEPI->getPointerOperandType(),
2057 &Indices[0], Indices.size());
2059 RewriteForScalarRepl(GEPI, AI, Offset, NewElts);
2061 const Type *T = AI->getAllocatedType();
2062 const Type *IdxTy;
2063 uint64_t OldIdx = FindElementAndOffset(T, OldOffset, IdxTy);
2064 if (GEPI->getOperand(0) == AI)
2065 OldIdx = ~0ULL; // Force the GEP to be rewritten.
2067 T = AI->getAllocatedType();
2068 uint64_t EltOffset = Offset;
2069 uint64_t Idx = FindElementAndOffset(T, EltOffset, IdxTy);
2071 // If this GEP does not move the pointer across elements of the alloca
2072 // being split, then it does not need to be rewritten.
2073 if (Idx == OldIdx)
2074 return;
2076 const Type *i32Ty = Type::getInt32Ty(AI->getContext());
2077 SmallVector<Value*, 8> NewArgs;
2078 NewArgs.push_back(Constant::getNullValue(i32Ty));
2079 while (EltOffset != 0) {
2080 uint64_t EltIdx = FindElementAndOffset(T, EltOffset, IdxTy);
2081 NewArgs.push_back(ConstantInt::get(IdxTy, EltIdx));
2083 Instruction *Val = NewElts[Idx];
2084 if (NewArgs.size() > 1) {
2085 Val = GetElementPtrInst::CreateInBounds(Val, NewArgs.begin(),
2086 NewArgs.end(), "", GEPI);
2087 Val->takeName(GEPI);
2089 if (Val->getType() != GEPI->getType())
2090 Val = new BitCastInst(Val, GEPI->getType(), Val->getName(), GEPI);
2091 GEPI->replaceAllUsesWith(Val);
2092 DeadInsts.push_back(GEPI);
2095 /// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
2096 /// Rewrite it to copy or set the elements of the scalarized memory.
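/// E.g. (illustrative only) a whole-aggregate memcpy of { i32, i32 } from
/// %other becomes, for each element, a GEP off of %other plus either a
/// load/store pair (single-value element types) or an element-sized memcpy.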
2097 void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *Inst,
2098 AllocaInst *AI,
2099 SmallVector<AllocaInst*, 32> &NewElts) {
2100 // If this is a memcpy/memmove, construct the other pointer as the
2101 // appropriate type. The "Other" pointer is the one on the side of the
2102 // transfer that has nothing to do with the alloca we are promoting. For
2103 // memset, this Value* stays null.
2104 Value *OtherPtr = 0;
2105 unsigned MemAlignment = MI->getAlignment();
2106 if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcpy
2107 if (Inst == MTI->getRawDest())
2108 OtherPtr = MTI->getRawSource();
2109 else {
2110 assert(Inst == MTI->getRawSource());
2111 OtherPtr = MTI->getRawDest();
2115 // If there is an "Other" pointer, we want to convert it to the same pointer
2116 // type as AI has, so we can GEP through it safely.
2117 if (OtherPtr) {
2118 unsigned AddrSpace =
2119 cast<PointerType>(OtherPtr->getType())->getAddressSpace();
2121 // Remove bitcasts and all-zero GEPs from OtherPtr. This is an
2122 // optimization, but it's also required to detect the corner case where
2123 // both pointer operands are referencing the same memory, and where
2124 // OtherPtr may be a bitcast or GEP that is currently being rewritten. (This
2125 // function is only called for mem intrinsics that access the whole
2126 // aggregate, so non-zero GEPs are not an issue here.)
2127 OtherPtr = OtherPtr->stripPointerCasts();
2129 // Copying the alloca to itself is a no-op: just delete it.
2130 if (OtherPtr == AI || OtherPtr == NewElts[0]) {
2131 // This code will run twice for a no-op memcpy -- once for each operand.
2132 // Put only one reference to MI on the DeadInsts list.
2133 for (SmallVector<Value*, 32>::const_iterator I = DeadInsts.begin(),
2134 E = DeadInsts.end(); I != E; ++I)
2135 if (*I == MI) return;
2136 DeadInsts.push_back(MI);
2137 return;
2140 // If the pointer is not the right type, insert a bitcast to the right
2141 // type.
2142 const Type *NewTy =
2143 PointerType::get(AI->getType()->getElementType(), AddrSpace);
2145 if (OtherPtr->getType() != NewTy)
2146 OtherPtr = new BitCastInst(OtherPtr, NewTy, OtherPtr->getName(), MI);
2149 // Process each element of the aggregate.
2150 bool SROADest = MI->getRawDest() == Inst;
2152 Constant *Zero = Constant::getNullValue(Type::getInt32Ty(MI->getContext()));
2154 for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
2155 // If this is a memcpy/memmove, emit a GEP of the other element address.
2156 Value *OtherElt = 0;
2157 unsigned OtherEltAlign = MemAlignment;
2159 if (OtherPtr) {
2160 Value *Idx[2] = { Zero,
2161 ConstantInt::get(Type::getInt32Ty(MI->getContext()), i) };
2162 OtherElt = GetElementPtrInst::CreateInBounds(OtherPtr, Idx, Idx + 2,
2163 OtherPtr->getName()+"."+Twine(i),
2164 MI);
2165 uint64_t EltOffset;
2166 const PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
2167 const Type *OtherTy = OtherPtrTy->getElementType();
2168 if (const StructType *ST = dyn_cast<StructType>(OtherTy)) {
2169 EltOffset = TD->getStructLayout(ST)->getElementOffset(i);
2170 } else {
2171 const Type *EltTy = cast<SequentialType>(OtherTy)->getElementType();
2172 EltOffset = TD->getTypeAllocSize(EltTy)*i;
2175 // The alignment of the other pointer is the guaranteed alignment of the
2176 // element, which is affected by both the known alignment of the whole
2177 // mem intrinsic and the alignment of the element. If the alignment of
2178 // the memcpy (e.g.) is 32 but the element is at a 4-byte offset, then the
2179 // known alignment is just 4 bytes.
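// (Numerically: MinAlign(32, 4) == 4, the largest power of two that
// divides both.)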
2180 OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset);
2183 Value *EltPtr = NewElts[i];
2184 const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();
2186 // If we got down to a scalar, insert a load or store as appropriate.
2187 if (EltTy->isSingleValueType()) {
2188 if (isa<MemTransferInst>(MI)) {
2189 if (SROADest) {
2190 // From Other to Alloca.
2191 Value *Elt = new LoadInst(OtherElt, "tmp", false, OtherEltAlign, MI);
2192 new StoreInst(Elt, EltPtr, MI);
2193 } else {
2194 // From Alloca to Other.
2195 Value *Elt = new LoadInst(EltPtr, "tmp", MI);
2196 new StoreInst(Elt, OtherElt, false, OtherEltAlign, MI);
2198 continue;
2200 assert(isa<MemSetInst>(MI));
2202 // If the stored element is zero (common case), just store a null
2203 // constant.
2204 Constant *StoreVal;
2205 if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getArgOperand(1))) {
2206 if (CI->isZero()) {
2207 StoreVal = Constant::getNullValue(EltTy); // 0.0, null, 0, <0,0>
2208 } else {
2209 // If EltTy is a vector type, get the element type.
2210 const Type *ValTy = EltTy->getScalarType();
2212 // Construct an integer with the right value.
2213 unsigned EltSize = TD->getTypeSizeInBits(ValTy);
2214 APInt OneVal(EltSize, CI->getZExtValue());
2215 APInt TotalVal(OneVal);
2216 // Set each byte.
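// (E.g. a memset value of 0xAB splatted into a 32-bit element yields
// 0xABABABAB.)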
2217 for (unsigned i = 0; 8*i < EltSize; ++i) {
2218 TotalVal = TotalVal.shl(8);
2219 TotalVal |= OneVal;
2222 // Convert the integer value to the appropriate type.
2223 StoreVal = ConstantInt::get(CI->getContext(), TotalVal);
2224 if (ValTy->isPointerTy())
2225 StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
2226 else if (ValTy->isFloatingPointTy())
2227 StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
2228 assert(StoreVal->getType() == ValTy && "Type mismatch!");
2230 // If the requested value was a vector constant, create it.
2231 if (EltTy != ValTy) {
2232 unsigned NumElts = cast<VectorType>(ValTy)->getNumElements();
2233 SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
2234 StoreVal = ConstantVector::get(Elts);
2237 new StoreInst(StoreVal, EltPtr, MI);
2238 continue;
2240 // Otherwise, if we're storing a byte variable, use a memset call for
2241 // this element.
2244 unsigned EltSize = TD->getTypeAllocSize(EltTy);
2246 IRBuilder<> Builder(MI);
2248 // Finally, insert the meminst for this element.
2249 if (isa<MemSetInst>(MI)) {
2250 Builder.CreateMemSet(EltPtr, MI->getArgOperand(1), EltSize,
2251 MI->isVolatile());
2252 } else {
2253 assert(isa<MemTransferInst>(MI));
2254 Value *Dst = SROADest ? EltPtr : OtherElt; // Dest ptr
2255 Value *Src = SROADest ? OtherElt : EltPtr; // Src ptr
2257 if (isa<MemCpyInst>(MI))
2258 Builder.CreateMemCpy(Dst, Src, EltSize, OtherEltAlign, MI->isVolatile());
2259 else
2260 Builder.CreateMemMove(Dst, Src, EltSize, OtherEltAlign, MI->isVolatile());
2263 DeadInsts.push_back(MI);
2266 /// RewriteStoreUserOfWholeAlloca - We found a store of an integer that
2267 /// overwrites the entire allocation. Extract out the pieces of the stored
2268 /// integer and store them individually.
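/// E.g. (sketch, little-endian): "store i64 %x" over a split { i32, i32 }
/// becomes a trunc of %x to i32 stored to element 0, plus an lshr by 32 and
/// trunc stored to element 1.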
2269 void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocaInst *AI,
2270 SmallVector<AllocaInst*, 32> &NewElts){
2271 // Extract each element out of the integer according to its structure offset
2272 // and store the element value to the individual alloca.
2273 Value *SrcVal = SI->getOperand(0);
2274 const Type *AllocaEltTy = AI->getAllocatedType();
2275 uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
2277 IRBuilder<> Builder(SI);
2279 // Handle tail padding by extending the operand
2280 if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
2281 SrcVal = Builder.CreateZExt(SrcVal,
2282 IntegerType::get(SI->getContext(), AllocaSizeBits));
2284 DEBUG(dbgs() << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << '\n' << *SI
2285 << '\n');
2287 // There are two forms here: AI could be an array or struct. Both cases
2288 // have different ways to compute the element offset.
2289 if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
2290 const StructLayout *Layout = TD->getStructLayout(EltSTy);
2292 for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
2293 // Get the number of bits to shift SrcVal to get the value.
2294 const Type *FieldTy = EltSTy->getElementType(i);
2295 uint64_t Shift = Layout->getElementOffsetInBits(i);
2297 if (TD->isBigEndian())
2298 Shift = AllocaSizeBits-Shift-TD->getTypeAllocSizeInBits(FieldTy);
2300 Value *EltVal = SrcVal;
2301 if (Shift) {
2302 Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
2303 EltVal = Builder.CreateLShr(EltVal, ShiftVal, "sroa.store.elt");
2306 // Truncate down to an integer of the right size.
2307 uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);
2309 // Ignore zero-sized fields like {}; they obviously contain no data.
2310 if (FieldSizeBits == 0) continue;
2312 if (FieldSizeBits != AllocaSizeBits)
2313 EltVal = Builder.CreateTrunc(EltVal,
2314 IntegerType::get(SI->getContext(), FieldSizeBits));
2315 Value *DestField = NewElts[i];
2316 if (EltVal->getType() == FieldTy) {
2317 // Storing to an integer field of this size, just do it.
2318 } else if (FieldTy->isFloatingPointTy() || FieldTy->isVectorTy()) {
2319 // Bitcast to the right element type (for fp/vector values).
2320 EltVal = Builder.CreateBitCast(EltVal, FieldTy);
2321 } else {
2322 // Otherwise, bitcast the dest pointer (for aggregates).
2323 DestField = Builder.CreateBitCast(DestField,
2324 PointerType::getUnqual(EltVal->getType()));
2326 new StoreInst(EltVal, DestField, SI);
2329 } else {
2330 const ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
2331 const Type *ArrayEltTy = ATy->getElementType();
2332 uint64_t ElementOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
2333 uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);
2335 uint64_t Shift;
2337 if (TD->isBigEndian())
2338 Shift = AllocaSizeBits-ElementOffset;
2339 else
2340 Shift = 0;
2342 for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
2343 // Ignore zero-sized fields like {}; they obviously contain no data.
2344 if (ElementSizeBits == 0) continue;
2346 Value *EltVal = SrcVal;
2347 if (Shift) {
2348 Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
2349 EltVal = Builder.CreateLShr(EltVal, ShiftVal, "sroa.store.elt");
2352 // Truncate down to an integer of the right size.
2353 if (ElementSizeBits != AllocaSizeBits)
2354 EltVal = Builder.CreateTrunc(EltVal,
2355 IntegerType::get(SI->getContext(),
2356 ElementSizeBits));
2357 Value *DestField = NewElts[i];
2358 if (EltVal->getType() == ArrayEltTy) {
2359 // Storing to an integer field of this size, just do it.
2360 } else if (ArrayEltTy->isFloatingPointTy() ||
2361 ArrayEltTy->isVectorTy()) {
2362 // Bitcast to the right element type (for fp/vector values).
2363 EltVal = Builder.CreateBitCast(EltVal, ArrayEltTy);
2364 } else {
2365 // Otherwise, bitcast the dest pointer (for aggregates).
2366 DestField = Builder.CreateBitCast(DestField,
2367 PointerType::getUnqual(EltVal->getType()));
2369 new StoreInst(EltVal, DestField, SI);
2371 if (TD->isBigEndian())
2372 Shift -= ElementOffset;
2373 else
2374 Shift += ElementOffset;
2378 DeadInsts.push_back(SI);
2381 /// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to
2382 /// an integer. Load the individual pieces to form the aggregate value.
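/// E.g. (sketch, little-endian): "load i64" of a split { i32, i32 } becomes
/// loads of the two element allocas, zexts to i64, a shl of the second
/// value by 32, and an or that reassembles the full integer.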
2383 void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocaInst *AI,
2384 SmallVector<AllocaInst*, 32> &NewElts) {
2385 // Extract each element out of the NewElts according to its structure offset
2386 // and form the result value.
2387 const Type *AllocaEltTy = AI->getAllocatedType();
2388 uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);
2390 DEBUG(dbgs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << '\n' << *LI
2391 << '\n');
2393 // There are two forms here: AI could be an array or struct. Both cases
2394 // have different ways to compute the element offset.
2395 const StructLayout *Layout = 0;
2396 uint64_t ArrayEltBitOffset = 0;
2397 if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
2398 Layout = TD->getStructLayout(EltSTy);
2399 } else {
2400 const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
2401 ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
2404 Value *ResultVal =
2405 Constant::getNullValue(IntegerType::get(LI->getContext(), AllocaSizeBits));
2407 for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
2408 // Load the value from the alloca. If the NewElt is an aggregate, cast
2409 // the pointer to an integer of the same size before doing the load.
2410 Value *SrcField = NewElts[i];
2411 const Type *FieldTy =
2412 cast<PointerType>(SrcField->getType())->getElementType();
2413 uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);
2415 // Ignore zero-sized fields like {}; they obviously contain no data.
2416 if (FieldSizeBits == 0) continue;
2418 const IntegerType *FieldIntTy = IntegerType::get(LI->getContext(),
2419 FieldSizeBits);
2420 if (!FieldTy->isIntegerTy() && !FieldTy->isFloatingPointTy() &&
2421 !FieldTy->isVectorTy())
2422 SrcField = new BitCastInst(SrcField,
2423 PointerType::getUnqual(FieldIntTy),
2424 "", LI);
2425 SrcField = new LoadInst(SrcField, "sroa.load.elt", LI);
2427 // If SrcField is an fp or vector value of the right size that isn't an
2428 // integer type, bitcast it to an integer so we can shift it.
2429 if (SrcField->getType() != FieldIntTy)
2430 SrcField = new BitCastInst(SrcField, FieldIntTy, "", LI);
2432 // Zero extend the field to be the same size as the final alloca so that
2433 // we can shift and insert it.
2434 if (SrcField->getType() != ResultVal->getType())
2435 SrcField = new ZExtInst(SrcField, ResultVal->getType(), "", LI);
2437 // Determine the number of bits to shift SrcField.
2438 uint64_t Shift;
2439 if (Layout) // Struct case.
2440 Shift = Layout->getElementOffsetInBits(i);
2441 else // Array case.
2442 Shift = i*ArrayEltBitOffset;
2444 if (TD->isBigEndian())
2445 Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();
2447 if (Shift) {
2448 Value *ShiftVal = ConstantInt::get(SrcField->getType(), Shift);
2449 SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI);
2452 // Don't create an 'or x, 0' on the first iteration.
2453 if (!isa<Constant>(ResultVal) ||
2454 !cast<Constant>(ResultVal)->isNullValue())
2455 ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI);
2456 else
2457 ResultVal = SrcField;
2460 // Handle tail padding by truncating the result
2461 if (TD->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
2462 ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI);
2464 LI->replaceAllUsesWith(ResultVal);
2465 DeadInsts.push_back(LI);
2468 /// HasPadding - Return true if the specified type has any structure or
2469 /// alignment padding in between the elements that would be split apart
2470 /// by SROA; return false otherwise.
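/// E.g. under a typical TargetData, { i8, i32 } has three bytes of padding
/// after the i8 (so HasPadding returns true), while { i32, i32 } has none.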
2471 static bool HasPadding(const Type *Ty, const TargetData &TD) {
2472 if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
2473 Ty = ATy->getElementType();
2474 return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
2477 // SROA currently handles only Arrays and Structs.
2478 const StructType *STy = cast<StructType>(Ty);
2479 const StructLayout *SL = TD.getStructLayout(STy);
2480 unsigned PrevFieldBitOffset = 0;
2481 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2482 unsigned FieldBitOffset = SL->getElementOffsetInBits(i);
2484 // Check to see if there is any padding between this element and the
2485 // previous one.
2486 if (i) {
2487 unsigned PrevFieldEnd =
2488 PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
2489 if (PrevFieldEnd < FieldBitOffset)
2490 return true;
2492 PrevFieldBitOffset = FieldBitOffset;
2494 // Check for tail padding.
2495 if (unsigned EltCount = STy->getNumElements()) {
2496 unsigned PrevFieldEnd = PrevFieldBitOffset +
2497 TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
2498 if (PrevFieldEnd < SL->getSizeInBits())
2499 return true;
2501 return false;
2504 /// isSafeAllocaToScalarRepl - Check to see if the specified allocation of
2505 /// an aggregate can be broken down into elements. Return true if safe,
2506 /// false otherwise.
2507 bool SROA::isSafeAllocaToScalarRepl(AllocaInst *AI) {
2508 // Loop over the use list of the alloca. We can only transform it if all of
2509 // the users are safe to transform.
2510 AllocaInfo Info(AI);
2512 isSafeForScalarRepl(AI, 0, Info);
2513 if (Info.isUnsafe) {
2514 DEBUG(dbgs() << "Cannot transform: " << *AI << '\n');
2515 return false;
2518 // Okay, we know all the users are promotable. If the aggregate is a memcpy
2519 // source and destination, we have to be careful. In particular, the memcpy
2520 // could be moving around elements that live in structure padding of the LLVM
2521 // types, but may actually be used. In these cases, we refuse to promote the
2522 // struct.
2523 if (Info.isMemCpySrc && Info.isMemCpyDst &&
2524 HasPadding(AI->getAllocatedType(), *TD))
2525 return false;
2527 // If the alloca never has an access to just *part* of it, but is accessed
2528 // via loads and stores, then we should use ConvertToScalarInfo to promote
2529 // the alloca instead of promoting each piece at a time and inserting fission
2530 // and fusion code.
2531 if (!Info.hasSubelementAccess && Info.hasALoadOrStore) {
2532 // If the struct/array just has one element, use basic SRoA.
2533 if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
2534 if (ST->getNumElements() > 1) return false;
2535 } else {
2536 if (cast<ArrayType>(AI->getAllocatedType())->getNumElements() > 1)
2537 return false;
2541 return true;
2546 /// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
2547 /// some part of a constant global variable. This intentionally only accepts
2548 /// constant expressions because we can't rewrite arbitrary instructions.
2549 static bool PointsToConstantGlobal(Value *V) {
2550 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
2551 return GV->isConstant();
2552 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
2553 if (CE->getOpcode() == Instruction::BitCast ||
2554 CE->getOpcode() == Instruction::GetElementPtr)
2555 return PointsToConstantGlobal(CE->getOperand(0));
2556 return false;
2559 /// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
2560 /// pointer to an alloca. Ignore any reads of the pointer; return false if we
2561 /// see any stores or other unknown uses. If we see pointer arithmetic, record
2562 /// (in isOffset) whether it moves the pointer and keep traversing the uses.
2563 /// If we see a memcpy/memmove that targets an unoffset pointer to the alloca,
2564 /// and if the source pointer is a pointer to a constant global, we can
2565 /// optimize this.
2566 static bool
2567 isOnlyCopiedFromConstantGlobal(Value *V, MemTransferInst *&TheCopy,
2568 bool isOffset,
2569 SmallVector<Instruction *, 4> &LifetimeMarkers) {
2570 // We track lifetime intrinsics as we encounter them. If we decide to go
2571 // ahead and replace the value with the global, this lets the caller quickly
2572 // eliminate the markers.
2574 for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
2575 User *U = cast<Instruction>(*UI);
2577 if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
2578 // Ignore non-volatile loads, they are always ok.
2579 if (LI->isVolatile()) return false;
2580 continue;
2583 if (BitCastInst *BCI = dyn_cast<BitCastInst>(U)) {
2584 // If uses of the bitcast are ok, we are ok.
2585 if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset,
2586 LifetimeMarkers))
2587 return false;
2588 continue;
2590 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
2591 // If the GEP has all zero indices, it doesn't offset the pointer;
2592 // otherwise it does.
2593 if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
2594 isOffset || !GEP->hasAllZeroIndices(),
2595 LifetimeMarkers))
2596 return false;
2597 continue;
2600 if (CallSite CS = U) {
2601 // If this is the function being called then we treat it like a load and
2602 // ignore it.
2603 if (CS.isCallee(UI))
2604 continue;
2606 // If this is a readonly/readnone call site, then we know it is just a
2607 // load (but one that potentially returns the value itself), so we can
2608 // ignore it if we know that the value isn't captured.
2609 unsigned ArgNo = CS.getArgumentNo(UI);
2610 if (CS.onlyReadsMemory() &&
2611 (CS.getInstruction()->use_empty() ||
2612 CS.paramHasAttr(ArgNo+1, Attribute::NoCapture)))
2613 continue;
2615 // If this is being passed as a byval argument, the caller is making a
2616 // copy, so it is only a read of the alloca.
2617 if (CS.paramHasAttr(ArgNo+1, Attribute::ByVal))
2618 continue;
2621 // Lifetime intrinsics can be handled by the caller.
2622 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
2623 if (II->getIntrinsicID() == Intrinsic::lifetime_start ||
2624 II->getIntrinsicID() == Intrinsic::lifetime_end) {
2625 assert(II->use_empty() && "Lifetime markers have no result to use!");
2626 LifetimeMarkers.push_back(II);
2627 continue;
2631 // If this isn't our memcpy/memmove, reject it as something we can't
2632 // handle.
2633 MemTransferInst *MI = dyn_cast<MemTransferInst>(U);
2634 if (MI == 0)
2635 return false;
2637 // If the transfer uses the alloca as its source, it is effectively a
2638 // load, so ignore it (unless the transfer is volatile).
2639 if (UI.getOperandNo() == 1) {
2640 if (MI->isVolatile()) return false;
2641 continue;
2644 // If we already have seen a copy, reject the second one.
2645 if (TheCopy) return false;
2647 // If the pointer has been offset from the start of the alloca, we can't
2648 // safely handle this.
2649 if (isOffset) return false;
2651 // If the memintrinsic isn't using the alloca as the dest, reject it.
2652 if (UI.getOperandNo() != 0) return false;
2654 // If the source of the memcpy/move is not a constant global, reject it.
2655 if (!PointsToConstantGlobal(MI->getSource()))
2656 return false;
2658 // Otherwise, the transform is safe. Remember the copy instruction.
2659 TheCopy = MI;
2661 return true;
2664 /// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
2665 /// modified by a copy from a constant global. If we can prove this, we can
2666 /// replace any uses of the alloca with uses of the global directly.
2667 MemTransferInst *
2668 SROA::isOnlyCopiedFromConstantGlobal(AllocaInst *AI,
2669 SmallVector<Instruction*, 4> &ToDelete) {
2670 MemTransferInst *TheCopy = 0;
2671 if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false, ToDelete))
2672 return TheCopy;
2673 return 0;