//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
// they often interact, especially for C++ programs.  As such, iterating
// between SRoA and Mem2Reg until we run out of things to promote works well.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
STATISTIC(NumGlobals,   "Number of allocas copied from constant global");
struct SROA : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid
  explicit SROA(signed T = -1) : FunctionPass(&ID) {

  bool runOnFunction(Function &F);

  bool performScalarRepl(Function &F);
  bool performPromotion(Function &F);

  // getAnalysisUsage - This pass does not require any passes, but we know it
  // will not alter the CFG, so say so.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<DominatorTree>();
    AU.addRequired<DominanceFrontier>();
  /// AllocaInfo - When analyzing uses of an alloca instruction, this captures
  /// information about the uses.  All these fields are initialized to false
  /// and set to true when something is learned.
  struct AllocaInfo {
    /// isUnsafe - This is set to true if the alloca cannot be SROA'd.
    bool isUnsafe : 1;

    /// needsCleanup - This is set to true if there is some use of the alloca
    /// that requires cleanup.
    bool needsCleanup : 1;

    /// isMemCpySrc - This is true if this aggregate is memcpy'd from.
    bool isMemCpySrc : 1;

    /// isMemCpyDst - This is true if this aggregate is memcpy'd into.
    bool isMemCpyDst : 1;

    AllocaInfo()
      : isUnsafe(false), needsCleanup(false),
        isMemCpySrc(false), isMemCpyDst(false) {}
  };

  void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; }

  int isSafeAllocaToScalarRepl(AllocationInst *AI);

  void isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
                             AllocaInfo &Info);
  void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
                        AllocaInfo &Info);
  void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                      unsigned OpNo, AllocaInfo &Info);
  void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI,
                                      AllocaInfo &Info);

  void DoScalarReplacement(AllocationInst *AI,
                           std::vector<AllocationInst*> &WorkList);
  void CleanupGEP(GetElementPtrInst *GEP);
  void CleanupAllocaUsers(AllocationInst *AI);
  AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);

  void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                  SmallVector<AllocaInst*, 32> &NewElts);

  void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
                                    AllocationInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);
  void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocationInst *AI,
                                     SmallVector<AllocaInst*, 32> &NewElts);
  void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);

  bool CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
                          bool &SawVec, uint64_t Offset, unsigned AllocaSize);
  void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset);
  Value *ConvertScalar_ExtractValue(Value *NV, const Type *ToType,
                                    uint64_t Offset, IRBuilder<> &Builder);
  Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
                                   uint64_t Offset, IRBuilder<> &Builder);
  static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
};

static RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) {
  return new SROA(Threshold);
}

bool SROA::runOnFunction(Function &F) {
  TD = getAnalysisIfAvailable<TargetData>();

  bool Changed = performPromotion(F);

  // FIXME: ScalarRepl currently depends on TargetData more than it
  // theoretically needs to. It should be refactored in order to support
  // target-independent IR. Until this is done, just skip the actual
  // scalar-replacement portion of this pass.
  if (!TD) return Changed;

  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  DominatorTree     &DT = getAnalysis<DominatorTree>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    // Find allocas that are safe to promote, by looking at all instructions in
    // the entry block.
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, DT, DF, F.getContext());
    NumPromoted += Allocas.size();
  }

/// getNumSAElements - Return the number of elements in the specified struct
/// or array type.
static uint64_t getNumSAElements(const Type *T) {
  if (const StructType *ST = dyn_cast<StructType>(T))
    return ST->getNumElements();
  return cast<ArrayType>(T)->getNumElements();
}

// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the malloc/alloca instructions in the function, removing
// them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any alloca's and mallocs to the worklist
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      continue;
    }

    // If this alloca is impossible for us to promote, reject it early.
    if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized())
      continue;

    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
      DEBUG(errs() << "Found alloca equal to global: " << *AI);
      DEBUG(errs() << "  memcpy = " << *TheCopy);
      Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
      AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
      TheCopy->eraseFromParent();  // Don't mutate the global.
      AI->eraseFromParent();
      ++NumGlobals;
      continue;
    }

    // Check to see if we can perform the core SROA transformation.  We cannot
    // transform the allocation instruction if it is an array allocation
    // (allocations OF arrays are ok though), and an allocation of a scalar
    // value cannot be decomposed at all.
    uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());

    // Do not promote [0 x %struct].
    if (AllocaSize == 0) continue;

    // Do not promote any struct whose size is too big.
    if (AllocaSize > SRThreshold) continue;

    if ((isa<StructType>(AI->getAllocatedType()) ||
         isa<ArrayType>(AI->getAllocatedType())) &&
        // Do not promote any struct into more than "32" separate vars.
        getNumSAElements(AI->getAllocatedType()) <= SRThreshold/4) {
      // Check that all of the users of the allocation are capable of being
      // transformed.
      switch (isSafeAllocaToScalarRepl(AI)) {
      default: llvm_unreachable("Unexpected value!");
      case 0:  // Not safe to scalar replace.
        break;
      case 1:  // Safe, but requires cleanup/canonicalizations first
        CleanupAllocaUsers(AI);
        // FALL THROUGH.
      case 3:  // Safe to scalar replace.
        DoScalarReplacement(AI, WorkList);
        Changed = true;
        continue;
      }
    }

    // If we can turn this aggregate value (potentially with casts) into a
    // simple scalar value that can be mem2reg'd into a register value.
    // IsNotTrivial tracks whether this is something that mem2reg could have
    // promoted itself.  If so, we don't want to transform it needlessly.  Note
    // that we can't just check based on the type: the alloca may be of an i32
    // but that has pointer arithmetic to set byte 3 of it or something.
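    // Illustrative example (not from a specific test): an "alloca i32" whose
    // users bitcast it to i8* and poke at byte 3 through GEPs is not directly
    // promotable by mem2reg, but can be rewritten below as shifts and masks on
    // a single scalar value.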
    bool IsNotTrivial = false;
    const Type *VectorTy = 0;
    bool HadAVector = false;
    if (CanConvertToScalar(AI, IsNotTrivial, VectorTy, HadAVector,
                           0, unsigned(AllocaSize)) && IsNotTrivial) {
      AllocaInst *NewAI;
      // If we were able to find a vector type that can handle this with
      // insert/extract elements, and if there was at least one use that had
      // a vector type, promote this to a vector.  We don't want to promote
      // random stuff that doesn't use vectors (e.g. <9 x double>) because then
      // we just get a lot of insert/extracts.  If at least one vector is
      // involved, then we probably really do have a union of vector/array.
      if (VectorTy && isa<VectorType>(VectorTy) && HadAVector) {
        DEBUG(errs() << "CONVERT TO VECTOR: " << *AI << "  TYPE = "
                     << *VectorTy << '\n');

        // Create and insert the vector alloca.
        NewAI = new AllocaInst(VectorTy, 0, "", AI->getParent()->begin());
        ConvertUsesToScalar(AI, NewAI, 0);
      } else {
        DEBUG(errs() << "CONVERT TO SCALAR INTEGER: " << *AI << "\n");

        // Create and insert the integer alloca.
        const Type *NewTy = IntegerType::get(AI->getContext(), AllocaSize*8);
        NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin());
        ConvertUsesToScalar(AI, NewAI, 0);
      }
      AI->eraseFromParent();
      continue;
    }

    // Otherwise, couldn't process this alloca.
  }

  return Changed;
}

/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate, do SROA now.
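///
/// For example (illustrative only), an alloca of a two-field struct:
///   %agg = alloca { i32, float }
/// is replaced by one alloca per member:
///   %agg.0 = alloca i32
///   %agg.1 = alloca float
/// and users of %agg are rewritten to address the element allocas directly.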
void SROA::DoScalarReplacement(AllocationInst *AI,
                               std::vector<AllocationInst*> &WorkList) {
  DEBUG(errs() << "Found inst to SROA: " << *AI);
  SmallVector<AllocaInst*, 32> ElementAllocas;
  if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
    ElementAllocas.reserve(ST->getNumContainedTypes());
    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                      AI->getName() + "." + Twine(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  } else {
    const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
    ElementAllocas.reserve(AT->getNumElements());
    const Type *ElTy = AT->getElementType();
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                      AI->getName() + "." + Twine(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  }

  // Now that we have created the alloca instructions that we want to use,
  // expand the getelementptr instructions to use them.
  while (!AI->use_empty()) {
    Instruction *User = cast<Instruction>(AI->use_back());
    if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas);
      BCInst->eraseFromParent();
      continue;
    }

    // Replace:
    //   %res = load { i32, i32 }* %alloc
    // with:
    //   %load.0 = load i32* %alloc.0
    //   %insert.0 = insertvalue { i32, i32 } zeroinitializer, i32 %load.0, 0
    //   %load.1 = load i32* %alloc.1
    //   %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1
    // (Also works for arrays instead of structs)
    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      Value *Insert = UndefValue::get(LI->getType());
      for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
        Value *Load = new LoadInst(ElementAllocas[i], "load", LI);
        Insert = InsertValueInst::Create(Insert, Load, i, "insert", LI);
      }
      LI->replaceAllUsesWith(Insert);
      LI->eraseFromParent();
      continue;
    }

    // Replace:
    //   store { i32, i32 } %val, { i32, i32 }* %alloc
    // with:
    //   %val.0 = extractvalue { i32, i32 } %val, 0
    //   store i32 %val.0, i32* %alloc.0
    //   %val.1 = extractvalue { i32, i32 } %val, 1
    //   store i32 %val.1, i32* %alloc.1
    // (Also works for arrays instead of structs)
    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      Value *Val = SI->getOperand(0);
      for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
        Value *Extract = ExtractValueInst::Create(Val, i, Val->getName(), SI);
        new StoreInst(Extract, ElementAllocas[i], SI);
      }
      SI->eraseFromParent();
      continue;
    }

    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
    // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
    unsigned Idx =
      (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();

    assert(Idx < ElementAllocas.size() && "Index out of range?");
    AllocaInst *AllocaToUse = ElementAllocas[Idx];

    Value *RepValue;
    if (GEPI->getNumOperands() == 3) {
      // Do not insert a new getelementptr instruction with zero indices, only
      // to have it optimized out later.
      RepValue = AllocaToUse;
    } else {
      // We are indexing deeply into the structure, so we still need a
      // getelement ptr instruction to finish the indexing.  This may be
      // expanded itself once the worklist is rerun.
      //
      SmallVector<Value*, 8> NewArgs;
      NewArgs.push_back(Constant::getNullValue(
                                          Type::getInt32Ty(AI->getContext())));
      NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
      RepValue = GetElementPtrInst::Create(AllocaToUse, NewArgs.begin(),
                                           NewArgs.end(), "", GEPI);
      RepValue->takeName(GEPI);
    }

    // If this GEP is to the start of the aggregate, check for memcpys.
    if (Idx == 0 && GEPI->hasAllZeroIndices())
      RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);

    // Move all of the users over to the new GEP.
    GEPI->replaceAllUsesWith(RepValue);
    // Delete the old GEP
    GEPI->eraseFromParent();
  }

  // Finally, delete the Alloca instruction
  AI->eraseFromParent();
  ++NumReplaced;
}

/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.  isFirstElt
/// indicates whether Ptr is known to point to the start of the aggregate.
void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
                            AllocaInfo &Info) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I) {
    Instruction *User = cast<Instruction>(*I);
    switch (User->getOpcode()) {
    case Instruction::Load:  break;
    case Instruction::Store:
      // Store is ok if storing INTO the pointer, not storing the pointer
      if (User->getOperand(0) == Ptr) return MarkUnsafe(Info);
      break;
    case Instruction::GetElementPtr: {
      GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
      bool AreAllZeroIndices = isFirstElt;
      if (GEP->getNumOperands() > 1) {
        if (!isa<ConstantInt>(GEP->getOperand(1)) ||
            !cast<ConstantInt>(GEP->getOperand(1))->isZero())
          // Using pointer arithmetic to navigate the array.
          return MarkUnsafe(Info);

        if (AreAllZeroIndices)
          AreAllZeroIndices = GEP->hasAllZeroIndices();
      }
      isSafeElementUse(GEP, AreAllZeroIndices, AI, Info);
      if (Info.isUnsafe) return;
      break;
    }
    case Instruction::BitCast:
      if (isFirstElt) {
        isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI, Info);
        if (Info.isUnsafe) return;
        break;
      }
      DEBUG(errs() << "  Transformation preventing inst: " << *User);
      return MarkUnsafe(Info);
    case Instruction::Call:
      if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
        if (isFirstElt) {
          isSafeMemIntrinsicOnAllocation(MI, AI, I.getOperandNo(), Info);
          if (Info.isUnsafe) return;
          break;
        }
      }
      DEBUG(errs() << "  Transformation preventing inst: " << *User);
      return MarkUnsafe(Info);
    default:
      DEBUG(errs() << "  Transformation preventing inst: " << *User);
      return MarkUnsafe(Info);
    }
  }
  return;  // All users look ok :)
}

/// AllUsersAreLoads - Return true if all users of this value are loads.
static bool AllUsersAreLoads(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I)
    if (cast<Instruction>(*I)->getOpcode() != Instruction::Load)
      return false;
  return true;
}

/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
                                 AllocaInfo &Info) {
  if (BitCastInst *C = dyn_cast<BitCastInst>(User))
    return isSafeUseOfBitCastedAllocation(C, AI, Info);

  if (LoadInst *LI = dyn_cast<LoadInst>(User))
    if (!LI->isVolatile())
      return;// Loads (returning a first class aggregate) are always rewritable

  if (StoreInst *SI = dyn_cast<StoreInst>(User))
    if (!SI->isVolatile() && SI->getOperand(0) != AI)
      return;// Store is ok if storing INTO the pointer, not storing the pointer

  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User);
  if (GEPI == 0)
    return MarkUnsafe(Info);

  gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI);

  // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>".
  if (I == E ||
      I.getOperand() != Constant::getNullValue(I.getOperand()->getType())) {
    return MarkUnsafe(Info);
  }

  ++I;
  if (I == E) return MarkUnsafe(Info);  // ran out of GEP indices??

  bool IsAllZeroIndices = true;

  // If the first index is a non-constant index into an array, see if we can
  // handle it as a special case.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    if (!isa<ConstantInt>(I.getOperand())) {
      IsAllZeroIndices = 0;
      uint64_t NumElements = AT->getNumElements();

      // If this is an array index and the index is not constant, we cannot
      // promote... that is unless the array has exactly one or two elements in
      // it, in which case we CAN promote it, but we have to canonicalize this
      // out if this is the only problem.
      if ((NumElements == 1 || NumElements == 2) &&
          AllUsersAreLoads(GEPI)) {
        Info.needsCleanup = true;
        return;  // Canonicalization required!
      }
      return MarkUnsafe(Info);
    }
  }

  // Walk through the GEP type indices, checking the types that this indexes
  // into.
  for (; I != E; ++I) {
    // Ignore struct elements, no extra checking needed for these.
    if (isa<StructType>(*I))
      continue;

    ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand());
    if (!IdxVal) return MarkUnsafe(Info);

    // Are all indices still zero?
    IsAllZeroIndices &= IdxVal->isZero();

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      // This GEP indexes an array.  Verify that this is an in-range constant
      // integer.  Specifically, consider A[0][i].  We cannot know that the user
      // isn't doing invalid things like allowing i to index an out-of-range
      // subscript that accesses A[1].  Because of this, we have to reject SROA
      // of any accesses into structs where any of the components are variables.
      if (IdxVal->getZExtValue() >= AT->getNumElements())
        return MarkUnsafe(Info);
    } else if (const VectorType *VT = dyn_cast<VectorType>(*I)) {
      if (IdxVal->getZExtValue() >= VT->getNumElements())
        return MarkUnsafe(Info);
    }
  }

  // If there are any non-simple uses of this getelementptr, make sure to
  // reject them.
  return isSafeElementUse(GEPI, IsAllZeroIndices, AI, Info);
}

/// isSafeMemIntrinsicOnAllocation - Return true if the specified memory
/// intrinsic can be promoted by SROA.  At this point, we know that the operand
/// of the memintrinsic is a pointer to the beginning of the allocation.
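///
/// For instance (illustrative), a call such as
///   call void @llvm.memcpy.i64(i8* %dst, i8* %src, i64 24, i32 4)
/// is only treated as safe when the length (24) is a constant equal to the
/// size of the whole alloca; variable-length or partial copies mark the
/// alloca unsafe.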
void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                          unsigned OpNo, AllocaInfo &Info) {
  // If not constant length, give up.
  ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
  if (!Length) return MarkUnsafe(Info);

  // If not the whole aggregate, give up.
  if (Length->getZExtValue() !=
      TD->getTypeAllocSize(AI->getType()->getElementType()))
    return MarkUnsafe(Info);

  // We only know about memcpy/memset/memmove.
  if (!isa<MemIntrinsic>(MI))
    return MarkUnsafe(Info);

  // Otherwise, we can transform it.  Determine whether this is a memcpy/set
  // into or out of the aggregate.
  if (OpNo == 1)
    Info.isMemCpyDst = true;
  else {
    assert(OpNo == 2);
    Info.isMemCpySrc = true;
  }
}

/// isSafeUseOfBitCastedAllocation - Return true if all users of this bitcast
/// are safe to transform.
void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
                                          AllocaInfo &Info) {
  for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
       UI != E; ++UI) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) {
      isSafeUseOfBitCastedAllocation(BCU, AI, Info);
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
      isSafeMemIntrinsicOnAllocation(MI, AI, UI.getOperandNo(), Info);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      if (SI->isVolatile())
        return MarkUnsafe(Info);

      // If storing the entire alloca in one chunk through a bitcasted pointer
      // to integer, we can transform it.  This happens (for example) when you
      // cast a {i32,i32}* to i64* and store through it.  This is similar to the
      // memcpy case and occurs in various "byval" cases and emulated memcpys.
      if (isa<IntegerType>(SI->getOperand(0)->getType()) &&
          TD->getTypeAllocSize(SI->getOperand(0)->getType()) ==
          TD->getTypeAllocSize(AI->getType()->getElementType())) {
        Info.isMemCpyDst = true;
      } else {
        return MarkUnsafe(Info);
      }
    } else if (LoadInst *LI = dyn_cast<LoadInst>(UI)) {
      if (LI->isVolatile())
        return MarkUnsafe(Info);

      // If loading the entire alloca in one chunk through a bitcasted pointer
      // to integer, we can transform it.  This happens (for example) when you
      // cast a {i32,i32}* to i64* and load through it.  This is similar to the
      // memcpy case and occurs in various "byval" cases and emulated memcpys.
      if (isa<IntegerType>(LI->getType()) &&
          TD->getTypeAllocSize(LI->getType()) ==
          TD->getTypeAllocSize(AI->getType()->getElementType())) {
        Info.isMemCpySrc = true;
      } else {
        return MarkUnsafe(Info);
      }
    } else if (isa<DbgInfoIntrinsic>(UI)) {
      // If one user is DbgInfoIntrinsic then check if all users are
      // DbgInfoIntrinsics.
      if (OnlyUsedByDbgInfoIntrinsics(BC)) {
        Info.needsCleanup = true;
        return;
      }
    } else {
      return MarkUnsafe(Info);
    }
    if (Info.isUnsafe) return;
  }
}

/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
/// to its first element.  Transform users of the cast to use the new values
/// instead.
void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts) {
  Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
  while (UI != UE) {
    Instruction *User = cast<Instruction>(*UI++);
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCU, AI, NewElts);
      if (BCU->use_empty()) BCU->eraseFromParent();
      continue;
    }

    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
      // This must be memcpy/memmove/memset of the entire aggregate.
      // Split into one per element.
      RewriteMemIntrinUserOfAlloca(MI, BCInst, AI, NewElts);
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // If this is a store of the entire alloca from an integer, rewrite it.
      RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
      continue;
    }

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // If this is a load of the entire alloca to an integer, rewrite it.
      RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
      continue;
    }

    // Otherwise it must be some other user of a gep of the first pointer.  Just
    // leave these alone.
    continue;
  }
}

/// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
/// Rewrite it to copy or set the elements of the scalarized memory.
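///
/// Illustrative sketch: a whole-aggregate copy such as
///   call void @llvm.memcpy.i32(i8* %dst, i8* %src, i32 8, i32 4)
/// over an alloca of { i32, i32 } becomes one load/store (or one smaller
/// memcpy/memset) per element alloca, with the "other" pointer GEP'd to the
/// matching element and the alignment reduced to that element's known
/// alignment.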
void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
                                        AllocationInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {

  // If this is a memcpy/memmove, construct the other pointer as the
  // appropriate type.  The "Other" pointer is the pointer that goes to memory
  // that doesn't have anything to do with the alloca that we are promoting. For
  // memset, this Value* stays null.
  Value *OtherPtr = 0;
  LLVMContext &Context = MI->getContext();
  unsigned MemAlignment = MI->getAlignment();
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcopy
    if (BCInst == MTI->getRawDest())
      OtherPtr = MTI->getRawSource();
    else {
      assert(BCInst == MTI->getRawSource());
      OtherPtr = MTI->getRawDest();
    }
  }

  // If there is an other pointer, we want to convert it to the same pointer
  // type as AI has, so we can GEP through it safely.
  if (OtherPtr) {
    // It is likely that OtherPtr is a bitcast, if so, remove it.
    if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
      OtherPtr = BC->getOperand(0);
    // All zero GEPs are effectively bitcasts.
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(OtherPtr))
      if (GEP->hasAllZeroIndices())
        OtherPtr = GEP->getOperand(0);

    if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
      if (BCE->getOpcode() == Instruction::BitCast)
        OtherPtr = BCE->getOperand(0);

    // If the pointer is not the right type, insert a bitcast to the right
    // type.
    if (OtherPtr->getType() != AI->getType())
      OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
                                 MI);
  }

  // Process each element of the aggregate.
  Value *TheFn = MI->getOperand(0);
  const Type *BytePtrTy = MI->getRawDest()->getType();
  bool SROADest = MI->getRawDest() == BCInst;

  Constant *Zero = Constant::getNullValue(Type::getInt32Ty(MI->getContext()));

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // If this is a memcpy/memmove, emit a GEP of the other element address.
    Value *OtherElt = 0;
    unsigned OtherEltAlign = MemAlignment;

    if (OtherPtr) {
      Value *Idx[2] = { Zero,
                      ConstantInt::get(Type::getInt32Ty(MI->getContext()), i) };
      OtherElt = GetElementPtrInst::Create(OtherPtr, Idx, Idx + 2,
                                           OtherPtr->getNameStr()+"."+Twine(i),
                                           MI);
      uint64_t EltOffset;
      const PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
      if (const StructType *ST =
            dyn_cast<StructType>(OtherPtrTy->getElementType())) {
        EltOffset = TD->getStructLayout(ST)->getElementOffset(i);
      } else {
        const Type *EltTy =
          cast<SequentialType>(OtherPtr->getType())->getElementType();
        EltOffset = TD->getTypeAllocSize(EltTy)*i;
      }

      // The alignment of the other pointer is the guaranteed alignment of the
      // element, which is affected by both the known alignment of the whole
      // mem intrinsic and the alignment of the element.  If the alignment of
      // the memcpy (f.e.) is 32 but the element is at a 4-byte offset, then the
      // known alignment is just 4 bytes.
      OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset);
    }

    Value *EltPtr = NewElts[i];
    const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();

    // If we got down to a scalar, insert a load or store as appropriate.
    if (EltTy->isSingleValueType()) {
      if (isa<MemTransferInst>(MI)) {
        if (SROADest) {
          // From Other to Alloca.
          Value *Elt = new LoadInst(OtherElt, "tmp", false, OtherEltAlign, MI);
          new StoreInst(Elt, EltPtr, MI);
        } else {
          // From Alloca to Other.
          Value *Elt = new LoadInst(EltPtr, "tmp", MI);
          new StoreInst(Elt, OtherElt, false, OtherEltAlign, MI);
        }
        continue;
      }
      assert(isa<MemSetInst>(MI));

      // If the stored element is zero (common case), just store a null
      // constant.
      Constant *StoreVal;
      if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
        if (CI->isZero()) {
          StoreVal = Constant::getNullValue(EltTy);  // 0.0, null, 0, <0,0>
        } else {
          // If EltTy is a vector type, get the element type.
          const Type *ValTy = EltTy->getScalarType();

          // Construct an integer with the right value.
          unsigned EltSize = TD->getTypeSizeInBits(ValTy);
          APInt OneVal(EltSize, CI->getZExtValue());
          APInt TotalVal(OneVal);
          // Set each byte.
          for (unsigned i = 0; 8*i < EltSize; ++i) {
            TotalVal = TotalVal.shl(8);
            TotalVal |= OneVal;
          }

          // Convert the integer value to the appropriate type.
          StoreVal = ConstantInt::get(Context, TotalVal);
          if (isa<PointerType>(ValTy))
            StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
          else if (ValTy->isFloatingPoint())
            StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
          assert(StoreVal->getType() == ValTy && "Type mismatch!");

          // If the requested value was a vector constant, create it.
          if (EltTy != ValTy) {
            unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
            SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
            StoreVal = ConstantVector::get(&Elts[0], NumElts);
          }
        }
        new StoreInst(StoreVal, EltPtr, MI);
        continue;
      }
      // Otherwise, if we're storing a byte variable, use a memset call for
      // this element.
    }

    // Cast the element pointer to BytePtrTy.
    if (EltPtr->getType() != BytePtrTy)
      EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);

    // Cast the other pointer (if we have one) to BytePtrTy.
    if (OtherElt && OtherElt->getType() != BytePtrTy)
      OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getNameStr(),
                                 MI);

    unsigned EltSize = TD->getTypeAllocSize(EltTy);

    // Finally, insert the meminst for this element.
    if (isa<MemTransferInst>(MI)) {
      Value *Ops[] = {
        SROADest ? EltPtr : OtherElt,  // Dest ptr
        SROADest ? OtherElt : EltPtr,  // Src ptr
        ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
        // Align
        ConstantInt::get(Type::getInt32Ty(MI->getContext()), OtherEltAlign)
      };
      CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
    } else {
      assert(isa<MemSetInst>(MI));
      Value *Ops[] = {
        EltPtr, MI->getOperand(2),  // Dest, Value,
        ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
        Zero  // Align
      };
      CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
    }
  }
  MI->eraseFromParent();
}

/// RewriteStoreUserOfWholeAlloca - We found a store of an integer that
/// overwrites the entire allocation.  Extract out the pieces of the stored
/// integer and store them individually.
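///
/// For example (illustrative), with %agg scalarized into %agg.0 and %agg.1:
///   store i64 %val, i64* %p        ; %p is a bitcast of { i32, i32 }* %agg
/// becomes, on a little-endian target, roughly:
///   %t0 = trunc i64 %val to i32
///   store i32 %t0, i32* %agg.0
///   %s  = lshr i64 %val, 32
///   %t1 = trunc i64 %s to i32
///   store i32 %t1, i32* %agg.1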
void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocationInst *AI,
                                         SmallVector<AllocaInst*, 32> &NewElts){
  // Extract each element out of the integer according to its structure offset
  // and store the element value to the individual alloca.
  Value *SrcVal = SI->getOperand(0);
  const Type *AllocaEltTy = AI->getType()->getElementType();
  uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);

  // If this isn't a store of an integer to the whole alloca, it may be a store
  // to the first element.  Just ignore the store in this case and normal SROA
  // will handle it.
  if (!isa<IntegerType>(SrcVal->getType()) ||
      TD->getTypeAllocSizeInBits(SrcVal->getType()) != AllocaSizeBits)
    return;
  // Handle tail padding by extending the operand
  if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
    SrcVal = new ZExtInst(SrcVal,
                          IntegerType::get(SI->getContext(), AllocaSizeBits),
                          "", SI);

  DEBUG(errs() << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << *SI);

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    const StructLayout *Layout = TD->getStructLayout(EltSTy);

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Get the number of bits to shift SrcVal to get the value.
      const Type *FieldTy = EltSTy->getElementType(i);
      uint64_t Shift = Layout->getElementOffsetInBits(i);

      if (TD->isBigEndian())
        Shift = AllocaSizeBits-Shift-TD->getTypeAllocSizeInBits(FieldTy);

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

      // Ignore zero sized fields like {}, they obviously contain no data.
      if (FieldSizeBits == 0) continue;

      if (FieldSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal,
                               IntegerType::get(SI->getContext(), FieldSizeBits),
                               "", SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == FieldTy) {
        // Storing to an integer field of this size, just do it.
      } else if (FieldTy->isFloatingPoint() || isa<VectorType>(FieldTy)) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, FieldTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                                    PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);
    }

  } else {
    const ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
    const Type *ArrayEltTy = ATy->getElementType();
    uint64_t ElementOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
    uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);

    uint64_t Shift;
    if (TD->isBigEndian())
      Shift = AllocaSizeBits-ElementOffset;
    else
      Shift = 0;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Ignore zero sized fields like {}, they obviously contain no data.
      if (ElementSizeBits == 0) continue;

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      if (ElementSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal,
                               IntegerType::get(SI->getContext(),
                                                ElementSizeBits), "", SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == ArrayEltTy) {
        // Storing to an integer field of this size, just do it.
      } else if (ArrayEltTy->isFloatingPoint() || isa<VectorType>(ArrayEltTy)) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, ArrayEltTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                                    PointerType::getUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);

      if (TD->isBigEndian())
        Shift -= ElementOffset;
      else
        Shift += ElementOffset;
    }
  }

  SI->eraseFromParent();
}

/// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to
/// an integer.  Load the individual pieces to form the aggregate value.
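///
/// Illustrative sketch: a whole-alloca load
///   %val = load i64* %p            ; %p is a bitcast of { i32, i32 }* %agg
/// is rebuilt from the element allocas with zext/shl/or, roughly:
///   %lo  = load i32* %agg.0
///   %hi  = load i32* %agg.1
///   %val = or i64 (zext %lo), (shl (zext %hi), 32)
/// (shift amounts are mirrored on big-endian targets).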
void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {
  // Extract each element out of the NewElts according to its structure offset
  // and form the result value.
  const Type *AllocaEltTy = AI->getType()->getElementType();
  uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);

  // If this isn't a load of the whole alloca to an integer, it may be a load
  // of the first element.  Just ignore the load in this case and normal SROA
  // will handle it.
  if (!isa<IntegerType>(LI->getType()) ||
      TD->getTypeAllocSizeInBits(LI->getType()) != AllocaSizeBits)
    return;

  DEBUG(errs() << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << *LI);

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  const StructLayout *Layout = 0;
  uint64_t ArrayEltBitOffset = 0;
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    Layout = TD->getStructLayout(EltSTy);
  } else {
    const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
    ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
  }

  Value *ResultVal =
    Constant::getNullValue(IntegerType::get(LI->getContext(), AllocaSizeBits));

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // Load the value from the alloca.  If the NewElt is an aggregate, cast
    // the pointer to an integer of the same size before doing the load.
    Value *SrcField = NewElts[i];
    const Type *FieldTy =
      cast<PointerType>(SrcField->getType())->getElementType();
    uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

    // Ignore zero sized fields like {}, they obviously contain no data.
    if (FieldSizeBits == 0) continue;

    const IntegerType *FieldIntTy = IntegerType::get(LI->getContext(),
                                                     FieldSizeBits);
    if (!isa<IntegerType>(FieldTy) && !FieldTy->isFloatingPoint() &&
        !isa<VectorType>(FieldTy))
      SrcField = new BitCastInst(SrcField,
                                 PointerType::getUnqual(FieldIntTy),
                                 "", LI);
    SrcField = new LoadInst(SrcField, "sroa.load.elt", LI);

    // If SrcField is a fp or vector of the right size but that isn't an
    // integer type, bitcast to an integer so we can shift it.
    if (SrcField->getType() != FieldIntTy)
      SrcField = new BitCastInst(SrcField, FieldIntTy, "", LI);

    // Zero extend the field to be the same size as the final alloca so that
    // we can shift and insert it.
    if (SrcField->getType() != ResultVal->getType())
      SrcField = new ZExtInst(SrcField, ResultVal->getType(), "", LI);

    // Determine the number of bits to shift SrcField.
    uint64_t Shift;
    if (Layout)  // Struct case.
      Shift = Layout->getElementOffsetInBits(i);
    else  // Array case.
      Shift = i*ArrayEltBitOffset;

    if (TD->isBigEndian())
      Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();

    if (Shift) {
      Value *ShiftVal = ConstantInt::get(SrcField->getType(), Shift);
      SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI);
    }

    ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI);
  }

  // Handle tail padding by truncating the result
  if (TD->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
    ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI);

  LI->replaceAllUsesWith(ResultVal);
  LI->eraseFromParent();
}

/// HasPadding - Return true if the specified type has any structure or
/// alignment padding, false otherwise.
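///
/// For example (illustrative, assuming a typical 32-bit TargetData):
/// { i8, i32 } has three bytes of padding between the fields and { i32, i8 }
/// has three bytes of tail padding, so both report true; { i32, i32 }
/// reports false.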
static bool HasPadding(const Type *Ty, const TargetData &TD) {
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TD.getStructLayout(STy);
    unsigned PrevFieldBitOffset = 0;
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned FieldBitOffset = SL->getElementOffsetInBits(i);

      // Padding in sub-elements?
      if (HasPadding(STy->getElementType(i), TD))
        return true;

      // Check to see if there is any padding between this element and the
      // previous one.
      if (i) {
        unsigned PrevFieldEnd =
          PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
        if (PrevFieldEnd < FieldBitOffset)
          return true;
      }

      PrevFieldBitOffset = FieldBitOffset;
    }

    // Check for tail padding.
    if (unsigned EltCount = STy->getNumElements()) {
      unsigned PrevFieldEnd = PrevFieldBitOffset +
                      TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
      if (PrevFieldEnd < SL->getSizeInBits())
        return true;
    }

  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    return HasPadding(ATy->getElementType(), TD);
  } else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    return HasPadding(VTy->getElementType(), TD);
  }
  return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
}

/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
///
int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all of
  // the users are safe to transform.
  AllocaInfo Info;

  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafeUseOfAllocation(cast<Instruction>(*I), AI, Info);
    if (Info.isUnsafe) {
      DEBUG(errs() << "Cannot transform: " << *AI << "  due to user: " << **I);
      return 0;
    }
  }

  // Okay, we know all the users are promotable.  If the aggregate is a memcpy
  // source and destination, we have to be careful.  In particular, the memcpy
  // could be moving around elements that live in structure padding of the LLVM
  // types, but may actually be used.  In these cases, we refuse to promote the
  // alloca.
  if (Info.isMemCpySrc && Info.isMemCpyDst &&
      HasPadding(AI->getType()->getElementType(), *TD))
    return 0;

  // If we require cleanup, return 1, otherwise return 3.
  return Info.needsCleanup ? 1 : 3;
}

/// CleanupGEP - GEP is used by an Alloca, which can be promoted after the GEP
/// is canonicalized here.
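///
/// Illustrative sketch (value names are made up): for a two-element array
/// indexed by a variable,
///   %p = getelementptr [2 x i32]* %A, i32 0, i32 %i
///   %v = load i32* %p
/// each load of %p is rewritten as two constant-indexed loads plus a select
/// on (%i != 0), leaving only constant indices for SROA to scalarize.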
void SROA::CleanupGEP(GetElementPtrInst *GEPI) {
  gep_type_iterator I = gep_type_begin(GEPI);
  ++I;

  const ArrayType *AT = dyn_cast<ArrayType>(*I);
  if (!AT)
    return;

  uint64_t NumElements = AT->getNumElements();

  if (isa<ConstantInt>(I.getOperand()))
    return;

  if (NumElements == 1) {
    GEPI->setOperand(2,
                  Constant::getNullValue(Type::getInt32Ty(GEPI->getContext())));
    return;
  }

  assert(NumElements == 2 && "Unhandled case!");
  // All users of the GEP must be loads.  At each use of the GEP, insert
  // two loads of the appropriate indexed GEP and select between them.
  Value *IsOne = new ICmpInst(GEPI, ICmpInst::ICMP_NE, I.getOperand(),
                              Constant::getNullValue(I.getOperand()->getType()),
                              "isone");
  // Insert the new GEP instructions, which are properly indexed.
  SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
  Indices[1] = Constant::getNullValue(Type::getInt32Ty(GEPI->getContext()));
  Value *ZeroIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
                                             Indices.begin(),
                                             Indices.end(),
                                             GEPI->getName()+".0", GEPI);
  Indices[1] = ConstantInt::get(Type::getInt32Ty(GEPI->getContext()), 1);
  Value *OneIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
                                            Indices.begin(),
                                            Indices.end(),
                                            GEPI->getName()+".1", GEPI);
  // Replace all loads of the variable index GEP with loads from both
  // indexes and a select.
  while (!GEPI->use_empty()) {
    LoadInst *LI = cast<LoadInst>(GEPI->use_back());
    Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
    Value *One  = new LoadInst(OneIdx, LI->getName()+".1", LI);
    Value *R = SelectInst::Create(IsOne, One, Zero, LI->getName(), LI);
    LI->replaceAllUsesWith(R);
    LI->eraseFromParent();
  }
  GEPI->eraseFromParent();
}

/// CleanupAllocaUsers - If SROA reported that it can promote the specified
/// allocation, but only if cleaned up, perform the cleanups required.
void SROA::CleanupAllocaUsers(AllocationInst *AI) {
  // At this point, we know that the end result will be SROA'd and promoted, so
  // we can insert ugly code if required so long as sroa+mem2reg will clean it
  // up.
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    User *U = *UI++;
    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U))
      CleanupGEP(GEPI);
    else {
      Instruction *I = cast<Instruction>(U);
      SmallVector<DbgInfoIntrinsic *, 2> DbgInUses;
      if (!isa<StoreInst>(I) && OnlyUsedByDbgInfoIntrinsics(I, &DbgInUses)) {
        // Safe to remove debug info uses.
        while (!DbgInUses.empty()) {
          DbgInfoIntrinsic *DI = DbgInUses.back(); DbgInUses.pop_back();
          DI->eraseFromParent();
        }
        I->eraseFromParent();
      }
    }
  }
}

/// MergeInType - Add the 'In' type to the accumulated type (Accum) so far at
/// the offset specified by Offset (which is specified in bytes).
///
/// There are two cases we handle here:
///   1) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///      This promotes a <4 x float> with a store of float to the third element
///      into a <4 x float> that uses insert element.
///   2) A fully general blob of memory, which we turn into some (potentially
///      large) integer type with extract and insert operations where the loads
///      and stores would mutate the memory.
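///
/// For example (illustrative): seeing a <4 x float> store at offset 0 of a
/// 16-byte alloca sets VecTy to <4 x float>; a later float access at offset 8
/// is compatible with that element size and keeps it.  An i16 access does not
/// match the element size, so VecTy degrades to VoidTy (the large-integer
/// form).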
static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy,
                        unsigned AllocaSize, const TargetData &TD,
                        LLVMContext &Context) {
  // If this could be contributing to a vector, analyze it.
  if (VecTy != Type::getVoidTy(Context)) { // either null or a vector type.

    // If the In type is a vector that is the same size as the alloca, see if it
    // matches the existing VecTy.
    if (const VectorType *VInTy = dyn_cast<VectorType>(In)) {
      if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) {
        // If we're storing/loading a vector of the right size, allow it as a
        // vector.  If this is the first vector we see, remember the type so
        // that we know the element size.
        if (VecTy == 0)
          VecTy = In;
        return;
      }
    } else if (In == Type::getFloatTy(Context) ||
               In == Type::getDoubleTy(Context) ||
               (isa<IntegerType>(In) && In->getPrimitiveSizeInBits() >= 8 &&
                isPowerOf2_32(In->getPrimitiveSizeInBits()))) {
      // If we're accessing something that could be an element of a vector, see
      // if the implied vector agrees with what we already have and if Offset is
      // compatible with it.
      unsigned EltSize = In->getPrimitiveSizeInBits()/8;
      if (Offset % EltSize == 0 &&
          AllocaSize % EltSize == 0 &&
          (VecTy == 0 ||
           cast<VectorType>(VecTy)->getElementType()
                 ->getPrimitiveSizeInBits()/8 == EltSize)) {
        if (VecTy == 0)
          VecTy = VectorType::get(In, AllocaSize/EltSize);
        return;
      }
    }
  }

  // Otherwise, we have a case that we can't handle with an optimized vector
  // form.  We can still turn this into a large integer.
  VecTy = Type::getVoidTy(Context);
}

/// CanConvertToScalar - V is a pointer.  If we can convert the pointee and all
/// its accesses to use a single vector type, return true, and set VecTy to
/// the new type.  If we could convert the alloca into a single promotable
/// integer, return true but set VecTy to VoidTy.  Further, if the use is not a
/// completely trivial use that mem2reg could promote, set IsNotTrivial.  Offset
/// is the current offset from the base of the alloca being analyzed.
///
/// If we see at least one access to the value as a vector type, set the
/// SawVec flag.
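///
/// A minimal sketch of the intent (not an exhaustive rule): for
///   %u = alloca { float, float, float, float }   ; 16 bytes
/// where one user loads a <4 x float> through a bitcast and the others access
/// individual floats, this returns true with VecTy = <4 x float> and
/// SawVec = true, so the caller builds a vector alloca instead of an i128.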
bool SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
                              bool &SawVec, uint64_t Offset,
                              unsigned AllocaSize) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // Don't break volatile loads.
      if (LI->isVolatile())
        return false;
      MergeInType(LI->getType(), Offset, VecTy,
                  AllocaSize, *TD, V->getContext());
      SawVec |= isa<VectorType>(LI->getType());
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V || SI->isVolatile()) return false;
      MergeInType(SI->getOperand(0)->getType(), Offset,
                  VecTy, AllocaSize, *TD, V->getContext());
      SawVec |= isa<VectorType>(SI->getOperand(0)->getType());
      continue;
    }

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (!CanConvertToScalar(BCI, IsNotTrivial, VecTy, SawVec, Offset,
                              AllocaSize))
        return false;
      IsNotTrivial = true;
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // If this is a GEP with variable indices, we can't handle it.
      if (!GEP->hasAllConstantIndices())
        return false;

      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(),
                                                &Indices[0], Indices.size());
      // See if all uses can be converted.
      if (!CanConvertToScalar(GEP, IsNotTrivial, VecTy, SawVec, Offset+GEPOffset,
                              AllocaSize))
        return false;
      IsNotTrivial = true;
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // handle it.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      // Store of constant value and constant size.
      if (isa<ConstantInt>(MSI->getValue()) &&
          isa<ConstantInt>(MSI->getLength())) {
        IsNotTrivial = true;
        continue;
      }
    }

    // If this is a memcpy or memmove into or out of the whole allocation, we
    // can handle it like a load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
      if (ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength()))
        if (Len->getZExtValue() == AllocaSize && Offset == 0) {
          IsNotTrivial = true;
          continue;
        }
    }

    // Ignore dbg intrinsic.
    if (isa<DbgInfoIntrinsic>(User))
      continue;

    // Otherwise, we cannot handle this!
    return false;
  }

  return true;
}

/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
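///
/// Illustrative sketch: with NewAI = "alloca i64" replacing an { i32, i32 }
/// alloca, a user such as
///   store i32 %v, i32* %field1     ; Offset == 32 bits
/// is rewritten as a read-modify-write of the whole scalar: load the i64,
/// insert %v into bits 32..63 via ConvertScalar_InsertValue, store it back.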
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) {
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(),
                                                &Indices[0], Indices.size());
      ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8);
      GEP->eraseFromParent();
      continue;
    }

    IRBuilder<> Builder(User->getParent(), User);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *LoadedVal = Builder.CreateLoad(NewAI, "tmp");
      Value *NewLoadVal
        = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset, Builder);
      LI->replaceAllUsesWith(NewLoadVal);
      LI->eraseFromParent();
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");
      // FIXME: Remove once builder has Twine API.
      Value *Old = Builder.CreateLoad(NewAI, (NewAI->getName()+".in").str().c_str());
      Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset,
                                             Builder);
      Builder.CreateStore(New, NewAI);
      SI->eraseFromParent();
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // transform it into a store of the expanded constant value.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      assert(MSI->getRawDest() == Ptr && "Consistency error!");
      unsigned NumBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
      if (NumBytes != 0) {
        unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue();

        // Compute the value replicated the right number of times.
        APInt APVal(NumBytes*8, Val);

        // Splat the value if non-zero.
        if (Val)
          for (unsigned i = 1; i != NumBytes; ++i)
            APVal |= APVal << 8;

        // FIXME: Remove once builder has Twine API.
        Value *Old = Builder.CreateLoad(NewAI, (NewAI->getName()+".in").str().c_str());
        Value *New = ConvertScalar_InsertValue(
                                   ConstantInt::get(User->getContext(), APVal),
                                               Old, Offset, Builder);
        Builder.CreateStore(New, NewAI);
      }
      MSI->eraseFromParent();
      continue;
    }

    // If this is a memcpy or memmove into or out of the whole allocation, we
    // can handle it like a load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
      assert(Offset == 0 && "must be store to start of alloca");

      // If the source and destination are both to the same alloca, then this is
      // a noop copy-to-self, just delete it.  Otherwise, emit a load and store
      // as appropriate.
      AllocaInst *OrigAI = cast<AllocaInst>(Ptr->getUnderlyingObject());

      if (MTI->getSource()->getUnderlyingObject() != OrigAI) {
        // Dest must be OrigAI, change this to be a load from the original
        // pointer (bitcasted), then a store to our new alloca.
        assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
        Value *SrcPtr = MTI->getSource();
        SrcPtr = Builder.CreateBitCast(SrcPtr, NewAI->getType());

        LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
        SrcVal->setAlignment(MTI->getAlignment());
        Builder.CreateStore(SrcVal, NewAI);
      } else if (MTI->getDest()->getUnderlyingObject() != OrigAI) {
        // Src must be OrigAI, change this to be a load from NewAI then a store
        // through the original dest pointer (bitcasted).
        assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
        LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval");

        Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), NewAI->getType());
        StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr);
        NewStore->setAlignment(MTI->getAlignment());
      } else {
        // Noop transfer. Src == Dst
      }

      MTI->eraseFromParent();
      continue;
    }

    // If user is a dbg info intrinsic then it is safe to remove it.
    if (isa<DbgInfoIntrinsic>(User)) {
      User->eraseFromParent();
      continue;
    }

    llvm_unreachable("Unsupported operation!");
  }
}

/// ConvertScalar_ExtractValue - Extract a value of type ToType from an integer
/// or vector value FromVal, extracting the bits from the offset specified by
/// Offset.  This returns the value, which is of type ToType.
///
/// This happens when we are converting an "integer union" to a single
/// integer scalar, or when we are converting a "vector union" to a vector with
/// insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.
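///
/// For example (illustrative): extracting an i32 at Offset 32 from an i64
/// FromVal emits "lshr i64 %val, 32" followed by a trunc to i32; if FromVal
/// is a <4 x float> and ToType is float, it emits an extractelement of
/// element Offset/32 instead.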
1550 Value
*SROA::ConvertScalar_ExtractValue(Value
*FromVal
, const Type
*ToType
,
1551 uint64_t Offset
, IRBuilder
<> &Builder
) {
1552 // If the load is of the whole new alloca, no conversion is needed.
1553 if (FromVal
->getType() == ToType
&& Offset
== 0)
  // If the result alloca is a vector type, this is either an element
  // access or a bitcast to another vector type of the same size.
  if (const VectorType *VTy = dyn_cast<VectorType>(FromVal->getType())) {
    if (isa<VectorType>(ToType))
      return Builder.CreateBitCast(FromVal, ToType, "tmp");

    // Otherwise it must be an element access.
    unsigned Elt = 0;
    if (Offset) {
      unsigned EltSize = TD->getTypeAllocSizeInBits(VTy->getElementType());
      Elt = Offset/EltSize;
      assert(EltSize*Elt == Offset && "Invalid modulus in validity checking");
    }
    // Return the element extracted out of it.
    Value *V = Builder.CreateExtractElement(FromVal, ConstantInt::get(
                    Type::getInt32Ty(FromVal->getContext()), Elt), "tmp");
    if (V->getType() != ToType)
      V = Builder.CreateBitCast(V, ToType, "tmp");
    return V;
  }
  // If ToType is a first class aggregate, extract out each of the pieces and
  // use insertvalue's to form the FCA.
  if (const StructType *ST = dyn_cast<StructType>(ToType)) {
    const StructLayout &Layout = *TD->getStructLayout(ST);
    Value *Res = UndefValue::get(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
                                        Offset+Layout.getElementOffsetInBits(i),
                                              Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
    }
    return Res;
  }
  if (const ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
    uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
    Value *Res = UndefValue::get(AT);
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
                                              Offset+i*EltSize, Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
    }
    return Res;
  }
  // Otherwise, this must be a union that was converted to an integer value.
  const IntegerType *NTy = cast<IntegerType>(FromVal->getType());

  // If this is a big-endian system and the load is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD->isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = TD->getTypeStoreSizeInBits(NTy) -
            TD->getTypeStoreSizeInBits(ToType) - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative bitwidths (with shl) which are not defined.
  // We do this to support (f.e.) loads off the end of a structure where
  // only some bits are used.
  if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateLShr(FromVal,
                                 ConstantInt::get(FromVal->getType(),
                                                  ShAmt), "tmp");
  else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateShl(FromVal,
                                ConstantInt::get(FromVal->getType(),
                                                 -ShAmt), "tmp");
  // Finally, unconditionally truncate the integer to the right width.
  unsigned LIBitWidth = TD->getTypeSizeInBits(ToType);
  if (LIBitWidth < NTy->getBitWidth())
    FromVal =
      Builder.CreateTrunc(FromVal, IntegerType::get(FromVal->getContext(),
                                                    LIBitWidth), "tmp");
  else if (LIBitWidth > NTy->getBitWidth())
    FromVal =
      Builder.CreateZExt(FromVal, IntegerType::get(FromVal->getContext(),
                                                   LIBitWidth), "tmp");

  // If the result is an integer, this is a trunc or bitcast.
  if (isa<IntegerType>(ToType)) {
    // Should be done.
  } else if (ToType->isFloatingPoint() || isa<VectorType>(ToType)) {
    // Just do a bitcast, we know the sizes match up.
    FromVal = Builder.CreateBitCast(FromVal, ToType, "tmp");
  } else {
    // Otherwise must be a pointer.
    FromVal = Builder.CreateIntToPtr(FromVal, ToType, "tmp");
  }
  assert(FromVal->getType() == ToType && "Didn't convert right?");
  return FromVal;
}
/// ConvertScalar_InsertValue - Insert the value "SV" into the existing integer
/// or vector value "Old" at the offset specified by Offset.
///
/// This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.
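///
/// For illustration only (not part of the original source): inserting an i8
/// value at bit Offset = 8 into an i32 "Old" on a little-endian target emits
/// roughly:
///   %ext  = zext i8 %SV to i32
///   %tmp  = shl i32 %ext, 8
///   %mask = and i32 %Old, -65281     ; ~0x0000FF00, clear the target byte
///   %ins  = or i32 %mask, %tmp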
Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
                                       uint64_t Offset, IRBuilder<> &Builder) {
  // Convert the stored type to the actual type, shift it left to insert
  // then 'or' into place.
  const Type *AllocaType = Old->getType();
  LLVMContext &Context = Old->getContext();
  if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
    uint64_t VecSize = TD->getTypeAllocSizeInBits(VTy);
    uint64_t ValSize = TD->getTypeAllocSizeInBits(SV->getType());

    // Changing the whole vector with memset or with an access of a different
    // vector type.
    if (ValSize == VecSize)
      return Builder.CreateBitCast(SV, AllocaType, "tmp");

    uint64_t EltSize = TD->getTypeAllocSizeInBits(VTy->getElementType());

    // Must be an element insertion.
    unsigned Elt = Offset/EltSize;

    if (SV->getType() != VTy->getElementType())
      SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp");

    SV = Builder.CreateInsertElement(Old, SV,
                    ConstantInt::get(Type::getInt32Ty(SV->getContext()), Elt),
                                     "tmp");
    return SV;
  }
  // If SV is a first-class aggregate value, insert each value recursively.
  if (const StructType *ST = dyn_cast<StructType>(SV->getType())) {
    const StructLayout &Layout = *TD->getStructLayout(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      Old = ConvertScalar_InsertValue(Elt, Old,
                                      Offset+Layout.getElementOffsetInBits(i),
                                      Builder);
    }
    return Old;
  }
  if (const ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
    uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, Builder);
    }
    return Old;
  }
  // If SV is a float, convert it to the appropriate integer type.
  // If it is a pointer, do the same.
  unsigned SrcWidth = TD->getTypeSizeInBits(SV->getType());
  unsigned DestWidth = TD->getTypeSizeInBits(AllocaType);
  unsigned SrcStoreWidth = TD->getTypeStoreSizeInBits(SV->getType());
  unsigned DestStoreWidth = TD->getTypeStoreSizeInBits(AllocaType);
  if (SV->getType()->isFloatingPoint() || isa<VectorType>(SV->getType()))
    SV = Builder.CreateBitCast(SV,
                           IntegerType::get(SV->getContext(), SrcWidth), "tmp");
  else if (isa<PointerType>(SV->getType()))
    SV = Builder.CreatePtrToInt(SV, TD->getIntPtrType(SV->getContext()), "tmp");

  // Zero extend or truncate the value if needed.
  if (SV->getType() != AllocaType) {
    if (SV->getType()->getPrimitiveSizeInBits() <
            AllocaType->getPrimitiveSizeInBits())
      SV = Builder.CreateZExt(SV, AllocaType, "tmp");
    else {
      // Truncation may be needed if storing more than the alloca can hold
      // (undefined behavior).
      SV = Builder.CreateTrunc(SV, AllocaType, "tmp");
      SrcWidth = DestWidth;
      SrcStoreWidth = DestStoreWidth;
    }
  }
  // If this is a big-endian system and the store is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD->isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative bitwidths (with shr) which are not defined.
  // We do this to support (f.e.) stores off the end of a structure where
  // only some bits in the structure are set.
  APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
  if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
    SV = Builder.CreateShl(SV, ConstantInt::get(SV->getType(),
                                                ShAmt), "tmp");
    Mask <<= ShAmt;
  } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
    SV = Builder.CreateLShr(SV, ConstantInt::get(SV->getType(),
                                                 -ShAmt), "tmp");
    Mask = Mask.lshr(-ShAmt);
  }
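  // Worked example (illustrative, not from the original source): storing an i8
  // at byte offset 1 of a 4-byte alloca on a big-endian target gives
  // ShAmt = 32 - 8 - 8 = 16, so the byte is shifted into bits [16,23] and the
  // mask 0xFF becomes 0x00FF0000 before the masked 'or' below.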
  // Mask out the bits we are about to insert from the old value, and or
  // in the new bits.
  if (SrcWidth != DestWidth) {
    assert(DestWidth > SrcWidth);
    Old = Builder.CreateAnd(Old, ConstantInt::get(Context, ~Mask), "mask");
    SV = Builder.CreateOr(Old, SV, "ins");
  }
  return SV;
}
/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool PointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return PointsToConstantGlobal(CE->getOperand(0));
  return false;
}
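// For illustration only (not part of the original source): this returns true
// for a constant expression such as
//   getelementptr ([4 x i32]* @table, i32 0, i32 2)
// when @table is declared 'constant', and false as soon as anything other
// than a bitcast/getelementptr ConstantExpr or a constant global appears in
// the chain.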
/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with isOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an unoffseted pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
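///
/// For illustration only (not part of the original source), the pattern this
/// recognizes is an alloca whose only write is a memcpy from a 'constant'
/// global (here %dst is a bitcast of the alloca and @table is constant):
///   call void @llvm.memcpy.i32(i8* %dst, i8* bitcast (... @table ...), i32 16, i32 1)
/// with every other use of the alloca being a non-volatile load, a bitcast, or
/// an all-zero-index GEP.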
static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
                                           bool isOffset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI) {
    if (LoadInst *LI = dyn_cast<LoadInst>(*UI))
      // Ignore non-volatile loads, they are always ok.
      if (!LI->isVolatile())
        continue;

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
      // If the GEP has all zero indices, it doesn't offset the pointer.  If it
      // doesn't, it does.
      if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
                                          isOffset || !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    if (!isa<MemTransferInst>(*UI))
      return false;

    // If we already have seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (isOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (UI.getOperandNo() != 1) return false;

    MemIntrinsic *MI = cast<MemIntrinsic>(*UI);

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!PointsToConstantGlobal(MI->getOperand(2)))
      return false;

    // Otherwise, the transform is safe.  Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}
/// isOnlyCopiedFromConstantGlobal - Return true if the specified alloca is only
/// modified by a copy from a constant global.  If we can prove this, we can
/// replace any uses of the alloca with uses of the global directly.
Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocationInst *AI) {
  Instruction *TheCopy = 0;
  if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
    return TheCopy;
  return 0;
}