//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
// they often interact, especially for C++ programs.  As such, iterating
// between SRoA and Mem2Reg until we run out of things to promote works well.
//
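// For illustration (hypothetical IR, not taken from any particular test), a
// function containing
//     %p = alloca { i32, float }
// whose fields are only accessed through "getelementptr %p, 0, 0" and
// "getelementptr %p, 0, 1" is rewritten into two independent allocas,
//     %p.0 = alloca i32
//     %p.1 = alloca float
// which mem2reg can then promote into SSA values.
//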
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalarrepl"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/LLVMContext.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"

STATISTIC(NumReplaced,  "Number of allocas broken up");
STATISTIC(NumPromoted,  "Number of allocas promoted");
STATISTIC(NumConverted, "Number of aggregates converted to scalar");
STATISTIC(NumGlobals,   "Number of allocas copied from constant global");

struct VISIBILITY_HIDDEN SROA : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid
  explicit SROA(signed T = -1) : FunctionPass(&ID) {
    // A negative threshold selects the default size limit.
    SRThreshold = (T == -1) ? 128 : T;
  }

  bool runOnFunction(Function &F);

  bool performScalarRepl(Function &F);
  bool performPromotion(Function &F);

  // getAnalysisUsage - This pass does not require any passes, but we know it
  // will not alter the CFG, so say so.
  virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    AU.addRequired<DominatorTree>();
    AU.addRequired<DominanceFrontier>();
    AU.addRequired<TargetData>();
  }

  TargetData *TD;

  /// AllocaInfo - When analyzing uses of an alloca instruction, this captures
  /// information about the uses.  All these fields are initialized to false
  /// and set to true when something is learned.
  struct AllocaInfo {
    /// isUnsafe - This is set to true if the alloca cannot be SROA'd.
    bool isUnsafe : 1;

    /// needsCleanup - This is set to true if there is some use of the alloca
    /// that requires cleanup.
    bool needsCleanup : 1;

    /// isMemCpySrc - This is true if this aggregate is memcpy'd from.
    bool isMemCpySrc : 1;

    /// isMemCpyDst - This is true if this aggregate is memcpy'd into.
    bool isMemCpyDst : 1;

    AllocaInfo()
      : isUnsafe(false), needsCleanup(false),
        isMemCpySrc(false), isMemCpyDst(false) {}
  };

  unsigned SRThreshold;

  void MarkUnsafe(AllocaInfo &I) { I.isUnsafe = true; }

  int isSafeAllocaToScalarRepl(AllocationInst *AI);

  void isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
                             AllocaInfo &Info);
  void isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
                        AllocaInfo &Info);
  void isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                      unsigned OpNo, AllocaInfo &Info);
  void isSafeUseOfBitCastedAllocation(BitCastInst *User, AllocationInst *AI,
                                      AllocaInfo &Info);

  void DoScalarReplacement(AllocationInst *AI,
                           std::vector<AllocationInst*> &WorkList);
  void CleanupGEP(GetElementPtrInst *GEP);
  void CleanupAllocaUsers(AllocationInst *AI);
  AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);

  void RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                  SmallVector<AllocaInst*, 32> &NewElts);
  void RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
                                    AllocationInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);
  void RewriteStoreUserOfWholeAlloca(StoreInst *SI, AllocationInst *AI,
                                     SmallVector<AllocaInst*, 32> &NewElts);
  void RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
                                    SmallVector<AllocaInst*, 32> &NewElts);

  bool CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
                          bool &SawVec, uint64_t Offset, unsigned AllocaSize);
  void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset);
  Value *ConvertScalar_ExtractValue(Value *NV, const Type *ToType,
                                    uint64_t Offset, IRBuilder<> &Builder);
  Value *ConvertScalar_InsertValue(Value *StoredVal, Value *ExistingVal,
                                   uint64_t Offset, IRBuilder<> &Builder);
  static Instruction *isOnlyCopiedFromConstantGlobal(AllocationInst *AI);
};

static RegisterPass<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass(signed int Threshold) {
  return new SROA(Threshold);
}
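
// Example use (illustrative; assumes the usual legacy PassManager setup and a
// Module 'M' provided by the client):
//   FunctionPassManager FPM(...);
//   FPM.add(new TargetData(&M));                  // SROA requires TargetData
//   FPM.add(createScalarReplAggregatesPass(256)); // or -1 for the default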

bool SROA::runOnFunction(Function &F) {
  TD = &getAnalysis<TargetData>();

  bool Changed = performPromotion(F);
  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}

bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  DominatorTree     &DT = getAnalysis<DominatorTree>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions in
    // the entry node.
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, DT, DF, F.getContext());
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}

/// getNumSAElements - Return the number of elements in the specific struct or
/// array.
static uint64_t getNumSAElements(const Type *T) {
  if (const StructType *ST = dyn_cast<StructType>(T))
    return ST->getNumElements();
  return cast<ArrayType>(T)->getNumElements();
}
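
// For example, getNumSAElements returns 3 for "{ i32, float, i8* }" and 10 for
// "[10 x i32]" (illustrative types, not tied to any particular caller).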

// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the malloc/alloca instructions in the function,
// removing them if they are only used by getelementptr instructions.
//
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any alloca's and mallocs to the worklist
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // Handle dead allocas trivially.  These can be formed by SROA'ing arrays
    // with unused elements.
    if (AI->use_empty()) {
      AI->eraseFromParent();
      continue;
    }

    // If this alloca is impossible for us to promote, reject it early.
    if (AI->isArrayAllocation() || !AI->getAllocatedType()->isSized())
      continue;

    // Check to see if this allocation is only modified by a memcpy/memmove from
    // a constant global.  If this is the case, we can change all users to use
    // the constant global instead.  This is commonly produced by the CFE by
    // constructs like "void foo() { int A[] = {1,2,3,4,5,6,7,8,9...}; }" if 'A'
    // is only subsequently read.
    if (Instruction *TheCopy = isOnlyCopiedFromConstantGlobal(AI)) {
      DOUT << "Found alloca equal to global: " << *AI;
      DOUT << "  memcpy = " << *TheCopy;
      Constant *TheSrc = cast<Constant>(TheCopy->getOperand(2));
      AI->replaceAllUsesWith(ConstantExpr::getBitCast(TheSrc, AI->getType()));
      TheCopy->eraseFromParent();  // Don't mutate the global.
      AI->eraseFromParent();
      ++NumGlobals;
      Changed = true;
      continue;
    }

    // Check to see if we can perform the core SROA transformation.  We cannot
    // transform the allocation instruction if it is an array allocation
    // (allocations OF arrays are ok though), and an allocation of a scalar
    // value cannot be decomposed at all.
    uint64_t AllocaSize = TD->getTypeAllocSize(AI->getAllocatedType());

    // Do not promote any struct whose size is too big.
    if (AllocaSize > SRThreshold) continue;

    if ((isa<StructType>(AI->getAllocatedType()) ||
         isa<ArrayType>(AI->getAllocatedType())) &&
        // Do not promote any struct into more than "32" separate vars.
        getNumSAElements(AI->getAllocatedType()) <= SRThreshold/4) {
      // Check that all of the users of the allocation are capable of being
      // transformed.
      switch (isSafeAllocaToScalarRepl(AI)) {
      default: llvm_unreachable("Unexpected value!");
      case 0:  // Not safe to scalar replace.
        break;
      case 1:  // Safe, but requires cleanup/canonicalizations first
        CleanupAllocaUsers(AI);
        // FALL THROUGH.
      case 3:  // Safe to scalar replace.
        DoScalarReplacement(AI, WorkList);
        Changed = true;
        continue;
      }
    }

    // If we can turn this aggregate value (potentially with casts) into a
    // simple scalar value that can be mem2reg'd into a register value.
    // IsNotTrivial tracks whether this is something that mem2reg could have
    // promoted itself.  If so, we don't want to transform it needlessly.  Note
    // that we can't just check based on the type: the alloca may be of an i32
    // but that has pointer arithmetic to set byte 3 of it or something.
    bool IsNotTrivial = false;
    const Type *VectorTy = 0;
    bool HadAVector = false;
    if (CanConvertToScalar(AI, IsNotTrivial, VectorTy, HadAVector,
                           0, unsigned(AllocaSize)) && IsNotTrivial) {
      AllocaInst *NewAI;
      // If we were able to find a vector type that can handle this with
      // insert/extract elements, and if there was at least one use that had
      // a vector type, promote this to a vector.  We don't want to promote
      // random stuff that doesn't use vectors (e.g. <9 x double>) because then
      // we just get a lot of insert/extracts.  If at least one vector is
      // involved, then we probably really do have a union of vector/array.
      if (VectorTy && isa<VectorType>(VectorTy) && HadAVector) {
        DOUT << "CONVERT TO VECTOR: " << *AI << "  TYPE = " << *VectorTy << "\n";

        // Create and insert the vector alloca.
        NewAI = new AllocaInst(VectorTy, 0, "", AI->getParent()->begin());
        ConvertUsesToScalar(AI, NewAI, 0);
      } else {
        DOUT << "CONVERT TO SCALAR INTEGER: " << *AI << "\n";

        // Create and insert the integer alloca.
        const Type *NewTy = F.getContext().getIntegerType(AllocaSize*8);
        NewAI = new AllocaInst(NewTy, 0, "", AI->getParent()->begin());
        ConvertUsesToScalar(AI, NewAI, 0);
      }
      AI->eraseFromParent();
      ++NumConverted;
      Changed = true;
      continue;
    }

    // Otherwise, couldn't process this alloca.
  }

  return Changed;
}
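
// As a concrete (illustrative) example of the scalar-integer path above, an
// alloca that is only accessed through pointer casts, e.g.
//   %X = alloca { i16, i16 }
//   %P = bitcast { i16, i16 }* %X to i32*
//   store i32 %v, i32* %P
// is rewritten to "%X = alloca i32" with a plain i32 store, which mem2reg can
// promote on the next iteration of runOnFunction.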

/// DoScalarReplacement - This alloca satisfied the isSafeAllocaToScalarRepl
/// predicate, do SROA now.
void SROA::DoScalarReplacement(AllocationInst *AI,
                               std::vector<AllocationInst*> &WorkList) {
  DOUT << "Found inst to SROA: " << *AI;
  SmallVector<AllocaInst*, 32> ElementAllocas;
  LLVMContext &Context = AI->getContext();
  if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
    ElementAllocas.reserve(ST->getNumContainedTypes());
    for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                      AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  } else {
    const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
    ElementAllocas.reserve(AT->getNumElements());
    const Type *ElTy = AT->getElementType();
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                      AI->getName() + "." + utostr(i), AI);
      ElementAllocas.push_back(NA);
      WorkList.push_back(NA);  // Add to worklist for recursive processing
    }
  }

  // Now that we have created the alloca instructions that we want to use,
  // expand the getelementptr instructions to use them.
  while (!AI->use_empty()) {
    Instruction *User = cast<Instruction>(AI->use_back());
    if (BitCastInst *BCInst = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCInst, AI, ElementAllocas);
      BCInst->eraseFromParent();
      continue;
    }

    // Replace:
    //   %res = load { i32, i32 }* %alloc
    // with:
    //   %load.0 = load i32* %alloc.0
    //   %insert.0 = insertvalue { i32, i32 } undef, i32 %load.0, 0
    //   %load.1 = load i32* %alloc.1
    //   %insert = insertvalue { i32, i32 } %insert.0, i32 %load.1, 1
    // (Also works for arrays instead of structs)
    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      Value *Insert = Context.getUndef(LI->getType());
      for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
        Value *Load = new LoadInst(ElementAllocas[i], "load", LI);
        Insert = InsertValueInst::Create(Insert, Load, i, "insert", LI);
      }
      LI->replaceAllUsesWith(Insert);
      LI->eraseFromParent();
      continue;
    }

    // Replace:
    //   store { i32, i32 } %val, { i32, i32 }* %alloc
    // with:
    //   %val.0 = extractvalue { i32, i32 } %val, 0
    //   store i32 %val.0, i32* %alloc.0
    //   %val.1 = extractvalue { i32, i32 } %val, 1
    //   store i32 %val.1, i32* %alloc.1
    // (Also works for arrays instead of structs)
    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      Value *Val = SI->getOperand(0);
      for (unsigned i = 0, e = ElementAllocas.size(); i != e; ++i) {
        Value *Extract = ExtractValueInst::Create(Val, i, Val->getName(), SI);
        new StoreInst(Extract, ElementAllocas[i], SI);
      }
      SI->eraseFromParent();
      continue;
    }

    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
    // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
    unsigned Idx =
      (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();

    assert(Idx < ElementAllocas.size() && "Index out of range?");
    AllocaInst *AllocaToUse = ElementAllocas[Idx];

    Value *RepValue;
    if (GEPI->getNumOperands() == 3) {
      // Do not insert a new getelementptr instruction with zero indices, only
      // to have it optimized out later.
      RepValue = AllocaToUse;
    } else {
      // We are indexing deeply into the structure, so we still need a
      // getelementptr instruction to finish the indexing.  This may be
      // expanded itself once the worklist is rerun.
      SmallVector<Value*, 8> NewArgs;
      NewArgs.push_back(Context.getNullValue(Type::Int32Ty));
      NewArgs.append(GEPI->op_begin()+3, GEPI->op_end());
      RepValue = GetElementPtrInst::Create(AllocaToUse, NewArgs.begin(),
                                           NewArgs.end(), "", GEPI);
      RepValue->takeName(GEPI);
    }

    // If this GEP is to the start of the aggregate, check for memcpys.
    if (Idx == 0 && GEPI->hasAllZeroIndices())
      RewriteBitCastUserOfAlloca(GEPI, AI, ElementAllocas);

    // Move all of the users over to the new GEP.
    GEPI->replaceAllUsesWith(RepValue);
    // Delete the old GEP
    GEPI->eraseFromParent();
  }

  // Finally, delete the Alloca instruction
  AI->eraseFromParent();
  NumReplaced++;
}
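
// For example (illustrative), after DoScalarReplacement on
//   %A = alloca { i32, [4 x i8] }
// a use such as
//   %p = getelementptr { i32, [4 x i8] }* %A, i32 0, i32 1, i32 2
// is rewritten against the new element alloca:
//   %A.1 = alloca [4 x i8]
//   %p   = getelementptr [4 x i8]* %A.1, i32 0, i32 2
// and %A.1 itself goes back on the worklist to be split further if possible.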

/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.  isFirstElt
/// indicates whether Ptr is known to point to the start of the aggregate.
void SROA::isSafeElementUse(Value *Ptr, bool isFirstElt, AllocationInst *AI,
                            AllocaInfo &Info) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I) {
    Instruction *User = cast<Instruction>(*I);
    switch (User->getOpcode()) {
    case Instruction::Load:  break;
    case Instruction::Store:
      // Store is ok if storing INTO the pointer, not storing the pointer
      if (User->getOperand(0) == Ptr) return MarkUnsafe(Info);
      break;
    case Instruction::GetElementPtr: {
      GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
      bool AreAllZeroIndices = isFirstElt;
      if (GEP->getNumOperands() > 1) {
        if (!isa<ConstantInt>(GEP->getOperand(1)) ||
            !cast<ConstantInt>(GEP->getOperand(1))->isZero())
          // Using pointer arithmetic to navigate the array.
          return MarkUnsafe(Info);

        if (AreAllZeroIndices)
          AreAllZeroIndices = GEP->hasAllZeroIndices();
      }
      isSafeElementUse(GEP, AreAllZeroIndices, AI, Info);
      if (Info.isUnsafe) return;
      break;
    }
    case Instruction::BitCast:
      if (isFirstElt) {
        isSafeUseOfBitCastedAllocation(cast<BitCastInst>(User), AI, Info);
        if (Info.isUnsafe) return;
        break;
      }
      DOUT << "  Transformation preventing inst: " << *User;
      return MarkUnsafe(Info);
    case Instruction::Call:
      if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
        if (isFirstElt) {
          isSafeMemIntrinsicOnAllocation(MI, AI, I.getOperandNo(), Info);
          if (Info.isUnsafe) return;
          break;
        }
      }
      DOUT << "  Transformation preventing inst: " << *User;
      return MarkUnsafe(Info);
    default:
      DOUT << "  Transformation preventing inst: " << *User;
      return MarkUnsafe(Info);
    }
  }
  return;  // All users look ok :)
}
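
// For example (illustrative), for a GEP %elt that points at one element of the
// aggregate, "load i32* %elt" is an allowed use, while "store i32* %elt,
// i32** %q" (escaping the pointer itself) marks the alloca unsafe.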

/// AllUsersAreLoads - Return true if all users of this value are loads.
static bool AllUsersAreLoads(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I)
    if (cast<Instruction>(*I)->getOpcode() != Instruction::Load)
      return false;
  return true;
}

/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
void SROA::isSafeUseOfAllocation(Instruction *User, AllocationInst *AI,
                                 AllocaInfo &Info) {
  LLVMContext &Context = User->getContext();
  if (BitCastInst *C = dyn_cast<BitCastInst>(User))
    return isSafeUseOfBitCastedAllocation(C, AI, Info);

  if (LoadInst *LI = dyn_cast<LoadInst>(User))
    if (!LI->isVolatile())
      return;// Loads (returning a first class aggregate) are always rewritable

  if (StoreInst *SI = dyn_cast<StoreInst>(User))
    if (!SI->isVolatile() && SI->getOperand(0) != AI)
      return;// Store is ok if storing INTO the pointer, not storing the pointer

  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User);
  if (GEPI == 0)
    return MarkUnsafe(Info);

  gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI);

  // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>".
  if (I == E ||
      I.getOperand() != Context.getNullValue(I.getOperand()->getType())) {
    return MarkUnsafe(Info);
  }

  ++I;
  if (I == E) return MarkUnsafe(Info);  // ran out of GEP indices??

  bool IsAllZeroIndices = true;

  // If the first index is a non-constant index into an array, see if we can
  // handle it as a special case.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    if (!isa<ConstantInt>(I.getOperand())) {
      IsAllZeroIndices = 0;
      uint64_t NumElements = AT->getNumElements();

      // If this is an array index and the index is not constant, we cannot
      // promote... that is unless the array has exactly one or two elements in
      // it, in which case we CAN promote it, but we have to canonicalize this
      // out if this is the only problem.
      if ((NumElements == 1 || NumElements == 2) &&
          AllUsersAreLoads(GEPI)) {
        Info.needsCleanup = true;
        return;  // Canonicalization required!
      }
      return MarkUnsafe(Info);
    }
  }

  // Walk through the GEP type indices, checking the types that this indexes
  // into.
  for (; I != E; ++I) {
    // Ignore struct elements, no extra checking needed for these.
    if (isa<StructType>(*I))
      continue;

    ConstantInt *IdxVal = dyn_cast<ConstantInt>(I.getOperand());
    if (!IdxVal) return MarkUnsafe(Info);

    // Are all indices still zero?
    IsAllZeroIndices &= IdxVal->isZero();

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      // This GEP indexes an array.  Verify that this is an in-range constant
      // integer.  Specifically, consider A[0][i].  We cannot know that the user
      // isn't doing invalid things like allowing i to index an out-of-range
      // subscript that accesses A[1].  Because of this, we have to reject SROA
      // of any accesses into structs where any of the components are variables.
      if (IdxVal->getZExtValue() >= AT->getNumElements())
        return MarkUnsafe(Info);
    } else if (const VectorType *VT = dyn_cast<VectorType>(*I)) {
      if (IdxVal->getZExtValue() >= VT->getNumElements())
        return MarkUnsafe(Info);
    }
  }

  // If there are any non-simple uses of this getelementptr, make sure to reject
  // them.
  return isSafeElementUse(GEPI, IsAllZeroIndices, AI, Info);
}
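
// For example (illustrative), "getelementptr { i32, [4 x i32] }* %A, i32 0,
// i32 1, i32 2" is an accepted form (leading zero index, constant in-range
// indices), while a variable index such as "getelementptr [4 x i32]* %A,
// i32 0, i32 %i" is rejected, unless the array has only one or two elements
// and all users are loads, in which case cleanup/canonicalization is requested.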

/// isSafeMemIntrinsicOnAllocation - Check whether the specified memory
/// intrinsic can be promoted by SROA.  At this point, we know that the operand
/// of the memintrinsic is a pointer to the beginning of the allocation.
void SROA::isSafeMemIntrinsicOnAllocation(MemIntrinsic *MI, AllocationInst *AI,
                                          unsigned OpNo, AllocaInfo &Info) {
  // If not constant length, give up.
  ConstantInt *Length = dyn_cast<ConstantInt>(MI->getLength());
  if (!Length) return MarkUnsafe(Info);

  // If not the whole aggregate, give up.
  if (Length->getZExtValue() !=
      TD->getTypeAllocSize(AI->getType()->getElementType()))
    return MarkUnsafe(Info);

  // We only know about memcpy/memset/memmove.
  if (!isa<MemIntrinsic>(MI))
    return MarkUnsafe(Info);

  // Otherwise, we can transform it.  Determine whether this is a memcpy/set
  // into or out of the aggregate.
  if (OpNo == 1)
    Info.isMemCpyDst = true;
  else {
    assert(OpNo == 2);
    Info.isMemCpySrc = true;
  }
}

/// isSafeUseOfBitCastedAllocation - Check whether all users of this bitcast of
/// the alloca are safe to transform.
void SROA::isSafeUseOfBitCastedAllocation(BitCastInst *BC, AllocationInst *AI,
                                          AllocaInfo &Info) {
  for (Value::use_iterator UI = BC->use_begin(), E = BC->use_end();
       UI != E; ++UI) {
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(UI)) {
      isSafeUseOfBitCastedAllocation(BCU, AI, Info);
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(UI)) {
      isSafeMemIntrinsicOnAllocation(MI, AI, UI.getOperandNo(), Info);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      if (SI->isVolatile())
        return MarkUnsafe(Info);

      // If storing the entire alloca in one chunk through a bitcasted pointer
      // to integer, we can transform it.  This happens (for example) when you
      // cast a {i32,i32}* to i64* and store through it.  This is similar to the
      // memcpy case and occurs in various "byval" cases and emulated memcpys.
      if (isa<IntegerType>(SI->getOperand(0)->getType()) &&
          TD->getTypeAllocSize(SI->getOperand(0)->getType()) ==
          TD->getTypeAllocSize(AI->getType()->getElementType())) {
        Info.isMemCpyDst = true;
        continue;
      }
      return MarkUnsafe(Info);
    } else if (LoadInst *LI = dyn_cast<LoadInst>(UI)) {
      if (LI->isVolatile())
        return MarkUnsafe(Info);

      // If loading the entire alloca in one chunk through a bitcasted pointer
      // to integer, we can transform it.  This happens (for example) when you
      // cast a {i32,i32}* to i64* and load through it.  This is similar to the
      // memcpy case and occurs in various "byval" cases and emulated memcpys.
      if (isa<IntegerType>(LI->getType()) &&
          TD->getTypeAllocSize(LI->getType()) ==
          TD->getTypeAllocSize(AI->getType()->getElementType())) {
        Info.isMemCpySrc = true;
        continue;
      }
      return MarkUnsafe(Info);
    } else if (isa<DbgInfoIntrinsic>(UI)) {
      // If one user is DbgInfoIntrinsic then check if all users are
      // DbgInfoIntrinsics.
      if (OnlyUsedByDbgInfoIntrinsics(BC)) {
        Info.needsCleanup = true;
        return;
      }
      MarkUnsafe(Info);
    } else {
      return MarkUnsafe(Info);
    }
    if (Info.isUnsafe) return;
  }
}
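
// For example (illustrative), both of these uses of a bitcasted alloca are
// accepted and later handled by RewriteStore/LoadUserOfWholeAlloca:
//   %P = bitcast { i32, i32 }* %A to i64*
//   store i64 %v, i64* %P          ; whole-alloca integer store
//   %r = load i64* %P              ; whole-alloca integer load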

/// RewriteBitCastUserOfAlloca - BCInst (transitively) bitcasts AI, or indexes
/// to its first element.  Transform users of the cast to use the new values
/// instead.
void SROA::RewriteBitCastUserOfAlloca(Instruction *BCInst, AllocationInst *AI,
                                      SmallVector<AllocaInst*, 32> &NewElts) {
  Value::use_iterator UI = BCInst->use_begin(), UE = BCInst->use_end();
  while (UI != UE) {
    Instruction *User = cast<Instruction>(*UI++);
    if (BitCastInst *BCU = dyn_cast<BitCastInst>(User)) {
      RewriteBitCastUserOfAlloca(BCU, AI, NewElts);
      if (BCU->use_empty()) BCU->eraseFromParent();
      continue;
    }

    if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(User)) {
      // This must be memcpy/memmove/memset of the entire aggregate.
      // Split into one per element.
      RewriteMemIntrinUserOfAlloca(MI, BCInst, AI, NewElts);
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // If this is a store of the entire alloca from an integer, rewrite it.
      RewriteStoreUserOfWholeAlloca(SI, AI, NewElts);
      continue;
    }

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // If this is a load of the entire alloca to an integer, rewrite it.
      RewriteLoadUserOfWholeAlloca(LI, AI, NewElts);
      continue;
    }

    // Otherwise it must be some other user of a gep of the first pointer.  Just
    // leave these alone.
  }
}

/// RewriteMemIntrinUserOfAlloca - MI is a memcpy/memset/memmove from or to AI.
/// Rewrite it to copy or set the elements of the scalarized memory.
void SROA::RewriteMemIntrinUserOfAlloca(MemIntrinsic *MI, Instruction *BCInst,
                                        AllocationInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {
  // If this is a memcpy/memmove, construct the other pointer as the
  // appropriate type.  The "Other" pointer is the pointer that goes to memory
  // that doesn't have anything to do with the alloca that we are promoting. For
  // memset, this Value* stays null.
  Value *OtherPtr = 0;
  LLVMContext &Context = MI->getContext();
  unsigned MemAlignment = MI->getAlignment();
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) { // memmove/memcopy
    if (BCInst == MTI->getRawDest())
      OtherPtr = MTI->getRawSource();
    else {
      assert(BCInst == MTI->getRawSource());
      OtherPtr = MTI->getRawDest();
    }
  }

  // If there is an other pointer, we want to convert it to the same pointer
  // type as AI has, so we can GEP through it safely.
  if (OtherPtr) {
    // It is likely that OtherPtr is a bitcast, if so, remove it.
    if (BitCastInst *BC = dyn_cast<BitCastInst>(OtherPtr))
      OtherPtr = BC->getOperand(0);
    // All zero GEPs are effectively bitcasts.
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(OtherPtr))
      if (GEP->hasAllZeroIndices())
        OtherPtr = GEP->getOperand(0);

    if (ConstantExpr *BCE = dyn_cast<ConstantExpr>(OtherPtr))
      if (BCE->getOpcode() == Instruction::BitCast)
        OtherPtr = BCE->getOperand(0);

    // If the pointer is not the right type, insert a bitcast to the right
    // type.
    if (OtherPtr->getType() != AI->getType())
      OtherPtr = new BitCastInst(OtherPtr, AI->getType(), OtherPtr->getName(),
                                 MI);
  }

  // Process each element of the aggregate.
  Value *TheFn = MI->getOperand(0);
  const Type *BytePtrTy = MI->getRawDest()->getType();
  bool SROADest = MI->getRawDest() == BCInst;

  Constant *Zero = Context.getNullValue(Type::Int32Ty);

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // If this is a memcpy/memmove, emit a GEP of the other element address.
    Value *OtherElt = 0;
    unsigned OtherEltAlign = MemAlignment;

    if (OtherPtr) {
      Value *Idx[2] = { Zero, ConstantInt::get(Type::Int32Ty, i) };
      OtherElt = GetElementPtrInst::Create(OtherPtr, Idx, Idx + 2,
                                           OtherPtr->getNameStr()+"."+utostr(i),
                                           MI);
      uint64_t EltOffset;
      const PointerType *OtherPtrTy = cast<PointerType>(OtherPtr->getType());
      if (const StructType *ST =
            dyn_cast<StructType>(OtherPtrTy->getElementType())) {
        EltOffset = TD->getStructLayout(ST)->getElementOffset(i);
      } else {
        const Type *EltTy =
          cast<SequentialType>(OtherPtr->getType())->getElementType();
        EltOffset = TD->getTypeAllocSize(EltTy)*i;
      }

      // The alignment of the other pointer is the guaranteed alignment of the
      // element, which is affected by both the known alignment of the whole
      // mem intrinsic and the alignment of the element.  If the alignment of
      // the memcpy (f.e.) is 32 but the element is at a 4-byte offset, then the
      // known alignment is just 4 bytes.
      OtherEltAlign = (unsigned)MinAlign(OtherEltAlign, EltOffset);
    }

    Value *EltPtr = NewElts[i];
    const Type *EltTy = cast<PointerType>(EltPtr->getType())->getElementType();

    // If we got down to a scalar, insert a load or store as appropriate.
    if (EltTy->isSingleValueType()) {
      if (isa<MemTransferInst>(MI)) {
        if (SROADest) {
          // From Other to Alloca.
          Value *Elt = new LoadInst(OtherElt, "tmp", false, OtherEltAlign, MI);
          new StoreInst(Elt, EltPtr, MI);
        } else {
          // From Alloca to Other.
          Value *Elt = new LoadInst(EltPtr, "tmp", MI);
          new StoreInst(Elt, OtherElt, false, OtherEltAlign, MI);
        }
        continue;
      }
      assert(isa<MemSetInst>(MI));

      // If the stored element is zero (common case), just store a null
      // constant.
      Constant *StoreVal;
      if (ConstantInt *CI = dyn_cast<ConstantInt>(MI->getOperand(2))) {
        if (CI->isZero()) {
          StoreVal = Context.getNullValue(EltTy);  // 0.0, null, 0, <0,0>
        } else {
          // If EltTy is a vector type, get the element type.
          const Type *ValTy = EltTy->getScalarType();

          // Construct an integer with the right value.
          unsigned EltSize = TD->getTypeSizeInBits(ValTy);
          APInt OneVal(EltSize, CI->getZExtValue());
          APInt TotalVal(OneVal);
          // Replicate the byte value into every byte of the element.
          for (unsigned i = 0; 8*i < EltSize; ++i) {
            TotalVal = TotalVal.shl(8);
            TotalVal |= OneVal;
          }

          // Convert the integer value to the appropriate type.
          StoreVal = ConstantInt::get(Context, TotalVal);
          if (isa<PointerType>(ValTy))
            StoreVal = ConstantExpr::getIntToPtr(StoreVal, ValTy);
          else if (ValTy->isFloatingPoint())
            StoreVal = ConstantExpr::getBitCast(StoreVal, ValTy);
          assert(StoreVal->getType() == ValTy && "Type mismatch!");

          // If the requested value was a vector constant, create it.
          if (EltTy != ValTy) {
            unsigned NumElts = cast<VectorType>(EltTy)->getNumElements();
            SmallVector<Constant*, 16> Elts(NumElts, StoreVal);
            StoreVal = ConstantVector::get(&Elts[0], NumElts);
          }
        }
        new StoreInst(StoreVal, EltPtr, MI);
        continue;
      }
      // Otherwise, if we're storing a byte variable, use a memset call for
      // this element.
    }

    // Cast the element pointer to BytePtrTy.
    if (EltPtr->getType() != BytePtrTy)
      EltPtr = new BitCastInst(EltPtr, BytePtrTy, EltPtr->getNameStr(), MI);

    // Cast the other pointer (if we have one) to BytePtrTy.
    if (OtherElt && OtherElt->getType() != BytePtrTy)
      OtherElt = new BitCastInst(OtherElt, BytePtrTy, OtherElt->getNameStr(),
                                 MI);

    unsigned EltSize = TD->getTypeAllocSize(EltTy);

    // Finally, insert the meminst for this element.
    if (isa<MemTransferInst>(MI)) {
      Value *Ops[] = {
        SROADest ? EltPtr : OtherElt,  // Dest ptr
        SROADest ? OtherElt : EltPtr,  // Src ptr
        ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
        ConstantInt::get(Type::Int32Ty, OtherEltAlign)  // Align
      };
      CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
    } else {
      assert(isa<MemSetInst>(MI));
      Value *Ops[] = {
        EltPtr, MI->getOperand(2),  // Dest, Value,
        ConstantInt::get(MI->getOperand(3)->getType(), EltSize), // Size
        Zero  // Align
      };
      CallInst::Create(TheFn, Ops, Ops + 4, "", MI);
    }
  }
  MI->eraseFromParent();
}
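
// For example (illustrative), a whole-aggregate memset over an alloca of
// { i32, i32 } that was split into %A.0 and %A.1:
//   call void @llvm.memset.i32(i8* %p, i8 0, i32 8, i32 4)
// becomes two plain stores of the expanded constant:
//   store i32 0, i32* %A.0
//   store i32 0, i32* %A.1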

/// RewriteStoreUserOfWholeAlloca - We found a store of an integer that
/// overwrites the entire allocation.  Extract out the pieces of the stored
/// integer and store them individually.
void SROA::RewriteStoreUserOfWholeAlloca(StoreInst *SI,
                                         AllocationInst *AI,
                                         SmallVector<AllocaInst*, 32> &NewElts){
  // Extract each element out of the integer according to its structure offset
  // and store the element value to the individual alloca.
  LLVMContext &Context = SI->getContext();
  Value *SrcVal = SI->getOperand(0);
  const Type *AllocaEltTy = AI->getType()->getElementType();
  uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);

  // If this isn't a store of an integer to the whole alloca, it may be a store
  // to the first element.  Just ignore the store in this case and normal SROA
  // will handle it.
  if (!isa<IntegerType>(SrcVal->getType()) ||
      TD->getTypeAllocSizeInBits(SrcVal->getType()) != AllocaSizeBits)
    return;
  // Handle tail padding by extending the operand
  if (TD->getTypeSizeInBits(SrcVal->getType()) != AllocaSizeBits)
    SrcVal = new ZExtInst(SrcVal,
                          Context.getIntegerType(AllocaSizeBits), "", SI);

  DOUT << "PROMOTING STORE TO WHOLE ALLOCA: " << *AI << *SI;

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    const StructLayout *Layout = TD->getStructLayout(EltSTy);

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Get the number of bits to shift SrcVal to get the value.
      const Type *FieldTy = EltSTy->getElementType(i);
      uint64_t Shift = Layout->getElementOffsetInBits(i);

      if (TD->isBigEndian())
        Shift = AllocaSizeBits-Shift-TD->getTypeAllocSizeInBits(FieldTy);

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

      // Ignore zero sized fields like {}, they obviously contain no data.
      if (FieldSizeBits == 0) continue;

      if (FieldSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal,
                               Context.getIntegerType(FieldSizeBits), "", SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == FieldTy) {
        // Storing to an integer field of this size, just do it.
      } else if (FieldTy->isFloatingPoint() || isa<VectorType>(FieldTy)) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, FieldTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                                    Context.getPointerTypeUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);
    }

  } else {
    const ArrayType *ATy = cast<ArrayType>(AllocaEltTy);
    const Type *ArrayEltTy = ATy->getElementType();
    uint64_t ElementOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
    uint64_t ElementSizeBits = TD->getTypeSizeInBits(ArrayEltTy);

    uint64_t Shift;

    if (TD->isBigEndian())
      Shift = AllocaSizeBits-ElementOffset;
    else
      Shift = 0;

    for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
      // Ignore zero sized fields like {}, they obviously contain no data.
      if (ElementSizeBits == 0) continue;

      Value *EltVal = SrcVal;
      if (Shift) {
        Value *ShiftVal = ConstantInt::get(EltVal->getType(), Shift);
        EltVal = BinaryOperator::CreateLShr(EltVal, ShiftVal,
                                            "sroa.store.elt", SI);
      }

      // Truncate down to an integer of the right size.
      if (ElementSizeBits != AllocaSizeBits)
        EltVal = new TruncInst(EltVal,
                               Context.getIntegerType(ElementSizeBits), "", SI);
      Value *DestField = NewElts[i];
      if (EltVal->getType() == ArrayEltTy) {
        // Storing to an integer field of this size, just do it.
      } else if (ArrayEltTy->isFloatingPoint() || isa<VectorType>(ArrayEltTy)) {
        // Bitcast to the right element type (for fp/vector values).
        EltVal = new BitCastInst(EltVal, ArrayEltTy, "", SI);
      } else {
        // Otherwise, bitcast the dest pointer (for aggregates).
        DestField = new BitCastInst(DestField,
                                    Context.getPointerTypeUnqual(EltVal->getType()),
                                    "", SI);
      }
      new StoreInst(EltVal, DestField, SI);

      if (TD->isBigEndian())
        Shift -= ElementOffset;
      else
        Shift += ElementOffset;
    }
  }

  SI->eraseFromParent();
}
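
// For example (illustrative, little-endian layout), storing i64 %v over an
// alloca of { i32, i32 } split into %A.0 and %A.1 becomes:
//   %lo = trunc i64 %v to i32
//   store i32 %lo, i32* %A.0
//   %sh = lshr i64 %v, 32
//   %hi = trunc i64 %sh to i32
//   store i32 %hi, i32* %A.1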

/// RewriteLoadUserOfWholeAlloca - We found a load of the entire allocation to
/// an integer.  Load the individual pieces to form the aggregate value.
void SROA::RewriteLoadUserOfWholeAlloca(LoadInst *LI, AllocationInst *AI,
                                        SmallVector<AllocaInst*, 32> &NewElts) {
  // Extract each element out of the NewElts according to its structure offset
  // and form the result value.
  const Type *AllocaEltTy = AI->getType()->getElementType();
  uint64_t AllocaSizeBits = TD->getTypeAllocSizeInBits(AllocaEltTy);

  // If this isn't a load of the whole alloca to an integer, it may be a load
  // of the first element.  Just ignore the load in this case and normal SROA
  // will handle it.
  if (!isa<IntegerType>(LI->getType()) ||
      TD->getTypeAllocSizeInBits(LI->getType()) != AllocaSizeBits)
    return;

  DOUT << "PROMOTING LOAD OF WHOLE ALLOCA: " << *AI << *LI;

  // There are two forms here: AI could be an array or struct.  Both cases
  // have different ways to compute the element offset.
  const StructLayout *Layout = 0;
  uint64_t ArrayEltBitOffset = 0;
  if (const StructType *EltSTy = dyn_cast<StructType>(AllocaEltTy)) {
    Layout = TD->getStructLayout(EltSTy);
  } else {
    const Type *ArrayEltTy = cast<ArrayType>(AllocaEltTy)->getElementType();
    ArrayEltBitOffset = TD->getTypeAllocSizeInBits(ArrayEltTy);
  }

  LLVMContext &Context = LI->getContext();

  Value *ResultVal =
    Context.getNullValue(Context.getIntegerType(AllocaSizeBits));

  for (unsigned i = 0, e = NewElts.size(); i != e; ++i) {
    // Load the value from the alloca.  If the NewElt is an aggregate, cast
    // the pointer to an integer of the same size before doing the load.
    Value *SrcField = NewElts[i];
    const Type *FieldTy =
      cast<PointerType>(SrcField->getType())->getElementType();
    uint64_t FieldSizeBits = TD->getTypeSizeInBits(FieldTy);

    // Ignore zero sized fields like {}, they obviously contain no data.
    if (FieldSizeBits == 0) continue;

    const IntegerType *FieldIntTy = Context.getIntegerType(FieldSizeBits);
    if (!isa<IntegerType>(FieldTy) && !FieldTy->isFloatingPoint() &&
        !isa<VectorType>(FieldTy))
      SrcField = new BitCastInst(SrcField,
                                 Context.getPointerTypeUnqual(FieldIntTy),
                                 "", LI);
    SrcField = new LoadInst(SrcField, "sroa.load.elt", LI);

    // If SrcField is a fp or vector of the right size but that isn't an
    // integer type, bitcast to an integer so we can shift it.
    if (SrcField->getType() != FieldIntTy)
      SrcField = new BitCastInst(SrcField, FieldIntTy, "", LI);

    // Zero extend the field to be the same size as the final alloca so that
    // we can shift and insert it.
    if (SrcField->getType() != ResultVal->getType())
      SrcField = new ZExtInst(SrcField, ResultVal->getType(), "", LI);

    // Determine the number of bits to shift SrcField.
    uint64_t Shift;
    if (Layout) // Struct case.
      Shift = Layout->getElementOffsetInBits(i);
    else  // Array case.
      Shift = i*ArrayEltBitOffset;

    if (TD->isBigEndian())
      Shift = AllocaSizeBits-Shift-FieldIntTy->getBitWidth();

    if (Shift) {
      Value *ShiftVal = ConstantInt::get(SrcField->getType(), Shift);
      SrcField = BinaryOperator::CreateShl(SrcField, ShiftVal, "", LI);
    }

    ResultVal = BinaryOperator::CreateOr(SrcField, ResultVal, "", LI);
  }

  // Handle tail padding by truncating the result
  if (TD->getTypeSizeInBits(LI->getType()) != AllocaSizeBits)
    ResultVal = new TruncInst(ResultVal, LI->getType(), "", LI);

  LI->replaceAllUsesWith(ResultVal);
  LI->eraseFromParent();
}
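
// For example (illustrative, little-endian layout), "%r = load i64* %P" over
// an alloca of { i32, i32 } split into %A.0 and %A.1 is rebuilt as:
//   %lo   = load i32* %A.0
//   %hi   = load i32* %A.1
//   %lo64 = zext i32 %lo to i64
//   %hi64 = zext i32 %hi to i64
//   %sh   = shl i64 %hi64, 32
//   %r    = or i64 %sh, %lo64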

/// HasPadding - Return true if the specified type has any structure or
/// alignment padding, false otherwise.
static bool HasPadding(const Type *Ty, const TargetData &TD) {
  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = TD.getStructLayout(STy);
    unsigned PrevFieldBitOffset = 0;
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      unsigned FieldBitOffset = SL->getElementOffsetInBits(i);

      // Padding in sub-elements?
      if (HasPadding(STy->getElementType(i), TD))
        return true;

      // Check to see if there is any padding between this element and the
      // previous one.
      if (i) {
        unsigned PrevFieldEnd =
          PrevFieldBitOffset+TD.getTypeSizeInBits(STy->getElementType(i-1));
        if (PrevFieldEnd < FieldBitOffset)
          return true;
      }

      PrevFieldBitOffset = FieldBitOffset;
    }

    // Check for tail padding.
    if (unsigned EltCount = STy->getNumElements()) {
      unsigned PrevFieldEnd = PrevFieldBitOffset +
                   TD.getTypeSizeInBits(STy->getElementType(EltCount-1));
      if (PrevFieldEnd < SL->getSizeInBits())
        return true;
    }

  } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    return HasPadding(ATy->getElementType(), TD);
  } else if (const VectorType *VTy = dyn_cast<VectorType>(Ty)) {
    return HasPadding(VTy->getElementType(), TD);
  }
  return TD.getTypeSizeInBits(Ty) != TD.getTypeAllocSizeInBits(Ty);
}
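
// For example (illustrative, assuming a target where i32 is 32-bit aligned),
// HasPadding returns true for "{ i8, i32 }" (24 padding bits after the i8)
// and false for "{ i32, i32 }".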

/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all of
  // the users are safe to transform.
  AllocaInfo Info;

  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafeUseOfAllocation(cast<Instruction>(*I), AI, Info);
    if (Info.isUnsafe) {
      DOUT << "Cannot transform: " << *AI << "  due to user: " << **I;
      return 0;
    }
  }

  // Okay, we know all the users are promotable.  If the aggregate is a memcpy
  // source and destination, we have to be careful.  In particular, the memcpy
  // could be moving around elements that live in structure padding of the LLVM
  // types, but may actually be used.  In these cases, we refuse to promote the
  // original type.
  if (Info.isMemCpySrc && Info.isMemCpyDst &&
      HasPadding(AI->getType()->getElementType(), *TD))
    return 0;

  // If we require cleanup, return 1, otherwise return 3.
  return Info.needsCleanup ? 1 : 3;
}

/// CleanupGEP - GEP is used by an Alloca, which can be promoted after the GEP
/// is canonicalized here.
void SROA::CleanupGEP(GetElementPtrInst *GEPI) {
  gep_type_iterator I = gep_type_begin(GEPI);
  ++I;

  const ArrayType *AT = dyn_cast<ArrayType>(*I);
  if (!AT)
    return;

  uint64_t NumElements = AT->getNumElements();

  if (isa<ConstantInt>(I.getOperand()))
    return;

  LLVMContext &Context = GEPI->getContext();

  if (NumElements == 1) {
    GEPI->setOperand(2, Context.getNullValue(Type::Int32Ty));
    return;
  }

  assert(NumElements == 2 && "Unhandled case!");
  // All users of the GEP must be loads.  At each use of the GEP, insert
  // two loads of the appropriate indexed GEP and select between them.
  Value *IsOne = new ICmpInst(GEPI, ICmpInst::ICMP_NE, I.getOperand(),
                              Context.getNullValue(I.getOperand()->getType()),
                              "isone");
  // Insert the new GEP instructions, which are properly indexed.
  SmallVector<Value*, 8> Indices(GEPI->op_begin()+1, GEPI->op_end());
  Indices[1] = Context.getNullValue(Type::Int32Ty);
  Value *ZeroIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
                                             Indices.begin(), Indices.end(),
                                             GEPI->getName()+".0", GEPI);
  Indices[1] = ConstantInt::get(Type::Int32Ty, 1);
  Value *OneIdx = GetElementPtrInst::Create(GEPI->getOperand(0),
                                            Indices.begin(), Indices.end(),
                                            GEPI->getName()+".1", GEPI);
  // Replace all loads of the variable index GEP with loads from both
  // indexes and a select.
  while (!GEPI->use_empty()) {
    LoadInst *LI = cast<LoadInst>(GEPI->use_back());
    Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
    Value *One  = new LoadInst(OneIdx , LI->getName()+".1", LI);
    Value *R = SelectInst::Create(IsOne, One, Zero, LI->getName(), LI);
    LI->replaceAllUsesWith(R);
    LI->eraseFromParent();
  }
  GEPI->eraseFromParent();
}
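
// For example (illustrative), a variable index into a two-element array whose
// only users are loads:
//   %p = getelementptr [2 x i32]* %A, i32 0, i32 %i
//   %v = load i32* %p
// is canonicalized into two fixed-index loads and a select:
//   %isone = icmp ne i32 %i, 0
//   %v0 = load i32* %p.0            ; GEP with constant index 0
//   %v1 = load i32* %p.1            ; GEP with constant index 1
//   %v  = select i1 %isone, i32 %v1, i32 %v0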

/// CleanupAllocaUsers - If SROA reported that it can promote the specified
/// allocation, but only if cleaned up, perform the cleanups required.
void SROA::CleanupAllocaUsers(AllocationInst *AI) {
  // At this point, we know that the end result will be SROA'd and promoted, so
  // we can insert ugly code if required so long as sroa+mem2reg will clean it
  // up.
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    User *U = *UI++;
    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U))
      CleanupGEP(GEPI);
    else {
      Instruction *I = cast<Instruction>(U);
      SmallVector<DbgInfoIntrinsic *, 2> DbgInUses;
      if (!isa<StoreInst>(I) && OnlyUsedByDbgInfoIntrinsics(I, &DbgInUses)) {
        // Safe to remove debug info uses.
        while (!DbgInUses.empty()) {
          DbgInfoIntrinsic *DI = DbgInUses.back(); DbgInUses.pop_back();
          DI->eraseFromParent();
        }
        I->eraseFromParent();
      }
    }
  }
}

/// MergeInType - Add the 'In' type to the accumulated vector type (VecTy) so
/// far at the offset specified by Offset (which is specified in bytes).
///
/// There are two cases we handle here:
///   1) A union of vector types of the same size and potentially its elements.
///      Here we turn element accesses into insert/extract element operations.
///      This promotes a <4 x float> with a store of float to the third element
///      into a <4 x float> that uses insert element.
///   2) A fully general blob of memory, which we turn into some (potentially
///      large) integer type with extract and insert operations where the loads
///      and stores would mutate the memory.
static void MergeInType(const Type *In, uint64_t Offset, const Type *&VecTy,
                        unsigned AllocaSize, const TargetData &TD,
                        LLVMContext &Context) {
  // If this could be contributing to a vector, analyze it.
  if (VecTy != Type::VoidTy) { // either null or a vector type.

    // If the In type is a vector that is the same size as the alloca, see if it
    // matches the existing VecTy.
    if (const VectorType *VInTy = dyn_cast<VectorType>(In)) {
      if (VInTy->getBitWidth()/8 == AllocaSize && Offset == 0) {
        // If we're storing/loading a vector of the right size, allow it as a
        // vector.  If this is the first vector we see, remember the type so
        // that we know the element size.
        if (VecTy == 0)
          VecTy = VInTy;
        return;
      }
    } else if (In == Type::FloatTy || In == Type::DoubleTy ||
               (isa<IntegerType>(In) && In->getPrimitiveSizeInBits() >= 8 &&
                isPowerOf2_32(In->getPrimitiveSizeInBits()))) {
      // If we're accessing something that could be an element of a vector, see
      // if the implied vector agrees with what we already have and if Offset is
      // compatible with it.
      unsigned EltSize = In->getPrimitiveSizeInBits()/8;
      if (Offset % EltSize == 0 &&
          AllocaSize % EltSize == 0 &&
          (VecTy == 0 ||
           cast<VectorType>(VecTy)->getElementType()
                 ->getPrimitiveSizeInBits()/8 == EltSize)) {
        if (VecTy == 0)
          VecTy = In->getContext().getVectorType(In, AllocaSize/EltSize);
        return;
      }
    }
  }

  // Otherwise, we have a case that we can't handle with an optimized vector
  // form.  We can still turn this into a large integer.
  VecTy = Type::VoidTy;
}
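
// For example (illustrative), a 16-byte alloca accessed only with float loads
// and stores at byte offsets 0, 4, 8 and 12 settles on VecTy = <4 x float>;
// a later i8 access at offset 3 would instead force the fallback
// VecTy = VoidTy, i.e. "treat the whole thing as one big integer".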

/// CanConvertToScalar - V is a pointer.  If we can convert the pointee and all
/// of its accesses to use a single vector type, return true, and set VecTy to
/// the new type.  If we could convert the alloca into a single promotable
/// integer, return true but set VecTy to VoidTy.  Further, if the use is not a
/// completely trivial use that mem2reg could promote, set IsNotTrivial.  Offset
/// is the current offset from the base of the alloca being analyzed.
///
/// If we see at least one access to the value that is of a vector type, set
/// the SawVec flag.
bool SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial, const Type *&VecTy,
                              bool &SawVec, uint64_t Offset,
                              unsigned AllocaSize) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // Don't break volatile loads.
      if (LI->isVolatile())
        return false;
      MergeInType(LI->getType(), Offset, VecTy,
                  AllocaSize, *TD, V->getContext());
      SawVec |= isa<VectorType>(LI->getType());
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V || SI->isVolatile()) return false;
      MergeInType(SI->getOperand(0)->getType(), Offset,
                  VecTy, AllocaSize, *TD, V->getContext());
      SawVec |= isa<VectorType>(SI->getOperand(0)->getType());
      continue;
    }

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (!CanConvertToScalar(BCI, IsNotTrivial, VecTy, SawVec, Offset,
                              AllocaSize))
        return false;
      IsNotTrivial = true;
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // If this is a GEP with variable indices, we can't handle it.
      if (!GEP->hasAllConstantIndices())
        return false;

      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(),
                                                &Indices[0], Indices.size());
      // See if all uses can be converted.
      if (!CanConvertToScalar(GEP, IsNotTrivial, VecTy, SawVec, Offset+GEPOffset,
                              AllocaSize))
        return false;
      IsNotTrivial = true;
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // handle it.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      // Store of constant value and constant size.
      if (isa<ConstantInt>(MSI->getValue()) &&
          isa<ConstantInt>(MSI->getLength())) {
        IsNotTrivial = true;
        continue;
      }
    }

    // If this is a memcpy or memmove into or out of the whole allocation, we
    // can handle it like a load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
      if (ConstantInt *Len = dyn_cast<ConstantInt>(MTI->getLength()))
        if (Len->getZExtValue() == AllocaSize && Offset == 0) {
          IsNotTrivial = true;
          continue;
        }
    }

    // Ignore dbg intrinsic.
    if (isa<DbgInfoIntrinsic>(User))
      continue;

    // Otherwise, we cannot handle this!
    return false;
  }

  return true;
}
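
// For example (illustrative), an alloca of [8 x i8] that is only accessed
// through "bitcast [8 x i8]* %A to i64*" loads and stores is convertible
// (VecTy stays null/VoidTy, so it becomes a single i64), whereas any GEP with
// a variable index makes CanConvertToScalar return false.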

/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, uint64_t Offset) {
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (BitCastInst *CI = dyn_cast<BitCastInst>(User)) {
      ConvertUsesToScalar(CI, NewAI, Offset);
      CI->eraseFromParent();
      continue;
    }

    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Compute the offset that this GEP adds to the pointer.
      SmallVector<Value*, 8> Indices(GEP->op_begin()+1, GEP->op_end());
      uint64_t GEPOffset = TD->getIndexedOffset(GEP->getOperand(0)->getType(),
                                                &Indices[0], Indices.size());
      ConvertUsesToScalar(GEP, NewAI, Offset+GEPOffset*8);
      GEP->eraseFromParent();
      continue;
    }

    IRBuilder<> Builder(User->getParent(), User);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *LoadedVal = Builder.CreateLoad(NewAI, "tmp");
      Value *NewLoadVal
        = ConvertScalar_ExtractValue(LoadedVal, LI->getType(), Offset, Builder);
      LI->replaceAllUsesWith(NewLoadVal);
      LI->eraseFromParent();
      continue;
    }

    if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");
      // FIXME: Remove once builder has Twine API.
      Value *Old = Builder.CreateLoad(NewAI, (NewAI->getName()+".in").str().c_str());
      Value *New = ConvertScalar_InsertValue(SI->getOperand(0), Old, Offset,
                                             Builder);
      Builder.CreateStore(New, NewAI);
      SI->eraseFromParent();
      continue;
    }

    // If this is a constant sized memset of a constant value (e.g. 0) we can
    // transform it into a store of the expanded constant value.
    if (MemSetInst *MSI = dyn_cast<MemSetInst>(User)) {
      assert(MSI->getRawDest() == Ptr && "Consistency error!");
      unsigned NumBytes = cast<ConstantInt>(MSI->getLength())->getZExtValue();
      if (NumBytes != 0) {
        unsigned Val = cast<ConstantInt>(MSI->getValue())->getZExtValue();

        // Compute the value replicated the right number of times.
        APInt APVal(NumBytes*8, Val);

        // Splat the value if non-zero.
        if (Val)
          for (unsigned i = 1; i != NumBytes; ++i)
            APVal |= APVal << 8;

        // FIXME: Remove once builder has Twine API.
        Value *Old = Builder.CreateLoad(NewAI, (NewAI->getName()+".in").str().c_str());
        Value *New = ConvertScalar_InsertValue(
                                    ConstantInt::get(User->getContext(), APVal),
                                               Old, Offset, Builder);
        Builder.CreateStore(New, NewAI);
      }
      MSI->eraseFromParent();
      continue;
    }

    // If this is a memcpy or memmove into or out of the whole allocation, we
    // can handle it like a load or store of the scalar type.
    if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(User)) {
      assert(Offset == 0 && "must be store to start of alloca");

      // If the source and destination are both to the same alloca, then this is
      // a noop copy-to-self, just delete it.  Otherwise, emit a load and store
      // as appropriate.
      AllocaInst *OrigAI = cast<AllocaInst>(Ptr->getUnderlyingObject());

      if (MTI->getSource()->getUnderlyingObject() != OrigAI) {
        // Dest must be OrigAI, change this to be a load from the original
        // pointer (bitcasted), then a store to our new alloca.
        assert(MTI->getRawDest() == Ptr && "Neither use is of pointer?");
        Value *SrcPtr = MTI->getSource();
        SrcPtr = Builder.CreateBitCast(SrcPtr, NewAI->getType());

        LoadInst *SrcVal = Builder.CreateLoad(SrcPtr, "srcval");
        SrcVal->setAlignment(MTI->getAlignment());
        Builder.CreateStore(SrcVal, NewAI);
      } else if (MTI->getDest()->getUnderlyingObject() != OrigAI) {
        // Src must be OrigAI, change this to be a load from NewAI then a store
        // through the original dest pointer (bitcasted).
        assert(MTI->getRawSource() == Ptr && "Neither use is of pointer?");
        LoadInst *SrcVal = Builder.CreateLoad(NewAI, "srcval");

        Value *DstPtr = Builder.CreateBitCast(MTI->getDest(), NewAI->getType());
        StoreInst *NewStore = Builder.CreateStore(SrcVal, DstPtr);
        NewStore->setAlignment(MTI->getAlignment());
      } else {
        // Noop transfer. Src == Dst
      }

      MTI->eraseFromParent();
      continue;
    }

    // If user is a dbg info intrinsic then it is safe to remove it.
    if (isa<DbgInfoIntrinsic>(User)) {
      User->eraseFromParent();
      continue;
    }

    llvm_unreachable("Unsupported operation!");
  }
}
1529 /// ConvertScalar_ExtractValue - Extract a value of type ToType from an integer
1530 /// or vector value FromVal, extracting the bits from the offset specified by
1531 /// Offset. This returns the value, which is of type ToType.
1533 /// This happens when we are converting an "integer union" to a single
1534 /// integer scalar, or when we are converting a "vector union" to a vector with
1535 /// insert/extractelement instructions.
1537 /// Offset is an offset from the original alloca, in bits that need to be
1538 /// shifted to the right.
1539 Value
*SROA::ConvertScalar_ExtractValue(Value
*FromVal
, const Type
*ToType
,
1540 uint64_t Offset
, IRBuilder
<> &Builder
) {
1541 // If the load is of the whole new alloca, no conversion is needed.
1542 if (FromVal
->getType() == ToType
&& Offset
== 0)
1545 LLVMContext
&Context
= FromVal
->getContext();
1547 // If the result alloca is a vector type, this is either an element
1548 // access or a bitcast to another vector type of the same size.
1549 if (const VectorType
*VTy
= dyn_cast
<VectorType
>(FromVal
->getType())) {
1550 if (isa
<VectorType
>(ToType
))
1551 return Builder
.CreateBitCast(FromVal
, ToType
, "tmp");
1553 // Otherwise it must be an element access.
1556 unsigned EltSize
= TD
->getTypeAllocSizeInBits(VTy
->getElementType());
1557 Elt
= Offset
/EltSize
;
1558 assert(EltSize
*Elt
== Offset
&& "Invalid modulus in validity checking");
1560 // Return the element extracted out of it.
1561 Value
*V
= Builder
.CreateExtractElement(FromVal
,
1562 ConstantInt::get(Type::Int32Ty
,Elt
),
1564 if (V
->getType() != ToType
)
1565 V
= Builder
.CreateBitCast(V
, ToType
, "tmp");
  // If ToType is a first class aggregate, extract out each of the pieces and
  // use insertvalue's to form the FCA.
  if (const StructType *ST = dyn_cast<StructType>(ToType)) {
    const StructLayout &Layout = *TD->getStructLayout(ST);
    Value *Res = Context.getUndef(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, ST->getElementType(i),
                                        Offset+Layout.getElementOffsetInBits(i),
                                              Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
    }
    return Res;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(ToType)) {
    uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
    Value *Res = Context.getUndef(AT);
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = ConvertScalar_ExtractValue(FromVal, AT->getElementType(),
                                              Offset+i*EltSize, Builder);
      Res = Builder.CreateInsertValue(Res, Elt, i, "tmp");
    }
    return Res;
  }
  // Otherwise, this must be a union that was converted to an integer value.
  const IntegerType *NTy = cast<IntegerType>(FromVal->getType());

  // If this is a big-endian system and the load is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD->isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = TD->getTypeStoreSizeInBits(NTy) -
            TD->getTypeStoreSizeInBits(ToType) - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative bitwidths (with shl) which are not defined.
  // We do this to support (f.e.) loads off the end of a structure where
  // only some bits are used.
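  // Illustrative example (hypothetical values): on a little-endian target,
  // extracting an i8 at bit Offset 8 from an i32 FromVal gives ShAmt == 8,
  // so FromVal is lshr'd by 8 and then truncated to i8 below.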
  if (ShAmt > 0 && (unsigned)ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateLShr(FromVal,
                                 ConstantInt::get(FromVal->getType(),
                                                  ShAmt), "tmp");
  else if (ShAmt < 0 && (unsigned)-ShAmt < NTy->getBitWidth())
    FromVal = Builder.CreateShl(FromVal,
                                ConstantInt::get(FromVal->getType(),
                                                 -ShAmt), "tmp");
  // Finally, unconditionally truncate the integer to the right width.
  unsigned LIBitWidth = TD->getTypeSizeInBits(ToType);
  if (LIBitWidth < NTy->getBitWidth())
    FromVal =
      Builder.CreateTrunc(FromVal, Context.getIntegerType(LIBitWidth), "tmp");
  else if (LIBitWidth > NTy->getBitWidth())
    FromVal =
      Builder.CreateZExt(FromVal, Context.getIntegerType(LIBitWidth), "tmp");

  // If the result is an integer, this is a trunc or bitcast.
  if (isa<IntegerType>(ToType)) {
    // Nothing more to do; the value is already the right width.
  } else if (ToType->isFloatingPoint() || isa<VectorType>(ToType)) {
    // Just do a bitcast, we know the sizes match up.
    FromVal = Builder.CreateBitCast(FromVal, ToType, "tmp");
  } else {
    // Otherwise must be a pointer.
    FromVal = Builder.CreateIntToPtr(FromVal, ToType, "tmp");
  }
  assert(FromVal->getType() == ToType && "Didn't convert right?");
  return FromVal;
}
/// ConvertScalar_InsertValue - Insert the value "SV" into the existing integer
/// or vector value "Old" at the offset specified by Offset.
///
/// This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.
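///
/// For example (illustrative only): storing an i8 at bit offset 16 into an
/// i32-typed scalar alloca on a little-endian target becomes a zext of the
/// i8 to i32, a shl by 16, an 'and' of Old with ~0x00FF0000, and an 'or' of
/// the two halves.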
Value *SROA::ConvertScalar_InsertValue(Value *SV, Value *Old,
                                       uint64_t Offset, IRBuilder<> &Builder) {

  // Convert the stored type to the actual type, shift it left to insert
  // then 'or' into place.
  const Type *AllocaType = Old->getType();
  LLVMContext &Context = Old->getContext();
  if (const VectorType *VTy = dyn_cast<VectorType>(AllocaType)) {
    uint64_t VecSize = TD->getTypeAllocSizeInBits(VTy);
    uint64_t ValSize = TD->getTypeAllocSizeInBits(SV->getType());

    // Changing the whole vector with memset or with an access of a different
    // vector type.
    if (ValSize == VecSize)
      return Builder.CreateBitCast(SV, AllocaType, "tmp");

    uint64_t EltSize = TD->getTypeAllocSizeInBits(VTy->getElementType());

    // Must be an element insertion.
    unsigned Elt = Offset/EltSize;

    if (SV->getType() != VTy->getElementType())
      SV = Builder.CreateBitCast(SV, VTy->getElementType(), "tmp");

    SV = Builder.CreateInsertElement(Old, SV,
                                     ConstantInt::get(Type::Int32Ty, Elt),
                                     "tmp");
    return SV;
  }
  // If SV is a first-class aggregate value, insert each value recursively.
  if (const StructType *ST = dyn_cast<StructType>(SV->getType())) {
    const StructLayout &Layout = *TD->getStructLayout(ST);
    for (unsigned i = 0, e = ST->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      Old = ConvertScalar_InsertValue(Elt, Old,
                                      Offset+Layout.getElementOffsetInBits(i),
                                      Builder);
    }
    return Old;
  }

  if (const ArrayType *AT = dyn_cast<ArrayType>(SV->getType())) {
    uint64_t EltSize = TD->getTypeAllocSizeInBits(AT->getElementType());
    for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
      Value *Elt = Builder.CreateExtractValue(SV, i, "tmp");
      Old = ConvertScalar_InsertValue(Elt, Old, Offset+i*EltSize, Builder);
    }
    return Old;
  }
  // If SV is a float, convert it to the appropriate integer type.
  // If it is a pointer, do the same.
  unsigned SrcWidth = TD->getTypeSizeInBits(SV->getType());
  unsigned DestWidth = TD->getTypeSizeInBits(AllocaType);
  unsigned SrcStoreWidth = TD->getTypeStoreSizeInBits(SV->getType());
  unsigned DestStoreWidth = TD->getTypeStoreSizeInBits(AllocaType);
  if (SV->getType()->isFloatingPoint() || isa<VectorType>(SV->getType()))
    SV = Builder.CreateBitCast(SV, Context.getIntegerType(SrcWidth), "tmp");
  else if (isa<PointerType>(SV->getType()))
    SV = Builder.CreatePtrToInt(SV, TD->getIntPtrType(), "tmp");

  // Zero extend or truncate the value if needed.
  if (SV->getType() != AllocaType) {
    if (SV->getType()->getPrimitiveSizeInBits() <
            AllocaType->getPrimitiveSizeInBits())
      SV = Builder.CreateZExt(SV, AllocaType, "tmp");
    else {
      // Truncation may be needed if storing more than the alloca can hold
      // (undefined behavior).
      SV = Builder.CreateTrunc(SV, AllocaType, "tmp");
      SrcWidth = DestWidth;
      SrcStoreWidth = DestStoreWidth;
    }
  }
  // If this is a big-endian system and the store is narrower than the
  // full alloca type, we need to do a shift to get the right bits.
  int ShAmt = 0;
  if (TD->isBigEndian()) {
    // On big-endian machines, the lowest bit is stored at the bit offset
    // from the pointer given by getTypeStoreSizeInBits.  This matters for
    // integers with a bitwidth that is not a multiple of 8.
    ShAmt = DestStoreWidth - SrcStoreWidth - Offset;
  } else {
    ShAmt = Offset;
  }

  // Note: we support negative bitwidths (with shr) which are not defined.
  // We do this to support (f.e.) stores off the end of a structure where
  // only some bits in the structure are set.
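  // Illustrative example (hypothetical values): storing an i8 at bit Offset 16
  // into an i32 alloca on a little-endian target gives ShAmt == 16; SV is
  // shifted left by 16 and Mask becomes 0x00FF0000 before the merge below.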
  APInt Mask(APInt::getLowBitsSet(DestWidth, SrcWidth));
  if (ShAmt > 0 && (unsigned)ShAmt < DestWidth) {
    SV = Builder.CreateShl(SV, ConstantInt::get(SV->getType(),
                           ShAmt), "tmp");
    Mask <<= ShAmt;
  } else if (ShAmt < 0 && (unsigned)-ShAmt < DestWidth) {
    SV = Builder.CreateLShr(SV, ConstantInt::get(SV->getType(),
                            -ShAmt), "tmp");
    Mask = Mask.lshr(-ShAmt);
  }
  // Mask out the bits we are about to insert from the old value, and or
  // in the new bits.
  if (SrcWidth != DestWidth) {
    assert(DestWidth > SrcWidth);
    Old = Builder.CreateAnd(Old, ConstantInt::get(Context, ~Mask), "mask");
    SV = Builder.CreateOr(Old, SV, "ins");
  }
  return SV;
}
/// PointsToConstantGlobal - Return true if V (possibly indirectly) points to
/// some part of a constant global variable.  This intentionally only accepts
/// constant expressions because we can't rewrite arbitrary instructions.
static bool PointsToConstantGlobal(Value *V) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return GV->isConstant();
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
    if (CE->getOpcode() == Instruction::BitCast ||
        CE->getOpcode() == Instruction::GetElementPtr)
      return PointsToConstantGlobal(CE->getOperand(0));
  return false;
}
/// isOnlyCopiedFromConstantGlobal - Recursively walk the uses of a (derived)
/// pointer to an alloca.  Ignore any reads of the pointer, return false if we
/// see any stores or other unknown uses.  If we see pointer arithmetic, keep
/// track of whether it moves the pointer (with isOffset) but otherwise traverse
/// the uses.  If we see a memcpy/memmove that targets an unoffset pointer to
/// the alloca, and if the source pointer is a pointer to a constant global, we
/// can optimize this.
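///
/// For example (illustrative only): an alloca whose only "write" is a memcpy
/// whose source operand is a bitcast of some constant global @G qualifies;
/// the caller can then replace uses of the alloca with @G directly.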
static bool isOnlyCopiedFromConstantGlobal(Value *V, Instruction *&TheCopy,
                                           bool isOffset) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    if (LoadInst *LI = dyn_cast<LoadInst>(*UI))
      // Ignore non-volatile loads, they are always ok.
      if (!LI->isVolatile())
        continue;

    if (BitCastInst *BCI = dyn_cast<BitCastInst>(*UI)) {
      // If uses of the bitcast are ok, we are ok.
      if (!isOnlyCopiedFromConstantGlobal(BCI, TheCopy, isOffset))
        return false;
      continue;
    }
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(*UI)) {
      // If the GEP has all zero indices, it doesn't offset the pointer.  If it
      // doesn't, it does.
      if (!isOnlyCopiedFromConstantGlobal(GEP, TheCopy,
                                          isOffset || !GEP->hasAllZeroIndices()))
        return false;
      continue;
    }

    // If this isn't our memcpy/memmove, reject it as something we can't
    // handle.
    if (!isa<MemTransferInst>(*UI))
      return false;

    // If we already have seen a copy, reject the second one.
    if (TheCopy) return false;

    // If the pointer has been offset from the start of the alloca, we can't
    // safely handle this.
    if (isOffset) return false;

    // If the memintrinsic isn't using the alloca as the dest, reject it.
    if (UI.getOperandNo() != 1) return false;

    MemIntrinsic *MI = cast<MemIntrinsic>(*UI);

    // If the source of the memcpy/move is not a constant global, reject it.
    if (!PointsToConstantGlobal(MI->getOperand(2)))
      return false;

    // Otherwise, the transform is safe.  Remember the copy instruction.
    TheCopy = MI;
  }
  return true;
}
/// isOnlyCopiedFromConstantGlobal - Return the copy instruction if the
/// specified alloca is only modified by a copy from a constant global.  If we
/// can prove this, we can replace any uses of the alloca with uses of the
/// global directly.  Returns null otherwise.
Instruction *SROA::isOnlyCopiedFromConstantGlobal(AllocationInst *AI) {
  Instruction *TheCopy = 0;
  if (::isOnlyCopiedFromConstantGlobal(AI, TheCopy, false))
    return TheCopy;
  return 0;
}