Reverting back to original 1.8 version so I can manually merge in patch.
[llvm-complete.git] / lib/Transforms/Scalar/ScalarReplAggregates.cpp
blob 843c908ba4408fcfb3ce7b35543302b8f713a475
//===- ScalarReplAggregates.cpp - Scalar Replacement of Aggregates --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file was developed by the LLVM research group and is distributed under
// the University of Illinois Open Source License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation implements the well known scalar replacement of
// aggregates transformation.  This xform breaks up alloca instructions of
// aggregate type (structure or array) into individual alloca instructions for
// each member (if possible).  Then, if possible, it transforms the individual
// alloca instructions into nice clean scalar SSA form.
//
// This combines a simple SRoA algorithm with the Mem2Reg algorithm because
// they often interact, especially for C++ programs.  As such, iterating
// between SRoA, then Mem2Reg until we run out of things to promote works well.
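//
// For example (an illustrative sketch in LLVM 1.x-era IR, not from a real
// test case):
//
//   %pair = type { int, int }
//   %P  = alloca %pair                              ; aggregate alloca
//   %P0 = getelementptr %pair* %P, int 0, uint 0
//   store int 1, int* %P0
//
// SRoA replaces %P with scalar allocas %P.0 and %P.1; Mem2Reg can then
// promote each of those to an SSA register.
//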
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Function.h"
#include "llvm/Pass.h"
#include "llvm/Instructions.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Visibility.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringExtras.h"
#include <iostream>
using namespace llvm;

namespace {
  Statistic<> NumReplaced("scalarrepl", "Number of allocas broken up");
  Statistic<> NumPromoted("scalarrepl", "Number of allocas promoted");
  Statistic<> NumConverted("scalarrepl",
                           "Number of aggregates converted to scalar");
  struct VISIBILITY_HIDDEN SROA : public FunctionPass {
    bool runOnFunction(Function &F);

    bool performScalarRepl(Function &F);
    bool performPromotion(Function &F);

    // getAnalysisUsage - This pass does not require any passes, but we know it
    // will not alter the CFG, so say so.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<DominatorTree>();
      AU.addRequired<DominanceFrontier>();
      AU.addRequired<TargetData>();
      AU.setPreservesCFG();
    }

  private:
    int isSafeElementUse(Value *Ptr);
    int isSafeUseOfAllocation(Instruction *User);
    int isSafeAllocaToScalarRepl(AllocationInst *AI);
    void CanonicalizeAllocaUsers(AllocationInst *AI);
    AllocaInst *AddNewAlloca(Function &F, const Type *Ty, AllocationInst *Base);

    const Type *CanConvertToScalar(Value *V, bool &IsNotTrivial);
    void ConvertToScalar(AllocationInst *AI, const Type *Ty);
    void ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset);
  };

  RegisterOpt<SROA> X("scalarrepl", "Scalar Replacement of Aggregates");
}

// Public interface to the ScalarReplAggregates pass
FunctionPass *llvm::createScalarReplAggregatesPass() { return new SROA(); }
bool SROA::runOnFunction(Function &F) {
  bool Changed = performPromotion(F);
  while (1) {
    bool LocalChange = performScalarRepl(F);
    if (!LocalChange) break;   // No need to repromote if no scalarrepl
    Changed = true;
    LocalChange = performPromotion(F);
    if (!LocalChange) break;   // No need to re-scalarrepl if no promotion
  }

  return Changed;
}
bool SROA::performPromotion(Function &F) {
  std::vector<AllocaInst*> Allocas;
  const TargetData &TD = getAnalysis<TargetData>();
  DominatorTree     &DT = getAnalysis<DominatorTree>();
  DominanceFrontier &DF = getAnalysis<DominanceFrontier>();

  BasicBlock &BB = F.getEntryBlock();  // Get the entry node for the function

  bool Changed = false;

  while (1) {
    Allocas.clear();

    // Find allocas that are safe to promote, by looking at all instructions
    // in the entry node
    for (BasicBlock::iterator I = BB.begin(), E = --BB.end(); I != E; ++I)
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I))       // Is it an alloca?
        if (isAllocaPromotable(AI, TD))
          Allocas.push_back(AI);

    if (Allocas.empty()) break;

    PromoteMemToReg(Allocas, DT, DF, TD);
    NumPromoted += Allocas.size();
    Changed = true;
  }

  return Changed;
}
// performScalarRepl - This algorithm is a simple worklist driven algorithm,
// which runs on all of the malloc/alloca instructions in the function,
// removing them if they are only used by getelementptr instructions.
//
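// For example (an illustrative sketch in LLVM 1.x-era IR):
//
//   %T = type { int, float }
//   %X = alloca %T
//   %F = getelementptr %T* %X, int 0, uint 1
//
// is rewritten so that %X becomes two scalar allocas, %X.0 (int) and
// %X.1 (float), and every use of %F refers to %X.1 directly.
//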
bool SROA::performScalarRepl(Function &F) {
  std::vector<AllocationInst*> WorkList;

  // Scan the entry basic block, adding any allocas and mallocs to the worklist
  BasicBlock &BB = F.getEntryBlock();
  for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
    if (AllocationInst *A = dyn_cast<AllocationInst>(I))
      WorkList.push_back(A);

  // Process the worklist
  bool Changed = false;
  while (!WorkList.empty()) {
    AllocationInst *AI = WorkList.back();
    WorkList.pop_back();

    // Check to see if we can turn this aggregate value (potentially with
    // casts) into a simple scalar value that can be mem2reg'd into a register.
    bool IsNotTrivial = false;
    if (const Type *ActualType = CanConvertToScalar(AI, IsNotTrivial))
      if (IsNotTrivial && ActualType != Type::VoidTy) {
        ConvertToScalar(AI, ActualType);
        Changed = true;
        continue;
      }

    // We cannot transform the allocation instruction if it is an array
    // allocation (allocations OF arrays are ok though), and an allocation of a
    // scalar value cannot be decomposed at all.
    if (AI->isArrayAllocation() ||
        (!isa<StructType>(AI->getAllocatedType()) &&
         !isa<ArrayType>(AI->getAllocatedType()))) continue;

    // Check that all of the users of the allocation are capable of being
    // transformed.
    switch (isSafeAllocaToScalarRepl(AI)) {
    default: assert(0 && "Unexpected value!");
    case 0:  // Not safe to scalar replace.
      continue;
    case 1:  // Safe, but requires cleanup/canonicalizations first
      CanonicalizeAllocaUsers(AI);
      // FALL THROUGH to the replacement below.
    case 3:  // Safe to scalar replace.
      break;
    }

    DEBUG(std::cerr << "Found inst to xform: " << *AI);
    Changed = true;

    std::vector<AllocaInst*> ElementAllocas;
    if (const StructType *ST = dyn_cast<StructType>(AI->getAllocatedType())) {
      ElementAllocas.reserve(ST->getNumContainedTypes());
      for (unsigned i = 0, e = ST->getNumContainedTypes(); i != e; ++i) {
        AllocaInst *NA = new AllocaInst(ST->getContainedType(i), 0,
                                        AI->getAlignment(),
                                        AI->getName() + "." + utostr(i), AI);
        ElementAllocas.push_back(NA);
        WorkList.push_back(NA);  // Add to worklist for recursive processing
      }
    } else {
      const ArrayType *AT = cast<ArrayType>(AI->getAllocatedType());
      ElementAllocas.reserve(AT->getNumElements());
      const Type *ElTy = AT->getElementType();
      for (unsigned i = 0, e = AT->getNumElements(); i != e; ++i) {
        AllocaInst *NA = new AllocaInst(ElTy, 0, AI->getAlignment(),
                                        AI->getName() + "." + utostr(i), AI);
        ElementAllocas.push_back(NA);
        WorkList.push_back(NA);  // Add to worklist for recursive processing
      }
    }

    // Now that we have created the alloca instructions that we want to use,
    // expand the getelementptr instructions to use them.
    while (!AI->use_empty()) {
      Instruction *User = cast<Instruction>(AI->use_back());
      GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
      // We now know that the GEP is of the form: GEP <ptr>, 0, <cst>
      unsigned Idx =
        (unsigned)cast<ConstantInt>(GEPI->getOperand(2))->getRawValue();

      assert(Idx < ElementAllocas.size() && "Index out of range?");
      AllocaInst *AllocaToUse = ElementAllocas[Idx];

      Value *RepValue;
      if (GEPI->getNumOperands() == 3) {
        // Do not insert a new getelementptr instruction with zero indices,
        // only to have it optimized out later.
        RepValue = AllocaToUse;
      } else {
        // We are indexing deeply into the structure, so we still need a
        // getelementptr instruction to finish the indexing.  This may be
        // expanded itself once the worklist is rerun.
        std::string OldName = GEPI->getName();  // Steal the old name.
        std::vector<Value*> NewArgs;
        NewArgs.push_back(Constant::getNullValue(Type::IntTy));
        NewArgs.insert(NewArgs.end(), GEPI->op_begin()+3, GEPI->op_end());
        GEPI->setName("");
        RepValue = new GetElementPtrInst(AllocaToUse, NewArgs, OldName, GEPI);
      }

      // Move all of the users over to the new GEP.
      GEPI->replaceAllUsesWith(RepValue);
      // Delete the old GEP
      GEPI->eraseFromParent();
    }

    // Finally, delete the Alloca instruction
    AI->getParent()->getInstList().erase(AI);
    NumReplaced++;
  }

  return Changed;
}
/// isSafeElementUse - Check to see if this use is an allowed use for a
/// getelementptr instruction of an array aggregate allocation.
///
int SROA::isSafeElementUse(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I) {
    Instruction *User = cast<Instruction>(*I);
    switch (User->getOpcode()) {
    case Instruction::Load:  break;
    case Instruction::Store:
      // Store is ok if storing INTO the pointer, not storing the pointer
      if (User->getOperand(0) == Ptr) return 0;
      break;
    case Instruction::GetElementPtr: {
      GetElementPtrInst *GEP = cast<GetElementPtrInst>(User);
      if (GEP->getNumOperands() > 1) {
        if (!isa<Constant>(GEP->getOperand(1)) ||
            !cast<Constant>(GEP->getOperand(1))->isNullValue())
          return 0;  // Using pointer arithmetic to navigate the array...
      }
      if (!isSafeElementUse(GEP)) return 0;
      break;
    }
    default:
      DEBUG(std::cerr << "  Transformation preventing inst: " << *User);
      return 0;
    }
  }
  return 3;  // All users look ok :)
}
/// AllUsersAreLoads - Return true if all users of this value are loads.
static bool AllUsersAreLoads(Value *Ptr) {
  for (Value::use_iterator I = Ptr->use_begin(), E = Ptr->use_end();
       I != E; ++I)
    if (cast<Instruction>(*I)->getOpcode() != Instruction::Load)
      return false;
  return true;
}
/// isSafeUseOfAllocation - Check to see if this user is an allowed use for an
/// aggregate allocation.
///
int SROA::isSafeUseOfAllocation(Instruction *User) {
  if (!isa<GetElementPtrInst>(User)) return 0;

  GetElementPtrInst *GEPI = cast<GetElementPtrInst>(User);
  gep_type_iterator I = gep_type_begin(GEPI), E = gep_type_end(GEPI);

  // The GEP is not safe to transform if not of the form "GEP <ptr>, 0, <cst>".
  if (I == E ||
      I.getOperand() != Constant::getNullValue(I.getOperand()->getType()))
    return 0;

  ++I;
  if (I == E) return 0;  // ran out of GEP indices??

  // If this is a use of an array allocation, do a bit more checking for sanity.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
    uint64_t NumElements = AT->getNumElements();

    if (ConstantInt *CI = dyn_cast<ConstantInt>(I.getOperand())) {
      // Check to make sure that the index falls within the array.  If not,
      // something funny is going on, so we won't do the optimization.
      //
      if (cast<ConstantInt>(GEPI->getOperand(2))->getRawValue() >= NumElements)
        return 0;

      // We cannot scalar repl this level of the array unless any array
      // sub-indices are in-range constants.  In particular, consider:
      // A[0][i].  We cannot know that the user isn't doing invalid things like
      // allowing i to index an out-of-range subscript that accesses A[1].
      //
      // Scalar replacing *just* the outer index of the array is probably not
      // going to be a win anyway, so just give up.
      for (++I; I != E && isa<ArrayType>(*I); ++I) {
        const ArrayType *SubArrayTy = cast<ArrayType>(*I);
        uint64_t NumElements = SubArrayTy->getNumElements();
        if (!isa<ConstantInt>(I.getOperand())) return 0;
        if (cast<ConstantInt>(I.getOperand())->getRawValue() >= NumElements)
          return 0;
      }

    } else {
      // If this is an array index and the index is not constant, we cannot
      // promote... that is unless the array has exactly one or two elements in
      // it, in which case we CAN promote it, but we have to canonicalize this
      // out if this is the only problem.
      if ((NumElements == 1 || NumElements == 2) &&
          AllUsersAreLoads(GEPI))
        return 1;  // Canonicalization required!
      return 0;
    }
  }

  // If there are any non-simple uses of this getelementptr, make sure to
  // reject them.
  return isSafeElementUse(GEPI);
}
/// isSafeAllocaToScalarRepl - Check to see if the specified allocation of an
/// aggregate can be broken down into elements.  Return 0 if not, 3 if safe,
/// or 1 if safe after canonicalization has been performed.
///
int SROA::isSafeAllocaToScalarRepl(AllocationInst *AI) {
  // Loop over the use list of the alloca.  We can only transform it if all of
  // the users are safe to transform.
  //
  int isSafe = 3;
  for (Value::use_iterator I = AI->use_begin(), E = AI->use_end();
       I != E; ++I) {
    isSafe &= isSafeUseOfAllocation(cast<Instruction>(*I));
    if (isSafe == 0) {
      DEBUG(std::cerr << "Cannot transform: " << *AI << "  due to user: "
                      << **I);
      return 0;
    }
  }

  // If we require cleanup, isSafe is now 1, otherwise it is 3.
  return isSafe;
}
/// CanonicalizeAllocaUsers - If SROA reported that it can promote the specified
/// allocation, but only if cleaned up, perform the cleanups required.
void SROA::CanonicalizeAllocaUsers(AllocationInst *AI) {
  // At this point, we know that the end result will be SROA'd and promoted, so
  // we can insert ugly code if required so long as sroa+mem2reg will clean it
  // up.
  for (Value::use_iterator UI = AI->use_begin(), E = AI->use_end();
       UI != E; ) {
    GetElementPtrInst *GEPI = cast<GetElementPtrInst>(*UI++);
    gep_type_iterator I = gep_type_begin(GEPI);
    ++I;

    if (const ArrayType *AT = dyn_cast<ArrayType>(*I)) {
      uint64_t NumElements = AT->getNumElements();

      if (!isa<ConstantInt>(I.getOperand())) {
        if (NumElements == 1) {
          GEPI->setOperand(2, Constant::getNullValue(Type::IntTy));
        } else {
          assert(NumElements == 2 && "Unhandled case!");
          // All users of the GEP must be loads.  At each use of the GEP, insert
          // two loads of the appropriate indexed GEP and select between them.
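          // For example (an illustrative sketch):
          //   x = A[i]  -->  x0 = A[0]; x1 = A[1]; x = (i != 0) ? x1 : x0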
          Value *IsOne = BinaryOperator::createSetNE(I.getOperand(),
                             Constant::getNullValue(I.getOperand()->getType()),
                                                     "isone", GEPI);
          // Insert the new GEP instructions, which are properly indexed.
          std::vector<Value*> Indices(GEPI->op_begin()+1, GEPI->op_end());
          Indices[1] = Constant::getNullValue(Type::IntTy);
          Value *ZeroIdx = new GetElementPtrInst(GEPI->getOperand(0), Indices,
                                                 GEPI->getName()+".0", GEPI);
          Indices[1] = ConstantInt::get(Type::IntTy, 1);
          Value *OneIdx = new GetElementPtrInst(GEPI->getOperand(0), Indices,
                                                GEPI->getName()+".1", GEPI);
          // Replace all loads of the variable index GEP with loads from both
          // indexes and a select.
          while (!GEPI->use_empty()) {
            LoadInst *LI = cast<LoadInst>(GEPI->use_back());
            Value *Zero = new LoadInst(ZeroIdx, LI->getName()+".0", LI);
            Value *One  = new LoadInst(OneIdx , LI->getName()+".1", LI);
            Value *R = new SelectInst(IsOne, One, Zero, LI->getName(), LI);
            LI->replaceAllUsesWith(R);
            LI->eraseFromParent();
          }
          GEPI->eraseFromParent();
        }
      }
    }
  }
}
/// MergeInType - Add the 'In' type to the accumulated type so far.  If the
/// types are incompatible, return true, otherwise update Accum and return
/// false.
///
/// There are two cases we handle here:
///   1) An effectively integer union, where the pieces are stored into as
///      smaller integers (common with byte swap and other idioms).
///   2) A union of a vector and its elements.  Here we turn element accesses
///      into insert/extract element operations.
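///
/// For example (illustrative): merging ubyte into an accumulated uint keeps
/// uint (case 1), and merging float into an accumulated <4 x float> keeps
/// the vector type (case 2).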
static bool MergeInType(const Type *In, const Type *&Accum) {
  // If this is our first type, just use it.
  const PackedType *PTy;
  if (Accum == Type::VoidTy || In == Accum) {
    Accum = In;
  } else if (In->isIntegral() && Accum->isIntegral()) {   // integer union.
    // Otherwise pick whichever type is larger.
    if (In->getTypeID() > Accum->getTypeID())
      Accum = In;
  } else if ((PTy = dyn_cast<PackedType>(Accum)) &&
             PTy->getElementType() == In) {
    // Accum is a vector, and we are accessing an element: ok.
  } else if ((PTy = dyn_cast<PackedType>(In)) &&
             PTy->getElementType() == Accum) {
    // In is a vector, and Accum is an element: ok, remember In.
    Accum = In;
  } else {
    return true;
  }
  return false;
}
/// getUIntAtLeastAsBitAs - Return an unsigned integer type that is at least
/// as big as the specified number of bits.  If there is no suitable type,
/// this returns null.
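/// For example, getUIntAtLeastAsBitAs(24) returns UIntTy (32 bits) and
/// getUIntAtLeastAsBitAs(65) returns null.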
const Type *getUIntAtLeastAsBitAs(unsigned NumBits) {
  if (NumBits > 64) return 0;
  if (NumBits > 32) return Type::ULongTy;
  if (NumBits > 16) return Type::UIntTy;
  if (NumBits > 8) return Type::UShortTy;
  return Type::UByteTy;
}
/// CanConvertToScalar - V is a pointer.  If we can convert the pointee to a
/// single scalar integer type, return that type.  Further, if the use is not
/// a completely trivial use that mem2reg could promote, set IsNotTrivial.  If
/// there are no uses of this pointer, return Type::VoidTy to differentiate
/// from failure.
///
const Type *SROA::CanConvertToScalar(Value *V, bool &IsNotTrivial) {
  const Type *UsedType = Type::VoidTy; // No uses, no forced type.
  const TargetData &TD = getAnalysis<TargetData>();
  const PointerType *PTy = cast<PointerType>(V->getType());

  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI!=E; ++UI) {
    Instruction *User = cast<Instruction>(*UI);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      if (MergeInType(LI->getType(), UsedType))
        return 0;

    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      // Storing the pointer, not into the value?
      if (SI->getOperand(0) == V) return 0;

      // NOTE: We could handle storing of FP imms into integers here!

      if (MergeInType(SI->getOperand(0)->getType(), UsedType))
        return 0;
    } else if (CastInst *CI = dyn_cast<CastInst>(User)) {
      if (!isa<PointerType>(CI->getType())) return 0;
      IsNotTrivial = true;
      const Type *SubTy = CanConvertToScalar(CI, IsNotTrivial);
      if (!SubTy || MergeInType(SubTy, UsedType)) return 0;
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      // Check to see if this is stepping over an element: GEP Ptr, int C
      if (GEP->getNumOperands() == 2 && isa<ConstantInt>(GEP->getOperand(1))) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getRawValue();
        unsigned ElSize = TD.getTypeSize(PTy->getElementType());
        unsigned BitOffset = Idx*ElSize*8;
        if (BitOffset > 64 || !isPowerOf2_32(ElSize)) return 0;

        IsNotTrivial = true;
        const Type *SubElt = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubElt == 0) return 0;
        if (SubElt != Type::VoidTy && SubElt->isInteger()) {
          const Type *NewTy =
            getUIntAtLeastAsBitAs(SubElt->getPrimitiveSizeInBits()+BitOffset);
          if (NewTy == 0 || MergeInType(NewTy, UsedType)) return 0;
          continue;
        }
      } else if (GEP->getNumOperands() == 3 &&
                 isa<ConstantInt>(GEP->getOperand(1)) &&
                 isa<ConstantInt>(GEP->getOperand(2)) &&
                 cast<Constant>(GEP->getOperand(1))->isNullValue()) {
        // We are stepping into an element, e.g. a structure or an array:
        // GEP Ptr, int 0, uint C
        const Type *AggTy = PTy->getElementType();
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getRawValue();

        if (const ArrayType *ATy = dyn_cast<ArrayType>(AggTy)) {
          if (Idx >= ATy->getNumElements()) return 0;  // Out of range.
        } else if (const PackedType *PackedTy = dyn_cast<PackedType>(AggTy)) {
          // Getting an element of the packed vector.
          if (Idx >= PackedTy->getNumElements()) return 0;  // Out of range.

          // Merge in the packed type.
          if (MergeInType(PackedTy, UsedType)) return 0;

          const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
          if (SubTy == 0) return 0;

          if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType))
            return 0;

          // We'll need to change this to an insert/extract element operation.
          IsNotTrivial = true;
          continue;  // Everything looks ok
        } else if (isa<StructType>(AggTy)) {
          // Structs are always ok.
        } else {
          return 0;
        }
        const Type *NTy = getUIntAtLeastAsBitAs(TD.getTypeSize(AggTy)*8);
        if (NTy == 0 || MergeInType(NTy, UsedType)) return 0;
        const Type *SubTy = CanConvertToScalar(GEP, IsNotTrivial);
        if (SubTy == 0) return 0;
        if (SubTy != Type::VoidTy && MergeInType(SubTy, UsedType))
          return 0;
        continue;  // Everything looks ok
      }
      return 0;
    } else {
      // Cannot handle this!
      return 0;
    }
  }

  return UsedType;
}
/// ConvertToScalar - The specified alloca passes the CanConvertToScalar
/// predicate and is non-trivial.  Convert it to something that can be
/// trivially promoted into a register by mem2reg.
void SROA::ConvertToScalar(AllocationInst *AI, const Type *ActualTy) {
  DEBUG(std::cerr << "CONVERT TO SCALAR: " << *AI << "  TYPE = "
                  << *ActualTy << "\n");
  ++NumConverted;

  BasicBlock *EntryBlock = AI->getParent();
  assert(EntryBlock == &EntryBlock->getParent()->front() &&
         "Not in the entry block!");
  EntryBlock->getInstList().remove(AI);  // Take the alloca out of the program.

  if (ActualTy->isInteger())
    ActualTy = ActualTy->getUnsignedVersion();

  // Create and insert the alloca.
  AllocaInst *NewAI = new AllocaInst(ActualTy, 0, AI->getName(),
                                     EntryBlock->begin());
  ConvertUsesToScalar(AI, NewAI, 0);
  delete AI;
}
/// ConvertUsesToScalar - Convert all of the users of Ptr to use the new alloca
/// directly.  This happens when we are converting an "integer union" to a
/// single integer scalar, or when we are converting a "vector union" to a
/// vector with insert/extractelement instructions.
///
/// Offset is an offset from the original alloca, in bits that need to be
/// shifted to the right.  By the end of this, there should be no uses of Ptr.
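///
/// For example (an illustrative sketch): with a 32-bit integer alloca, a
/// 16-bit load at Offset 16 becomes "load all 32 bits, shift right by 16,
/// truncate"; the matching store becomes "zero-extend, shift left by 16,
/// mask the old bits out, 'or' the new value in".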
void SROA::ConvertUsesToScalar(Value *Ptr, AllocaInst *NewAI, unsigned Offset) {
  bool isVectorInsert = isa<PackedType>(NewAI->getType()->getElementType());
  while (!Ptr->use_empty()) {
    Instruction *User = cast<Instruction>(Ptr->use_back());

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      // The load is a bit extract from NewAI shifted right by Offset bits.
      Value *NV = new LoadInst(NewAI, LI->getName(), LI);
      if (NV->getType() != LI->getType()) {
        if (const PackedType *PTy = dyn_cast<PackedType>(NV->getType())) {
          // Must be an element access.
          unsigned Elt = Offset/PTy->getElementType()->getPrimitiveSizeInBits();
          NV = new ExtractElementInst(NV, ConstantUInt::get(Type::UIntTy, Elt),
                                      "tmp", LI);
        } else {
          assert(NV->getType()->isInteger() && "Unknown promotion!");
          if (Offset && Offset < NV->getType()->getPrimitiveSizeInBits())
            NV = new ShiftInst(Instruction::Shr, NV,
                               ConstantUInt::get(Type::UByteTy, Offset),
                               LI->getName(), LI);
          NV = new CastInst(NV, LI->getType(), LI->getName(), LI);
        }
      }
      LI->replaceAllUsesWith(NV);
      LI->eraseFromParent();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(User)) {
      assert(SI->getOperand(0) != Ptr && "Consistency error!");

      // Convert the stored type to the actual type, shift it left to insert,
      // then 'or' into place.
      Value *SV = SI->getOperand(0);
      const Type *AllocaType = NewAI->getType()->getElementType();
      if (SV->getType() != AllocaType) {
        Value *Old = new LoadInst(NewAI, NewAI->getName()+".in", SI);

        if (const PackedType *PTy = dyn_cast<PackedType>(AllocaType)) {
          // Must be an element insertion.
          unsigned Elt = Offset/PTy->getElementType()->getPrimitiveSizeInBits();
          SV = new InsertElementInst(Old, SV,
                                     ConstantUInt::get(Type::UIntTy, Elt),
                                     "tmp", SI);
        } else {
          // If SV is signed, convert it to unsigned, so that the next cast
          // zero extends the value.
          if (SV->getType()->isSigned())
            SV = new CastInst(SV, SV->getType()->getUnsignedVersion(),
                              SV->getName(), SI);
          SV = new CastInst(SV, Old->getType(), SV->getName(), SI);
          if (Offset && Offset < SV->getType()->getPrimitiveSizeInBits())
            SV = new ShiftInst(Instruction::Shl, SV,
                               ConstantUInt::get(Type::UByteTy, Offset),
                               SV->getName()+".adj", SI);
          // Mask out the bits we are about to insert from the old value.
          unsigned TotalBits = SV->getType()->getPrimitiveSizeInBits();
          unsigned InsertBits =
            SI->getOperand(0)->getType()->getPrimitiveSizeInBits();
          if (TotalBits != InsertBits) {
            assert(TotalBits > InsertBits);
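            // Build a mask that clears only the InsertBits-wide field at
            // Offset.  For example (illustrative): inserting 8 bits at
            // Offset 8 into a 32-bit value yields Mask = 0xFFFF00FF.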
            uint64_t Mask = ~(((1ULL << InsertBits)-1) << Offset);
            if (TotalBits != 64)
              Mask = Mask & ((1ULL << TotalBits)-1);
            Old = BinaryOperator::createAnd(Old,
                                       ConstantUInt::get(Old->getType(), Mask),
                                            Old->getName()+".mask", SI);
            SV = BinaryOperator::createOr(Old, SV, SV->getName()+".ins", SI);
          }
        }
      }
      new StoreInst(SV, NewAI, SI);
      SI->eraseFromParent();

    } else if (CastInst *CI = dyn_cast<CastInst>(User)) {
      unsigned NewOff = Offset;
      const TargetData &TD = getAnalysis<TargetData>();
      if (TD.isBigEndian() && !isVectorInsert) {
        // Adjust the pointer.  For example, storing 16-bits into a 32-bit
        // alloca with just a cast makes it modify the top 16-bits.
        const Type *SrcTy = cast<PointerType>(Ptr->getType())->getElementType();
        const Type *DstTy = cast<PointerType>(CI->getType())->getElementType();
        int PtrDiffBits = TD.getTypeSize(SrcTy)*8-TD.getTypeSize(DstTy)*8;
        NewOff += PtrDiffBits;
      }
      ConvertUsesToScalar(CI, NewAI, NewOff);
      CI->eraseFromParent();
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
      const PointerType *AggPtrTy =
        cast<PointerType>(GEP->getOperand(0)->getType());
      const TargetData &TD = getAnalysis<TargetData>();
      unsigned AggSizeInBits = TD.getTypeSize(AggPtrTy->getElementType())*8;

      // Check to see if this is stepping over an element: GEP Ptr, int C
      unsigned NewOffset = Offset;
      if (GEP->getNumOperands() == 2) {
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(1))->getRawValue();
        unsigned BitOffset = Idx*AggSizeInBits;

        if (TD.isLittleEndian() || isVectorInsert)
          NewOffset += BitOffset;
        else
          NewOffset -= BitOffset;

      } else if (GEP->getNumOperands() == 3) {
        // We know that operand #1 (the first index) is zero.
        unsigned Idx = cast<ConstantInt>(GEP->getOperand(2))->getRawValue();
        const Type *AggTy = AggPtrTy->getElementType();
        if (const SequentialType *SeqTy = dyn_cast<SequentialType>(AggTy)) {
          unsigned ElSizeBits = TD.getTypeSize(SeqTy->getElementType())*8;

          if (TD.isLittleEndian() || isVectorInsert)
            NewOffset += ElSizeBits*Idx;
          else
            NewOffset += AggSizeInBits-ElSizeBits*(Idx+1);
        } else if (const StructType *STy = dyn_cast<StructType>(AggTy)) {
          unsigned EltBitOffset = TD.getStructLayout(STy)->MemberOffsets[Idx]*8;

          if (TD.isLittleEndian() || isVectorInsert)
            NewOffset += EltBitOffset;
          else {
            const PointerType *ElPtrTy = cast<PointerType>(GEP->getType());
            unsigned ElSizeBits = TD.getTypeSize(ElPtrTy->getElementType())*8;
            NewOffset += AggSizeInBits-(EltBitOffset+ElSizeBits);
          }

        } else {
          assert(0 && "Unsupported operation!");
          abort();
        }
      } else {
        assert(0 && "Unsupported operation!");
        abort();
      }
      ConvertUsesToScalar(GEP, NewAI, NewOffset);
      GEP->eraseFromParent();
    } else {
      assert(0 && "Unsupported operation!");
      abort();
    }
  }
}