//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
// taken. If obviously true, it marks read/write globals as constant, deletes
// variables only stored to, etc.
//
//===----------------------------------------------------------------------===//
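//
// Illustrative sketch (added example, not part of the original header): given
// an internal global whose address never escapes and which is never written
// after its initializer, e.g.
//
//   @answer = internal global i32 42
//   ...
//   %v = load i32* @answer
//
// the pass can mark @answer 'constant' (letting later passes fold the load to
// 42), while a global that is only ever stored to can be deleted outright.
//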
#define DEBUG_TYPE "globalopt"
#include "llvm/Transforms/IPO.h"
#include "llvm/CallingConv.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Operator.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"

using namespace llvm;
STATISTIC(NumMarked, "Number of globals marked constant");
STATISTIC(NumUnnamed, "Number of globals marked unnamed_addr");
STATISTIC(NumSRA, "Number of aggregate globals broken into scalars");
STATISTIC(NumHeapSRA, "Number of heap objects SRA'd");
STATISTIC(NumSubstitute, "Number of globals with initializers stored into them");
STATISTIC(NumDeleted, "Number of globals deleted");
STATISTIC(NumFnDeleted, "Number of functions deleted");
STATISTIC(NumGlobUses, "Number of global uses devirtualized");
STATISTIC(NumLocalized, "Number of globals localized");
STATISTIC(NumShrunkToBool, "Number of global vars shrunk to booleans");
STATISTIC(NumFastCallFns, "Number of functions converted to fastcc");
STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
STATISTIC(NumNestRemoved, "Number of nest attributes removed");
STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");
namespace {
  struct GlobalStatus;
  struct GlobalOpt : public ModulePass {
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
    }
    static char ID; // Pass identification, replacement for typeid
    GlobalOpt() : ModulePass(ID) {
      initializeGlobalOptPass(*PassRegistry::getPassRegistry());
    }

    bool runOnModule(Module &M);

  private:
    GlobalVariable *FindGlobalCtors(Module &M);
    bool OptimizeFunctions(Module &M);
    bool OptimizeGlobalVars(Module &M);
    bool OptimizeGlobalAliases(Module &M);
    bool OptimizeGlobalCtorsList(GlobalVariable *&GCL);
    bool ProcessGlobal(GlobalVariable *GV, Module::global_iterator &GVI);
    bool ProcessInternalGlobal(GlobalVariable *GV, Module::global_iterator &GVI,
                               const SmallPtrSet<const PHINode*, 16> &PHIUsers,
                               const GlobalStatus &GS);
    bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn);
  };
}

char GlobalOpt::ID = 0;
INITIALIZE_PASS(GlobalOpt, "globalopt",
                "Global Variable Optimizer", false, false)

ModulePass *llvm::createGlobalOptimizerPass() { return new GlobalOpt(); }
namespace {

/// GlobalStatus - As we analyze each global, keep track of some information
/// about it. If we find out that the address of the global is taken, none of
/// this info will be accurate.
struct GlobalStatus {
  /// isCompared - True if the global's address is used in a comparison.
  bool isCompared;

  /// isLoaded - True if the global is ever loaded. If the global isn't ever
  /// loaded it can be deleted.
  bool isLoaded;

  /// StoredType - Keep track of what stores to the global look like.
  ///
  enum StoredType {
    /// NotStored - There is no store to this global. It can thus be marked
    /// constant.
    NotStored,

    /// isInitializerStored - This global is stored to, but the only thing
    /// stored is the constant it was initialized with. This is only tracked
    /// for scalar globals.
    isInitializerStored,

    /// isStoredOnce - This global is stored to, but only its initializer and
    /// one other value is ever stored to it. If this global isStoredOnce, we
    /// track the value stored to it in StoredOnceValue below. This is only
    /// tracked for scalar globals.
    isStoredOnce,

    /// isStored - This global is stored to by multiple values or something else
    /// that we cannot track.
    isStored
  } StoredType;

  /// StoredOnceValue - If only one value (besides the initializer constant) is
  /// ever stored to this global, keep track of what value it is.
  Value *StoredOnceValue;

  /// AccessingFunction/HasMultipleAccessingFunctions - These start out
  /// null/false. When the first accessing function is noticed, it is recorded.
  /// When a second different accessing function is noticed,
  /// HasMultipleAccessingFunctions is set to true.
  const Function *AccessingFunction;
  bool HasMultipleAccessingFunctions;

  /// HasNonInstructionUser - Set to true if this global has a user that is not
  /// an instruction (e.g. a constant expr or GV initializer).
  bool HasNonInstructionUser;

  /// HasPHIUser - Set to true if this global has a user that is a PHI node.
  bool HasPHIUser;

  GlobalStatus() : isCompared(false), isLoaded(false), StoredType(NotStored),
                   StoredOnceValue(0), AccessingFunction(0),
                   HasMultipleAccessingFunctions(false),
                   HasNonInstructionUser(false), HasPHIUser(false) {}
};

}
// SafeToDestroyConstant - It is safe to destroy a constant iff it is only used
// by constants itself. Note that constants cannot be cyclic, so this test is
// pretty easy to implement recursively.
//
static bool SafeToDestroyConstant(const Constant *C) {
  if (isa<GlobalValue>(C)) return false;

  for (Value::const_use_iterator UI = C->use_begin(), E = C->use_end(); UI != E;
       ++UI)
    if (const Constant *CU = dyn_cast<Constant>(*UI)) {
      if (!SafeToDestroyConstant(CU)) return false;
    } else
      return false;
  return true;
}
/// AnalyzeGlobal - Look at all uses of the global and fill in the GlobalStatus
/// structure. If the global has its address taken, return true to indicate we
/// can't do anything with it.
///
static bool AnalyzeGlobal(const Value *V, GlobalStatus &GS,
                          SmallPtrSet<const PHINode*, 16> &PHIUsers) {
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
       ++UI) {
    const User *U = *UI;
    if (const ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      GS.HasNonInstructionUser = true;

      // If the result of the constantexpr isn't pointer type, then we won't
      // know to expect it in various places. Just reject early.
      if (!isa<PointerType>(CE->getType())) return true;

      if (AnalyzeGlobal(CE, GS, PHIUsers)) return true;
    } else if (const Instruction *I = dyn_cast<Instruction>(U)) {
      if (!GS.HasMultipleAccessingFunctions) {
        const Function *F = I->getParent()->getParent();
        if (GS.AccessingFunction == 0)
          GS.AccessingFunction = F;
        else if (GS.AccessingFunction != F)
          GS.HasMultipleAccessingFunctions = true;
      }
      if (const LoadInst *LI = dyn_cast<LoadInst>(I)) {
        GS.isLoaded = true;
        if (LI->isVolatile()) return true;  // Don't hack on volatile loads.
      } else if (const StoreInst *SI = dyn_cast<StoreInst>(I)) {
        // Don't allow a store OF the address, only stores TO the address.
        if (SI->getOperand(0) == V) return true;

        if (SI->isVolatile()) return true;  // Don't hack on volatile stores.

        // If this is a direct store to the global (i.e., the global is a scalar
        // value, not an aggregate), keep more specific information about
        // stores.
        if (GS.StoredType != GlobalStatus::isStored) {
          if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(
                                                           SI->getOperand(1))) {
            Value *StoredVal = SI->getOperand(0);
            if (StoredVal == GV->getInitializer()) {
              if (GS.StoredType < GlobalStatus::isInitializerStored)
                GS.StoredType = GlobalStatus::isInitializerStored;
            } else if (isa<LoadInst>(StoredVal) &&
                       cast<LoadInst>(StoredVal)->getOperand(0) == GV) {
              if (GS.StoredType < GlobalStatus::isInitializerStored)
                GS.StoredType = GlobalStatus::isInitializerStored;
            } else if (GS.StoredType < GlobalStatus::isStoredOnce) {
              GS.StoredType = GlobalStatus::isStoredOnce;
              GS.StoredOnceValue = StoredVal;
            } else if (GS.StoredType == GlobalStatus::isStoredOnce &&
                       GS.StoredOnceValue == StoredVal) {
              // noop.
            } else {
              GS.StoredType = GlobalStatus::isStored;
            }
          } else {
            GS.StoredType = GlobalStatus::isStored;
          }
        }
      } else if (isa<GetElementPtrInst>(I)) {
        if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
      } else if (isa<SelectInst>(I)) {
        if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
      } else if (const PHINode *PN = dyn_cast<PHINode>(I)) {
        // PHI nodes we can check just like select or GEP instructions, but we
        // have to be careful about infinite recursion.
        if (PHIUsers.insert(PN))  // Not already visited.
          if (AnalyzeGlobal(I, GS, PHIUsers)) return true;
        GS.HasPHIUser = true;
      } else if (isa<CmpInst>(I)) {
        GS.isCompared = true;
      } else if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(I)) {
        if (MTI->isVolatile()) return true;
        if (MTI->getArgOperand(0) == V)
          GS.StoredType = GlobalStatus::isStored;
        if (MTI->getArgOperand(1) == V)
          GS.isLoaded = true;
      } else if (const MemSetInst *MSI = dyn_cast<MemSetInst>(I)) {
        assert(MSI->getArgOperand(0) == V && "Memset only takes one pointer!");
        if (MSI->isVolatile()) return true;
        GS.StoredType = GlobalStatus::isStored;
      } else {
        return true;  // Any other non-load instruction might take address!
      }
    } else if (const Constant *C = dyn_cast<Constant>(U)) {
      GS.HasNonInstructionUser = true;
      // We might have a dead and dangling constant hanging off of here.
      if (!SafeToDestroyConstant(C))
        return true;
    } else {
      GS.HasNonInstructionUser = true;
      // Otherwise must be some other user.
      return true;
    }
  }

  return false;
}
static Constant *getAggregateConstantElement(Constant *Agg, Constant *Idx) {
  ConstantInt *CI = dyn_cast<ConstantInt>(Idx);
  if (!CI) return 0;
  unsigned IdxV = CI->getZExtValue();

  if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Agg)) {
    if (IdxV < CS->getNumOperands()) return CS->getOperand(IdxV);
  } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Agg)) {
    if (IdxV < CA->getNumOperands()) return CA->getOperand(IdxV);
  } else if (ConstantVector *CP = dyn_cast<ConstantVector>(Agg)) {
    if (IdxV < CP->getNumOperands()) return CP->getOperand(IdxV);
  } else if (isa<ConstantAggregateZero>(Agg)) {
    if (const StructType *STy = dyn_cast<StructType>(Agg->getType())) {
      if (IdxV < STy->getNumElements())
        return Constant::getNullValue(STy->getElementType(IdxV));
    } else if (const SequentialType *STy =
               dyn_cast<SequentialType>(Agg->getType())) {
      return Constant::getNullValue(STy->getElementType());
    }
  } else if (isa<UndefValue>(Agg)) {
    if (const StructType *STy = dyn_cast<StructType>(Agg->getType())) {
      if (IdxV < STy->getNumElements())
        return UndefValue::get(STy->getElementType(IdxV));
    } else if (const SequentialType *STy =
               dyn_cast<SequentialType>(Agg->getType())) {
      return UndefValue::get(STy->getElementType());
    }
  }
  return 0;
}
/// CleanupConstantGlobalUsers - We just marked GV constant. Loop over all
/// users of the global, cleaning up the obvious ones. This is largely just a
/// quick scan over the use list to clean up the easy and obvious cruft. This
/// returns true if it made a change.
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init) {
  bool Changed = false;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;) {
    User *U = *UI++;

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (Init) {
        // Replace the load with the initializer.
        LI->replaceAllUsesWith(Init);
        LI->eraseFromParent();
        Changed = true;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Store must be unreachable or storing Init into the global.
      SI->eraseFromParent();
      Changed = true;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        Constant *SubInit = 0;
        if (Init)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
        Changed |= CleanupConstantGlobalUsers(CE, SubInit);
      } else if (CE->getOpcode() == Instruction::BitCast &&
                 CE->getType()->isPointerTy()) {
        // Pointer cast, delete any stores and memsets to the global.
        Changed |= CleanupConstantGlobalUsers(CE, 0);
      }

      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // Do not transform "gepinst (gep constexpr (GV))" here, because forming
      // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
      // and will invalidate our notion of what Init is.
      Constant *SubInit = 0;
      if (!isa<ConstantExpr>(GEP->getOperand(0))) {
        ConstantExpr *CE =
          dyn_cast_or_null<ConstantExpr>(ConstantFoldInstruction(GEP));
        if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
      }
      Changed |= CleanupConstantGlobalUsers(GEP, SubInit);

      if (GEP->use_empty()) {
        GEP->eraseFromParent();
        Changed = true;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
      if (MI->getRawDest() == V) {
        MI->eraseFromParent();
        Changed = true;
      }
    } else if (Constant *C = dyn_cast<Constant>(U)) {
      // If we have a chain of dead constantexprs or other things dangling from
      // us, and if they are all dead, nuke them without remorse.
      if (SafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        CleanupConstantGlobalUsers(V, Init);
        return true;
      }
    }
  }
  return Changed;
}
/// isSafeSROAElementUse - Return true if the specified instruction is a safe
/// user of a derived expression from a global that we want to SROA.
static bool isSafeSROAElementUse(Value *V) {
  // We might have a dead and dangling constant hanging off of here.
  if (Constant *C = dyn_cast<Constant>(V))
    return SafeToDestroyConstant(C);

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Loads are ok.
  if (isa<LoadInst>(I)) return true;

  // Stores *to* the pointer are ok.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getOperand(0) != V;

  // Otherwise, it must be a GEP.
  GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I);
  if (GEPI == 0) return false;

  if (GEPI->getNumOperands() < 3 || !isa<Constant>(GEPI->getOperand(1)) ||
      !cast<Constant>(GEPI->getOperand(1))->isNullValue())
    return false;

  for (Value::use_iterator I = GEPI->use_begin(), E = GEPI->use_end();
       I != E; ++I)
    if (!isSafeSROAElementUse(*I))
      return false;
  return true;
}
/// IsUserOfGlobalSafeForSRA - U is a direct user of the specified global value.
/// Look at it and its uses and decide whether it is safe to SROA this global.
///
static bool IsUserOfGlobalSafeForSRA(User *U, GlobalValue *GV) {
  // The user of the global must be a GEP Inst or a ConstantExpr GEP.
  if (!isa<GetElementPtrInst>(U) &&
      (!isa<ConstantExpr>(U) ||
       cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
    return false;

  // Check to see if this ConstantExpr GEP is SRA'able. In particular, we
  // don't like < 3 operand CE's, and we don't like non-constant integer
  // indices. This enforces that all uses are 'gep GV, 0, C, ...' for some
  // constant C.
  if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
      !cast<Constant>(U->getOperand(1))->isNullValue() ||
      !isa<ConstantInt>(U->getOperand(2)))
    return false;

  gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
  ++GEPI;  // Skip over the pointer index.

  // If this is a use of an array allocation, do a bit more checking for sanity.
  if (const ArrayType *AT = dyn_cast<ArrayType>(*GEPI)) {
    uint64_t NumElements = AT->getNumElements();
    ConstantInt *Idx = cast<ConstantInt>(U->getOperand(2));

    // Check to make sure that index falls within the array. If not,
    // something funny is going on, so we won't do the optimization.
    //
    if (Idx->getZExtValue() >= NumElements)
      return false;

    // We cannot scalar repl this level of the array unless any array
    // sub-indices are in-range constants. In particular, consider:
    // A[0][i]. We cannot know that the user isn't doing invalid things like
    // allowing i to index an out-of-range subscript that accesses A[1].
    //
    // Scalar replacing *just* the outer index of the array is probably not
    // going to be a win anyway, so just give up.
    for (++GEPI;  // Skip array index.
         GEPI != E;
         ++GEPI) {
      uint64_t NumElements;
      if (const ArrayType *SubArrayTy = dyn_cast<ArrayType>(*GEPI))
        NumElements = SubArrayTy->getNumElements();
      else if (const VectorType *SubVectorTy = dyn_cast<VectorType>(*GEPI))
        NumElements = SubVectorTy->getNumElements();
      else {
        assert((*GEPI)->isStructTy() &&
               "Indexed GEP type is not array, vector, or struct!");
        continue;
      }

      ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
      if (!IdxVal || IdxVal->getZExtValue() >= NumElements)
        return false;
    }
  }

  for (Value::use_iterator I = U->use_begin(), E = U->use_end(); I != E; ++I)
    if (!isSafeSROAElementUse(*I))
      return false;
  return true;
}
/// GlobalUsersSafeToSRA - Look at all uses of the global and decide whether it
/// is safe for us to perform this transformation.
///
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI)
    if (!IsUserOfGlobalSafeForSRA(*UI, GV))
      return false;
  return true;
}
/// SRAGlobal - Perform scalar replacement of aggregates on the specified global
/// variable. This opens the door for other optimizations by exposing the
/// behavior of the program in a more fine-grained way. We have determined that
/// this transformation is safe already. We return the first global variable we
/// insert so that the caller can reprocess it.
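///
/// Illustrative sketch (added example, not in the original comment): a global
///   @pair = internal global { i32, i32 } { i32 1, i32 2 }
/// whose only uses are GEPs of the form 'getelementptr @pair, i32 0, i32 C'
/// would be split into
///   @pair.0 = internal global i32 1
///   @pair.1 = internal global i32 2
/// and each GEP rewritten to refer to the matching scalar global directly.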
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const TargetData &TD) {
  // Make sure this global only has simple uses that we can SRA.
  if (!GlobalUsersSafeToSRA(GV))
    return 0;

  assert(GV->hasLocalLinkage() && !GV->isConstant());
  Constant *Init = GV->getInitializer();
  const Type *Ty = Init->getType();

  std::vector<GlobalVariable*> NewGlobals;
  Module::GlobalListType &Globals = GV->getParent()->getGlobalList();

  // Get the alignment of the global, either explicit or target-specific.
  unsigned StartAlignment = GV->getAlignment();
  if (StartAlignment == 0)
    StartAlignment = TD.getABITypeAlignment(GV->getType());

  if (const StructType *STy = dyn_cast<StructType>(Ty)) {
    NewGlobals.reserve(STy->getNumElements());
    const StructLayout &Layout = *TD.getStructLayout(STy);
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Constant *In = getAggregateConstantElement(Init,
                     ConstantInt::get(Type::getInt32Ty(STy->getContext()), i));
      assert(In && "Couldn't get element of initializer?");
      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->isThreadLocal(),
                                              GV->getType()->getAddressSpace());
      Globals.insert(GV, NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field. If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      uint64_t FieldOffset = Layout.getElementOffset(i);
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
      if (NewAlign > TD.getABITypeAlignment(STy->getElementType(i)))
        NGV->setAlignment(NewAlign);
    }
  } else if (const SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
    unsigned NumElements = 0;
    if (const ArrayType *ATy = dyn_cast<ArrayType>(STy))
      NumElements = ATy->getNumElements();
    else
      NumElements = cast<VectorType>(STy)->getNumElements();

    if (NumElements > 16 && GV->hasNUsesOrMore(16))
      return 0; // It's not worth it.
    NewGlobals.reserve(NumElements);

    uint64_t EltSize = TD.getTypeAllocSize(STy->getElementType());
    unsigned EltAlign = TD.getABITypeAlignment(STy->getElementType());
    for (unsigned i = 0, e = NumElements; i != e; ++i) {
      Constant *In = getAggregateConstantElement(Init,
                     ConstantInt::get(Type::getInt32Ty(Init->getContext()), i));
      assert(In && "Couldn't get element of initializer?");

      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->isThreadLocal(),
                                              GV->getType()->getAddressSpace());
      Globals.insert(GV, NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field. If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i);
      if (NewAlign > EltAlign)
        NGV->setAlignment(NewAlign);
    }
  }

  if (NewGlobals.empty())
    return 0;

  DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV);

  Constant *NullInt =Constant::getNullValue(Type::getInt32Ty(GV->getContext()));

  // Loop over all of the uses of the global, replacing the constantexpr geps,
  // with smaller constantexpr geps or direct references.
  while (!GV->use_empty()) {
    User *GEP = GV->use_back();
    assert(((isa<ConstantExpr>(GEP) &&
             cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
            isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");

    // Ignore the first index operand, which has to be zero or else the program
    // is quite broken (undefined). Get the second index, which is the structure
    // or array index.
    unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
    if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access.

    Value *NewPtr = NewGlobals[Val];

    // Form a shorter GEP if needed.
    if (GEP->getNumOperands() > 3) {
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
        SmallVector<Constant*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
          Idxs.push_back(CE->getOperand(i));
        NewPtr = ConstantExpr::getGetElementPtr(cast<Constant>(NewPtr),
                                                &Idxs[0], Idxs.size());
      } else {
        GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
        SmallVector<Value*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
          Idxs.push_back(GEPI->getOperand(i));
        NewPtr = GetElementPtrInst::Create(NewPtr, Idxs.begin(), Idxs.end(),
                                           GEPI->getName()+"."+Twine(Val),GEPI);
      }
    }
    GEP->replaceAllUsesWith(NewPtr);

    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
      GEPI->eraseFromParent();
    else
      cast<ConstantExpr>(GEP)->destroyConstant();
  }

  // Delete the old global, now that it is dead.
  Globals.erase(GV);
  ++NumSRA;

  // Loop over the new globals array deleting any globals that are obviously
  // dead. This can arise due to scalarization of a structure or an array that
  // has elements that are dead.
  unsigned FirstGlobal = 0;
  for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i)
    if (NewGlobals[i]->use_empty()) {
      Globals.erase(NewGlobals[i]);
      if (FirstGlobal == i) ++FirstGlobal;
    }

  return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : 0;
}
/// AllUsesOfValueWillTrapIfNull - Return true if all users of the specified
/// value will trap if the value is dynamically null. PHIs keeps track of any
/// phi nodes we've seen to avoid reprocessing them.
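///
/// Example (illustrative, not from the original comment): if every use of %p
/// is a 'load' or 'store' through %p, or a call made through %p, then each of
/// those uses would trap when %p is null, so a null value can be assumed
/// unreachable past them; a use that merely stores or compares %p itself does
/// not trap and defeats the analysis.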
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
                                         SmallPtrSet<const PHINode*, 8> &PHIs) {
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
       ++UI) {
    const User *U = *UI;

    if (isa<LoadInst>(U)) {
      // Will trap.
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Storing the value.
      }
    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
      if (CI->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
      if (II->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
      // If we've already seen this phi node, ignore it, it has already been
      // checked.
      if (PHIs.insert(PN) && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
        return false;
    } else if (isa<ICmpInst>(U) &&
               isa<ConstantPointerNull>(UI->getOperand(1))) {
      // Ignore icmp X, null
    } else {
      //cerr << "NONTRAPPING USE: " << *U;
      return false;
    }
  }
  return true;
}
/// AllUsesOfLoadedValueWillTrapIfNull - Return true if all uses of any loads
/// from GV will trap if the loaded value is null. Note that this also permits
/// comparisons of the loaded value against null, as a special case.
static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
  for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI) {
    const User *U = *UI;
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      SmallPtrSet<const PHINode*, 8> PHIs;
      if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
        return false;
    } else if (isa<StoreInst>(U)) {
      // Ignore stores to the global.
    } else {
      // We don't know or understand this user, bail out.
      //cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
      return false;
    }
  }
  return true;
}
static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
  bool Changed = false;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ) {
    Instruction *I = cast<Instruction>(*UI++);
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      LI->setOperand(0, NewV);
      Changed = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (SI->getOperand(1) == V) {
        SI->setOperand(1, NewV);
        Changed = true;
      }
    } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
      CallSite CS(I);
      if (CS.getCalledValue() == V) {
        // Calling through the pointer! Turn into a direct call, but be careful
        // that the pointer is not also being passed as an argument.
        CS.setCalledFunction(NewV);
        Changed = true;
        bool PassedAsArg = false;
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.getArgument(i) == V) {
            PassedAsArg = true;
            CS.setArgument(i, NewV);
          }

        if (PassedAsArg) {
          // Being passed as an argument also. Be careful to not invalidate UI!
          UI = V->use_begin();
        }
      }
    } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(CI,
                                ConstantExpr::getCast(CI->getOpcode(),
                                                      NewV, CI->getType()));
      if (CI->use_empty()) {
        Changed = true;
        CI->eraseFromParent();
      }
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      // Should handle GEP here.
      SmallVector<Constant*, 8> Idxs;
      Idxs.reserve(GEPI->getNumOperands()-1);
      for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
           i != e; ++i)
        if (Constant *C = dyn_cast<Constant>(*i))
          Idxs.push_back(C);
        else
          break;
      if (Idxs.size() == GEPI->getNumOperands()-1)
        Changed |= OptimizeAwayTrappingUsesOfValue(GEPI,
                          ConstantExpr::getGetElementPtr(NewV, &Idxs[0],
                                                         Idxs.size()));
      if (GEPI->use_empty()) {
        Changed = true;
        GEPI->eraseFromParent();
      }
    }
  }

  return Changed;
}
/// OptimizeAwayTrappingUsesOfLoads - The specified global has only one non-null
/// value stored into it. If there are uses of the loaded value that would trap
/// if the loaded value is dynamically null, then we know that they cannot be
/// reached with a null pointer, so we can optimize away the load.
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV) {
  bool Changed = false;

  // Keep track of whether we are able to remove all the uses of the global
  // other than the store that defines it.
  bool AllNonStoreUsesGone = true;

  // Replace all uses of loads with uses of uses of the stored value.
  for (Value::use_iterator GUI = GV->use_begin(), E = GV->use_end(); GUI != E;){
    User *GlobalUser = *GUI++;
    if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
      // If we were able to delete all uses of the loads
      if (LI->use_empty()) {
        LI->eraseFromParent();
        Changed = true;
      } else {
        AllNonStoreUsesGone = false;
      }
    } else if (isa<StoreInst>(GlobalUser)) {
      // Ignore the store that stores "LV" to the global.
      assert(GlobalUser->getOperand(1) == GV &&
             "Must be storing *to* the global");
    } else {
      AllNonStoreUsesGone = false;

      // If we get here we could have other crazy uses that are transitively
      // loaded.
      assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
              isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser)) &&
             "Only expect load and stores!");
    }
  }

  if (Changed) {
    DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV);
    ++NumGlobUses;
  }

  // If we nuked all of the loads, then none of the stores are needed either,
  // nor is the global.
  if (AllNonStoreUsesGone) {
    DEBUG(dbgs() << "  *** GLOBAL NOW DEAD!\n");
    CleanupConstantGlobalUsers(GV, 0);
    if (GV->use_empty()) {
      GV->eraseFromParent();
      ++NumDeleted;
    }
    Changed = true;
  }
  return Changed;
}
/// ConstantPropUsersOf - Walk the use list of V, constant folding all of the
/// instructions that are foldable.
static void ConstantPropUsersOf(Value *V) {
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; )
    if (Instruction *I = dyn_cast<Instruction>(*UI++))
      if (Constant *NewC = ConstantFoldInstruction(I)) {
        I->replaceAllUsesWith(NewC);

        // Advance UI to the next non-I use to avoid invalidating it!
        // Instructions could multiply use V.
        while (UI != E && *UI == I)
          ++UI;
        I->eraseFromParent();
      }
}
/// OptimizeGlobalAddressOfMalloc - This function takes the specified global
/// variable, and transforms the program as if it always contained the result of
/// the specified malloc. Because it is always the result of the specified
/// malloc, there is no reason to actually DO the malloc. Instead, turn the
/// malloc into a global, and any loads of GV as uses of the new global.
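///
/// Sketch of the effect (illustrative, not from the original comment):
///   @p = internal global i32* null
///   ...
///   store i32* %m, i32** @p        ; %m is the only (malloc'd) value stored
/// becomes
///   @p.body = internal global i32 undef
/// with loads of @p rewritten to use @p.body directly, and a @p.init boolean
/// introduced only if the original code compared the loaded pointer to null.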
static GlobalVariable *OptimizeGlobalAddressOfMalloc(GlobalVariable *GV,
                                                     CallInst *CI,
                                                     const Type *AllocTy,
                                                     ConstantInt *NElements,
                                                     TargetData *TD) {
  DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << "  CALL = " << *CI << '\n');

  const Type *GlobalType;
  if (NElements->getZExtValue() == 1)
    GlobalType = AllocTy;
  else
    // If we have an array allocation, the global variable is of an array.
    GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue());

  // Create the new global variable. The contents of the malloc'd memory is
  // undefined, so initialize with an undef value.
  GlobalVariable *NewGV = new GlobalVariable(*GV->getParent(),
                                             GlobalType, false,
                                             GlobalValue::InternalLinkage,
                                             UndefValue::get(GlobalType),
                                             GV->getName()+".body",
                                             GV,
                                             GV->isThreadLocal());

  // If there are bitcast users of the malloc (which is typical, usually we have
  // a malloc + bitcast) then replace them with uses of the new global. Update
  // other users to use the global as well.
  BitCastInst *TheBC = 0;
  while (!CI->use_empty()) {
    Instruction *User = cast<Instruction>(CI->use_back());
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (BCI->getType() == NewGV->getType()) {
        BCI->replaceAllUsesWith(NewGV);
        BCI->eraseFromParent();
      } else {
        BCI->setOperand(0, NewGV);
      }
    } else {
      if (TheBC == 0)
        TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
      User->replaceUsesOfWith(CI, TheBC);
    }
  }

  Constant *RepValue = NewGV;
  if (NewGV->getType() != GV->getType()->getElementType())
    RepValue = ConstantExpr::getBitCast(RepValue,
                                        GV->getType()->getElementType());

  // If there is a comparison against null, we will insert a global bool to
  // keep track of whether the global was initialized yet or not.
  GlobalVariable *InitBool =
    new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
                       GlobalValue::InternalLinkage,
                       ConstantInt::getFalse(GV->getContext()),
                       GV->getName()+".init", GV->isThreadLocal());
  bool InitBoolUsed = false;

  // Loop over all uses of GV, processing them in turn.
  while (!GV->use_empty()) {
    if (StoreInst *SI = dyn_cast<StoreInst>(GV->use_back())) {
      // The global is initialized when the store to it occurs.
      new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, SI);
      SI->eraseFromParent();
      continue;
    }

    LoadInst *LI = cast<LoadInst>(GV->use_back());
    while (!LI->use_empty()) {
      Use &LoadUse = LI->use_begin().getUse();
      if (!isa<ICmpInst>(LoadUse.getUser())) {
        LoadUse = RepValue;
        continue;
      }

      ICmpInst *ICI = cast<ICmpInst>(LoadUse.getUser());
      // Replace the cmp X, 0 with a use of the bool value.
      Value *LV = new LoadInst(InitBool, InitBool->getName()+".val", ICI);
      InitBoolUsed = true;
      switch (ICI->getPredicate()) {
      default: llvm_unreachable("Unknown ICmp Predicate!");
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_SLT:   // X < null -> always false
        LV = ConstantInt::getFalse(GV->getContext());
        break;
      case ICmpInst::ICMP_ULE:
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_EQ:
        LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
        break;
      case ICmpInst::ICMP_NE:
      case ICmpInst::ICMP_UGE:
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_SGT:
        break;  // no change.
      }
      ICI->replaceAllUsesWith(LV);
      ICI->eraseFromParent();
    }
    LI->eraseFromParent();
  }

  // If the initialization boolean was used, insert it, otherwise delete it.
  if (!InitBoolUsed) {
    while (!InitBool->use_empty())  // Delete initializations
      cast<StoreInst>(InitBool->use_back())->eraseFromParent();
    delete InitBool;
  } else
    GV->getParent()->getGlobalList().insert(GV, InitBool);

  // Now the GV is dead, nuke it and the malloc..
  GV->eraseFromParent();
  CI->eraseFromParent();

  // To further other optimizations, loop over all users of NewGV and try to
  // constant prop them. This will promote GEP instructions with constant
  // indices into GEP constant-exprs, which will allow global-opt to hack on it.
  ConstantPropUsersOf(NewGV);
  if (RepValue != NewGV)
    ConstantPropUsersOf(RepValue);

  return NewGV;
}
/// ValueIsOnlyUsedLocallyOrStoredToOneGlobal - Scan the use-list of V checking
/// to make sure that there are no complex uses of V. We permit simple things
/// like dereferencing the pointer, but not storing through the address, unless
/// it is to the specified global.
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
                                                      const GlobalVariable *GV,
                                         SmallPtrSet<const PHINode*, 8> &PHIs) {
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end();
       UI != E; ++UI) {
    const Instruction *Inst = cast<Instruction>(*UI);

    if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
      continue; // Fine, ignore.
    }

    if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
        return false;  // Storing the pointer itself... bad.
      continue; // Otherwise, storing through it, or storing into GV... fine.
    }

    // Must index into the array and into the struct.
    if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
        return false;
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
      // PHIs are ok if all uses are ok. Don't infinitely recurse through PHI
      // cycles.
      if (PHIs.insert(PN))
        if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
          return false;
      continue;
    }

    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
        return false;
      continue;
    }

    return false;
  }
  return true;
}
/// ReplaceUsesOfMallocWithGlobal - The Alloc pointer is stored into GV
/// somewhere. Transform all uses of the allocation into loads from the
/// global and uses of the resultant pointer. Further, delete the store into
/// GV. This assumes that these values pass the
/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
                                          GlobalVariable *GV) {
  while (!Alloc->use_empty()) {
    Instruction *U = cast<Instruction>(*Alloc->use_begin());
    Instruction *InsertPt = U;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // If this is the store of the allocation into the global, remove it.
      if (SI->getOperand(1) == GV) {
        SI->eraseFromParent();
        continue;
      }
    } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
      // Insert the load in the corresponding predecessor, not right before the
      // PHI.
      InsertPt = PN->getIncomingBlock(Alloc->use_begin())->getTerminator();
    } else if (isa<BitCastInst>(U)) {
      // Must be bitcast between the malloc and store to initialize the global.
      ReplaceUsesOfMallocWithGlobal(U, GV);
      U->eraseFromParent();
      continue;
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      // If this is a "GEP bitcast" and the user is a store to the global, then
      // just process it as a bitcast.
      if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
        if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->use_back()))
          if (SI->getOperand(1) == GV) {
            // Must be bitcast GEP between the malloc and store to initialize
            // the global.
            ReplaceUsesOfMallocWithGlobal(GEPI, GV);
            GEPI->eraseFromParent();
            continue;
          }
    }

    // Insert a load from the global, and use it instead of the malloc.
    Value *NL = new LoadInst(GV, GV->getName()+".val", InsertPt);
    U->replaceUsesOfWith(Alloc, NL);
  }
}
/// LoadUsesSimpleEnoughForHeapSRA - Verify that all uses of V (a load, or a phi
/// of a load) are simple enough to perform heap SRA on. This permits GEP's
/// that index through the array and struct field, icmps of null, and PHIs.
static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
                         SmallPtrSet<const PHINode*, 32> &LoadUsingPHIs,
                         SmallPtrSet<const PHINode*, 32> &LoadUsingPHIsPerLoad) {
  // We permit two users of the load: setcc comparing against the null
  // pointer, and a getelementptr of a specific form.
  for (Value::const_use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;
       ++UI) {
    const Instruction *User = cast<Instruction>(*UI);

    // Comparison against null is ok.
    if (const ICmpInst *ICI = dyn_cast<ICmpInst>(User)) {
      if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
        return false;
      continue;
    }

    // getelementptr is also ok, but only a simple form.
    if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(User)) {
      // Must index into the array and into the struct.
      if (GEPI->getNumOperands() < 3)
        return false;

      // Otherwise the GEP is ok.
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(User)) {
      if (!LoadUsingPHIsPerLoad.insert(PN))
        // This means some phi nodes are dependent on each other.
        // Avoid infinite looping!
        return false;
      if (!LoadUsingPHIs.insert(PN))
        // If we have already analyzed this PHI, then it is safe.
        continue;

      // Make sure all uses of the PHI are simple enough to transform.
      if (!LoadUsesSimpleEnoughForHeapSRA(PN,
                                          LoadUsingPHIs, LoadUsingPHIsPerLoad))
        return false;

      continue;
    }

    // Otherwise we don't know what this is, not ok.
    return false;
  }

  return true;
}
/// AllGlobalLoadUsesSimpleEnoughForHeapSRA - If all users of values loaded from
/// GV are simple enough to perform HeapSRA, return true.
static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
                                                    Instruction *StoredVal) {
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
  for (Value::const_use_iterator UI = GV->use_begin(), E = GV->use_end();
       UI != E; ++UI)
    if (const LoadInst *LI = dyn_cast<LoadInst>(*UI)) {
      if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
                                          LoadUsingPHIsPerLoad))
        return false;
      LoadUsingPHIsPerLoad.clear();
    }

  // If we reach here, we know that all uses of the loads and transitive uses
  // (through PHI nodes) are simple enough to transform. However, we don't know
  // that all inputs to the PHI nodes are in the same equivalence sets.
  // Check to verify that all operands of the PHIs are either PHIS that can be
  // transformed, loads from GV, or the stored value itself.
  for (SmallPtrSet<const PHINode*, 32>::const_iterator I = LoadUsingPHIs.begin()
       , E = LoadUsingPHIs.end(); I != E; ++I) {
    const PHINode *PN = *I;
    for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
      Value *InVal = PN->getIncomingValue(op);

      // PHI of the stored value itself is ok.
      if (InVal == StoredVal) continue;

      if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
        // One of the PHIs in our set is (optimistically) ok.
        if (LoadUsingPHIs.count(InPN))
          continue;
        return false;
      }

      // Load from GV is ok.
      if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
        if (LI->getOperand(0) == GV)
          continue;

      // Anything else is rejected.
      return false;
    }
  }

  return true;
}
static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
              DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  std::vector<Value*> &FieldVals = InsertedScalarizedValues[V];

  if (FieldNo >= FieldVals.size())
    FieldVals.resize(FieldNo+1);

  // If we already have this value, just reuse the previously scalarized
  // version.
  if (Value *FieldVal = FieldVals[FieldNo])
    return FieldVal;

  // Depending on what instruction this is, we have several cases.
  Value *Result;
  if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
    // This is a scalarized version of the load from the global. Just create
    // a new Load of the scalarized global.
    Result = new LoadInst(GetHeapSROAValue(LI->getOperand(0), FieldNo,
                                           InsertedScalarizedValues,
                                           PHIsToRewrite),
                          LI->getName()+".f"+Twine(FieldNo), LI);
  } else if (PHINode *PN = dyn_cast<PHINode>(V)) {
    // PN's type is pointer to struct. Make a new PHI of pointer to struct
    // field.
    const StructType *ST =
      cast<StructType>(cast<PointerType>(PN->getType())->getElementType());

    Result =
      PHINode::Create(PointerType::getUnqual(ST->getElementType(FieldNo)),
                      PN->getNumIncomingValues(),
                      PN->getName()+".f"+Twine(FieldNo), PN);
    PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
  } else {
    llvm_unreachable("Unknown usable value");
    Result = 0;
  }

  return FieldVals[FieldNo] = Result;
}
/// RewriteHeapSROALoadUser - Given a load instruction and a value derived from
/// the load, rewrite the derived value to use the HeapSRoA'd load.
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
              DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  // If this is a comparison against null, handle it.
  if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
    assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
    // If we have a setcc of the loaded pointer, we can use a setcc of any
    // field.
    Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
                                   InsertedScalarizedValues, PHIsToRewrite);

    Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
                              Constant::getNullValue(NPtr->getType()),
                              SCI->getName());
    SCI->replaceAllUsesWith(New);
    SCI->eraseFromParent();
    return;
  }

  // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
    assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
           && "Unexpected GEPI!");

    // Load the pointer for this field.
    unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
    Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
                                     InsertedScalarizedValues, PHIsToRewrite);

    // Create the new GEP idx vector.
    SmallVector<Value*, 8> GEPIdx;
    GEPIdx.push_back(GEPI->getOperand(1));
    GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());

    Value *NGEPI = GetElementPtrInst::Create(NewPtr,
                                             GEPIdx.begin(), GEPIdx.end(),
                                             GEPI->getName(), GEPI);
    GEPI->replaceAllUsesWith(NGEPI);
    GEPI->eraseFromParent();
    return;
  }

  // Recursively transform the users of PHI nodes. This will lazily create the
  // PHIs that are needed for individual elements. Keep track of what PHIs we
  // see in InsertedScalarizedValues so that we don't get infinite loops (very
  // antisocial). If the PHI is already in InsertedScalarizedValues, it has
  // already been seen first by another load, so its uses have already been
  // processed.
  PHINode *PN = cast<PHINode>(LoadUser);
  bool Inserted;
  DenseMap<Value*, std::vector<Value*> >::iterator InsertPos;
  tie(InsertPos, Inserted) =
    InsertedScalarizedValues.insert(std::make_pair(PN, std::vector<Value*>()));
  if (!Inserted) return;

  // If this is the first time we've seen this PHI, recursively process all
  // users.
  for (Value::use_iterator UI = PN->use_begin(), E = PN->use_end(); UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }
}
/// RewriteUsesOfLoadForHeapSRoA - We are performing Heap SRoA on a global. Ptr
/// is a value loaded from the global. Eliminate all uses of Ptr, making them
/// use FieldGlobals instead. All uses of loaded values satisfy
/// AllGlobalLoadUsesSimpleEnoughForHeapSRA.
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
              DenseMap<Value*, std::vector<Value*> > &InsertedScalarizedValues,
                   std::vector<std::pair<PHINode*, unsigned> > &PHIsToRewrite) {
  for (Value::use_iterator UI = Load->use_begin(), E = Load->use_end();
       UI != E; ) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }

  if (Load->use_empty()) {
    Load->eraseFromParent();
    InsertedScalarizedValues.erase(Load);
  }
}
/// PerformHeapAllocSRoA - CI is an allocation of an array of structures. Break
/// it up into multiple allocations of arrays of the fields.
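///
/// Illustrative sketch (added example, not part of the original comment): for
/// a global that only ever holds the result of 'malloc(N * sizeof({i32, i8*}))',
/// the single allocation is replaced by one malloc'd array per field (an i32
/// array and an i8* array), each stored into its own @G.f0/@G.f1 global, and
/// the GEP+load users are rewritten to index the matching field array.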
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
                                            Value *NElems, TargetData *TD) {
  DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << "  MALLOC = " << *CI << '\n');
  const Type *MAT = getMallocAllocatedType(CI);
  const StructType *STy = cast<StructType>(MAT);

  // There is guaranteed to be at least one use of the malloc (storing
  // it into GV). If there are other uses, change them to be uses of
  // the global to simplify later code. This also deletes the store
  // into GV.
  ReplaceUsesOfMallocWithGlobal(CI, GV);

  // Okay, at this point, there are no users of the malloc. Insert N
  // new mallocs at the same place as CI, and N globals.
  std::vector<Value*> FieldGlobals;
  std::vector<Value*> FieldMallocs;

  for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){
    const Type *FieldTy = STy->getElementType(FieldNo);
    const PointerType *PFieldTy = PointerType::getUnqual(FieldTy);

    GlobalVariable *NGV =
      new GlobalVariable(*GV->getParent(),
                         PFieldTy, false, GlobalValue::InternalLinkage,
                         Constant::getNullValue(PFieldTy),
                         GV->getName() + ".f" + Twine(FieldNo), GV,
                         GV->isThreadLocal());
    FieldGlobals.push_back(NGV);

    unsigned TypeSize = TD->getTypeAllocSize(FieldTy);
    if (const StructType *ST = dyn_cast<StructType>(FieldTy))
      TypeSize = TD->getStructLayout(ST)->getSizeInBytes();
    const Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
    Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
                                        ConstantInt::get(IntPtrTy, TypeSize),
                                        NElems, 0,
                                        CI->getName() + ".f" + Twine(FieldNo));
    FieldMallocs.push_back(NMI);
    new StoreInst(NMI, NGV, CI);
  }

  // The tricky aspect of this transformation is handling the case when malloc
  // fails. In the original code, malloc failing would set the result pointer
  // of malloc to null. In this case, some mallocs could succeed and others
  // could fail. As such, we emit code that looks like this:
  //    F0 = malloc(field0)
  //    F1 = malloc(field1)
  //    F2 = malloc(field2)
  //    if (F0 == 0 || F1 == 0 || F2 == 0) {
  //      if (F0) { free(F0); F0 = 0; }
  //      if (F1) { free(F1); F1 = 0; }
  //      if (F2) { free(F2); F2 = 0; }
  //    }
  // The malloc can also fail if its argument is too large.
  Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0);
  Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0),
                                  ConstantZero, "isneg");
  for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
    Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
                             Constant::getNullValue(FieldMallocs[i]->getType()),
                               "isnull");
    RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI);
  }

  // Split the basic block at the old malloc.
  BasicBlock *OrigBB = CI->getParent();
  BasicBlock *ContBB = OrigBB->splitBasicBlock(CI, "malloc_cont");

  // Create the block to check the first condition. Put all these blocks at the
  // end of the function as they are unlikely to be executed.
  BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(),
                                                "malloc_ret_null",
                                                OrigBB->getParent());

  // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
  // branch on RunningOr.
  OrigBB->getTerminator()->eraseFromParent();
  BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);

  // Within the NullPtrBlock, we need to emit a comparison and branch for each
  // pointer, because some may be null while others are not.
  for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
    Value *GVVal = new LoadInst(FieldGlobals[i], "tmp", NullPtrBlock);
    Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
                              Constant::getNullValue(GVVal->getType()),
                              "tmp");
    BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
                                               OrigBB->getParent());
    BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
                                               OrigBB->getParent());
    Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
                                         Cmp, NullPtrBlock);

    // Fill in FreeBlock.
    CallInst::CreateFree(GVVal, BI);
    new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
                  FreeBlock);
    BranchInst::Create(NextBlock, FreeBlock);

    NullPtrBlock = NextBlock;
  }

  BranchInst::Create(ContBB, NullPtrBlock);

  // CI is no longer needed, remove it.
  CI->eraseFromParent();

  /// InsertedScalarizedLoads - As we process loads, if we can't immediately
  /// update all uses of the load, keep track of what scalarized loads are
  /// inserted for a given load.
  DenseMap<Value*, std::vector<Value*> > InsertedScalarizedValues;
  InsertedScalarizedValues[GV] = FieldGlobals;

  std::vector<std::pair<PHINode*, unsigned> > PHIsToRewrite;

  // Okay, the malloc site is completely handled. All of the uses of GV are now
  // loads, and all uses of those loads are simple. Rewrite them to use loads
  // of the per-field globals instead.
  for (Value::use_iterator UI = GV->use_begin(), E = GV->use_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite);
      continue;
    }

    // Must be a store of null.
    StoreInst *SI = cast<StoreInst>(User);
    assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
           "Unexpected heap-sra user!");

    // Insert a store of null into each global.
    for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
      const PointerType *PT = cast<PointerType>(FieldGlobals[i]->getType());
      Constant *Null = Constant::getNullValue(PT->getElementType());
      new StoreInst(Null, FieldGlobals[i], SI);
    }
    // Erase the original store.
    SI->eraseFromParent();
  }

  // While we have PHIs that are interesting to rewrite, do it.
  while (!PHIsToRewrite.empty()) {
    PHINode *PN = PHIsToRewrite.back().first;
    unsigned FieldNo = PHIsToRewrite.back().second;
    PHIsToRewrite.pop_back();
    PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
    assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi");

    // Add all the incoming values. This can materialize more phis.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
                               PHIsToRewrite);
      FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
    }
  }

  // Drop all inter-phi links and any loads that made it this far.
  for (DenseMap<Value*, std::vector<Value*> >::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->dropAllReferences();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->dropAllReferences();
  }

  // Delete all the phis and loads now that inter-references are dead.
  for (DenseMap<Value*, std::vector<Value*> >::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->eraseFromParent();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->eraseFromParent();
  }

  // The old global is now dead, remove it.
  GV->eraseFromParent();

  return cast<GlobalVariable>(FieldGlobals[0]);
}
/// TryToOptimizeStoreOfMallocToGlobal - This function is called when we see a
/// pointer global variable with a single value stored into it that is a malloc
/// or cast of malloc.
static bool TryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV,
                                               CallInst *CI,
                                               const Type *AllocTy,
                                               Module::global_iterator &GVI,
                                               TargetData *TD) {
  if (!TD)
    return false;

  // If this is a malloc of an abstract type, don't touch it.
  if (!AllocTy->isSized())
    return false;

  // We can't optimize this global unless all uses of it are *known* to be
  // of the malloc value, not of the null initializer value (consider a use
  // that compares the global's value against zero to see if the malloc has
  // been reached).  To do this, we check to see if all uses of the global
  // would trap if the global were null: this proves that they must all
  // happen after the malloc.
  if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
    return false;

  // We can't optimize this if the malloc itself is used in a complex way,
  // for example, being stored into multiple globals.  This allows the
  // malloc to be stored into the specified global, loaded setcc'd, and
  // GEP'd.  These are all things we could transform to using the global
  // for.
  SmallPtrSet<const PHINode*, 8> PHIs;
  if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs))
    return false;

  // If we have a global that is only initialized with a fixed size malloc,
  // transform the program to use global memory instead of malloc'd memory.
  // This eliminates dynamic allocation, avoids an indirection accessing the
  // data, and exposes the resultant global to further GlobalOpt.
  // We cannot optimize the malloc if we cannot determine the malloc array size.
  Value *NElems = getMallocArraySize(CI, TD, true);
  if (!NElems)
    return false;

  if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
    // Restrict this transformation to only working on small allocations
    // (2048 bytes currently), as we don't want to introduce a 16M global or
    // something much larger.
    if (NElements->getZExtValue() * TD->getTypeAllocSize(AllocTy) < 2048) {
      GVI = OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, TD);
      return true;
    }

  // If the allocation is an array of structures, consider transforming this
  // into multiple malloc'd arrays, one for each field.  This is basically
  // SRoA for malloc'd memory.

  // If this is an allocation of a fixed size array of structs, analyze it as a
  // variable size array.  malloc [100 x struct],1 -> malloc struct, 100
  if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
    if (const ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
      AllocTy = AT->getElementType();

  const StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
  if (!AllocSTy)
    return false;

  // If the structure has an unreasonable number of fields, leave it alone.
  if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
      AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) {

    // If this is a fixed size array, transform the Malloc to be an alloc of
    // structs.  malloc [100 x struct],1 -> malloc struct, 100
    if (const ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI))) {
      const Type *IntPtrTy = TD->getIntPtrType(CI->getContext());
      unsigned TypeSize = TD->getStructLayout(AllocSTy)->getSizeInBytes();
      Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
      Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
      Instruction *Malloc = CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy,
                                                   AllocSize, NumElements,
                                                   NULL, CI->getName());
      Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
      CI->replaceAllUsesWith(Cast);
      CI->eraseFromParent();
      CI = dyn_cast<BitCastInst>(Malloc) ?
             extractMallocCallFromBitCast(Malloc) : cast<CallInst>(Malloc);
    }

    GVI = PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, TD, true), TD);
    return true;
  }

  return false;
}
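// Illustrative sketch (not taken from the original source) of the simple case
// above: a global such as
//   @p = internal global i32* null
// whose only store is the result of a small fixed-size malloc can be rewritten
// so that the malloc'd body lives in a new global and the heap allocation
// disappears, because every use of the loaded pointer is known to happen after
// the malloc.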
// OptimizeOnceStoredGlobal - Try to optimize globals based on the knowledge
// that only one value (besides its initializer) is ever stored to the global.
static bool OptimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
                                     Module::global_iterator &GVI,
                                     TargetData *TD) {
  // Ignore no-op GEPs and bitcasts.
  StoredOnceVal = StoredOnceVal->stripPointerCasts();

  // If we are dealing with a pointer global that is initialized to null and
  // only has one (non-null) value stored into it, then we can optimize any
  // users of the loaded value (often calls and loads) that would trap if the
  // value was null.
  if (GV->getInitializer()->getType()->isPointerTy() &&
      GV->getInitializer()->isNullValue()) {
    if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
      if (GV->getInitializer()->getType() != SOVC->getType())
        SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());

      // Optimize away any trapping uses of the loaded value.
      if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC))
        return true;
    } else if (CallInst *CI = extractMallocCall(StoredOnceVal)) {
      const Type *MallocType = getMallocAllocatedType(CI);
      if (MallocType && TryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
                                                           GVI, TD))
        return true;
    }
  }

  return false;
}
/// TryToShrinkGlobalToBoolean - At this point, we have learned that the only
/// two values ever stored into GV are its initializer and OtherVal.  See if we
/// can shrink the global into a boolean and select between the two values
/// whenever it is used.  This exposes the values to other scalar optimizations.
static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
  const Type *GVElType = GV->getType()->getElementType();

  // If GVElType is already i1, it is already shrunk.  If the type of the GV is
  // an FP value, pointer or vector, don't do this optimization because a
  // select between them is very expensive and unlikely to lead to later
  // simplification.  In these cases, we typically end up with "cond ? v1 : v2"
  // where v1 and v2 both require constant pool loads, a big loss.
  if (GVElType == Type::getInt1Ty(GV->getContext()) ||
      GVElType->isFloatingPointTy() ||
      GVElType->isPointerTy() || GVElType->isVectorTy())
    return false;

  // Walk the use list of the global seeing if all the uses are load or store.
  // If there is anything else, bail out.
  for (Value::use_iterator I = GV->use_begin(), E = GV->use_end(); I != E; ++I){
    User *U = *I;
    if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
      return false;
  }

  DEBUG(dbgs() << "   *** SHRINKING TO BOOL: " << *GV);

  // Create the new global, initializing it to false.
  GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
                                             false,
                                             GlobalValue::InternalLinkage,
                                             ConstantInt::getFalse(GV->getContext()),
                                             GV->getName()+".b",
                                             GV->isThreadLocal());
  GV->getParent()->getGlobalList().insert(GV, NewGV);

  Constant *InitVal = GV->getInitializer();
  assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) &&
         "No reason to shrink to bool!");

  // If initialized to zero and storing one into the global, we can use a cast
  // instead of a select to synthesize the desired value.
  bool IsOneZero = false;
  if (ConstantInt *CI = dyn_cast<ConstantInt>(OtherVal))
    IsOneZero = InitVal->isNullValue() && CI->isOne();

  while (!GV->use_empty()) {
    Instruction *UI = cast<Instruction>(GV->use_back());
    if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
      // Change the store into a boolean store.
      bool StoringOther = SI->getOperand(0) == OtherVal;
      // Only do this if we weren't storing a loaded value.
      Value *StoreVal;
      if (StoringOther || SI->getOperand(0) == InitVal)
        StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()),
                                    StoringOther);
      else {
        // Otherwise, we are storing a previously loaded copy.  To do this,
        // change the copy from copying the original value to just copying the
        // bool.
        Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));

        // If we've already replaced the input, StoredVal will be a cast or
        // select instruction.  If not, it will be a load of the original
        // global.
        if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
          assert(LI->getOperand(0) == GV && "Not a copy!");
          // Insert a new load, to preserve the saved value.
          StoreVal = new LoadInst(NewGV, LI->getName()+".b", LI);
        } else {
          assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
                 "This is not a form that we understand!");
          StoreVal = StoredVal->getOperand(0);
          assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
        }
      }
      new StoreInst(StoreVal, NewGV, SI);
    } else {
      // Change the load into a load of bool then a select.
      LoadInst *LI = cast<LoadInst>(UI);
      LoadInst *NLI = new LoadInst(NewGV, LI->getName()+".b", LI);
      Value *NSI;
      if (IsOneZero)
        NSI = new ZExtInst(NLI, LI->getType(), "", LI);
      else
        NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI);
      NSI->takeName(LI);
      LI->replaceAllUsesWith(NSI);
    }
    UI->eraseFromParent();
  }

  GV->eraseFromParent();
  return true;
}
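// Illustrative example (not from the original source): a global that only ever
// holds its initializer 0 or the stored value 17, e.g.
//   @G = internal global i32 0
//   store i32 17, i32* @G
//   %v = load i32* @G
// is shrunk to
//   @G.b = internal global i1 false
//   store i1 true, i1* @G.b
//   %b = load i1* @G.b
//   %v = select i1 %b, i32 17, i32 0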
/// ProcessGlobal - Analyze the specified global variable and optimize it if
/// possible.  If we make a change, return true.
bool GlobalOpt::ProcessGlobal(GlobalVariable *GV,
                              Module::global_iterator &GVI) {
  if (!GV->hasLocalLinkage())
    return false;

  // Do more involved optimizations if the global is internal.
  GV->removeDeadConstantUsers();

  if (GV->use_empty()) {
    DEBUG(dbgs() << "GLOBAL DEAD: " << *GV);
    GV->eraseFromParent();
    ++NumDeleted;
    return true;
  }

  SmallPtrSet<const PHINode*, 16> PHIUsers;
  GlobalStatus GS;

  if (AnalyzeGlobal(GV, GS, PHIUsers))
    return false;

  if (!GS.isCompared && !GV->hasUnnamedAddr()) {
    GV->setUnnamedAddr(true);
    NumUnnamed++;
  }

  if (GV->isConstant() || !GV->hasInitializer())
    return false;

  return ProcessInternalGlobal(GV, GVI, PHIUsers, GS);
}
/// ProcessInternalGlobal - Analyze the specified internal global variable and
/// optimize it if possible.  If we make a change, return true.
bool GlobalOpt::ProcessInternalGlobal(GlobalVariable *GV,
                                      Module::global_iterator &GVI,
                                      const SmallPtrSet<const PHINode*, 16> &PHIUsers,
                                      const GlobalStatus &GS) {
  // If this is a first class global and has only one accessing function
  // and this function is main (which we know is not recursive), we replace
  // the global with a local alloca in that function.
  //
  // NOTE: It doesn't make sense to promote non single-value types since we
  // are just replacing static memory with stack memory.
  //
  // If the global is in a different address space, don't bring it to the stack.
  if (!GS.HasMultipleAccessingFunctions &&
      GS.AccessingFunction && !GS.HasNonInstructionUser &&
      GV->getType()->getElementType()->isSingleValueType() &&
      GS.AccessingFunction->getName() == "main" &&
      GS.AccessingFunction->hasExternalLinkage() &&
      GV->getType()->getAddressSpace() == 0) {
    DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV);
    Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction
                                                      ->getEntryBlock().begin());
    const Type *ElemTy = GV->getType()->getElementType();
    // FIXME: Pass Global's alignment when globals have alignment
    AllocaInst *Alloca = new AllocaInst(ElemTy, NULL, GV->getName(), &FirstI);
    if (!isa<UndefValue>(GV->getInitializer()))
      new StoreInst(GV->getInitializer(), Alloca, &FirstI);

    GV->replaceAllUsesWith(Alloca);
    GV->eraseFromParent();
    ++NumLocalized;
    return true;
  }

  // If the global is never loaded (but may be stored to), it is dead.
  // Delete it now.
  if (!GS.isLoaded) {
    DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV);

    // Delete any stores we can find to the global.  We may not be able to
    // make it completely dead though.
    bool Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer());

    // If the global is dead now, delete it.
    if (GV->use_empty()) {
      GV->eraseFromParent();
      ++NumDeleted;
      Changed = true;
    }
    return Changed;

  } else if (GS.StoredType <= GlobalStatus::isInitializerStored) {
    DEBUG(dbgs() << "MARKING CONSTANT: " << *GV);
    GV->setConstant(true);

    // Clean up any obviously simplifiable users now.
    CleanupConstantGlobalUsers(GV, GV->getInitializer());

    // If the global is dead now, just nuke it.
    if (GV->use_empty()) {
      DEBUG(dbgs() << "   *** Marking constant allowed us to simplify "
                   << "all users and delete global!\n");
      GV->eraseFromParent();
      ++NumDeleted;
    }

    ++NumMarked;
    return true;
  } else if (!GV->getInitializer()->getType()->isSingleValueType()) {
    if (TargetData *TD = getAnalysisIfAvailable<TargetData>())
      if (GlobalVariable *FirstNewGV = SRAGlobal(GV, *TD)) {
        GVI = FirstNewGV;  // Don't skip the newly produced globals!
        return true;
      }
  } else if (GS.StoredType == GlobalStatus::isStoredOnce) {
    // If the initial value for the global was an undef value, and if only
    // one other value was stored into it, we can just change the
    // initializer to be the stored value, then delete all stores to the
    // global.  This allows us to mark it constant.
    if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
      if (isa<UndefValue>(GV->getInitializer())) {
        // Change the initial value here.
        GV->setInitializer(SOVConstant);

        // Clean up any obviously simplifiable users now.
        CleanupConstantGlobalUsers(GV, GV->getInitializer());

        if (GV->use_empty()) {
          DEBUG(dbgs() << "   *** Substituting initializer allowed us to "
                       << "simplify all users and delete global!\n");
          GV->eraseFromParent();
          ++NumDeleted;
        }
        ++NumSubstitute;
        return true;
      }

    // Try to optimize globals based on the knowledge that only one value
    // (besides its initializer) is ever stored to the global.
    if (OptimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GVI,
                                 getAnalysisIfAvailable<TargetData>()))
      return true;

    // Otherwise, if the global was not a boolean, we can shrink it to be a
    // boolean.
    if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
      if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
        ++NumShrunkToBool;
        return true;
      }
  }

  return false;
}
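// Illustrative example (not from the original source) of the localization case
// above: an internal i32 global that is only loaded and stored inside main()
// becomes
//   %G = alloca i32
//   store i32 <initializer>, i32* %G
// at the top of main, and the global variable itself is deleted.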
/// ChangeCalleesToFastCall - Walk all of the direct calls of the specified
/// function, changing them to FastCC.
static void ChangeCalleesToFastCall(Function *F) {
  for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E; ++UI){
    CallSite User(cast<Instruction>(*UI));
    User.setCallingConv(CallingConv::Fast);
  }
}
static AttrListPtr StripNest(const AttrListPtr &Attrs) {
  for (unsigned i = 0, e = Attrs.getNumSlots(); i != e; ++i) {
    if ((Attrs.getSlot(i).Attrs & Attribute::Nest) == 0)
      continue;

    // There can be only one.
    return Attrs.removeAttr(Attrs.getSlot(i).Index, Attribute::Nest);
  }

  return Attrs;
}

static void RemoveNestAttribute(Function *F) {
  F->setAttributes(StripNest(F->getAttributes()));
  for (Value::use_iterator UI = F->use_begin(), E = F->use_end(); UI != E; ++UI){
    CallSite User(cast<Instruction>(*UI));
    User.setAttributes(StripNest(User.getAttributes()));
  }
}
bool GlobalOpt::OptimizeFunctions(Module &M) {
  bool Changed = false;
  // Optimize functions.
  for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) {
    Function *F = FI++;
    // Functions without names cannot be referenced outside this module.
    if (!F->hasName() && !F->isDeclaration())
      F->setLinkage(GlobalValue::InternalLinkage);
    F->removeDeadConstantUsers();
    if (F->use_empty() && (F->hasLocalLinkage() || F->hasLinkOnceLinkage())) {
      F->eraseFromParent();
      Changed = true;
      ++NumFnDeleted;
    } else if (F->hasLocalLinkage()) {
      if (F->getCallingConv() == CallingConv::C && !F->isVarArg() &&
          !F->hasAddressTaken()) {
        // If this function has C calling conventions, is not a varargs
        // function, and is only called directly, promote it to use the Fast
        // calling convention.
        F->setCallingConv(CallingConv::Fast);
        ChangeCalleesToFastCall(F);
        ++NumFastCallFns;
        Changed = true;
      }

      if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) &&
          !F->hasAddressTaken()) {
        // The function is not used by a trampoline intrinsic, so it is safe
        // to remove the 'nest' attribute.
        RemoveNestAttribute(F);
        ++NumNestRemoved;
        Changed = true;
      }
    }
  }
  return Changed;
}
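// Illustrative example (not from the original source): an internal function
//   define internal i32 @helper(i32 %x) { ... }
// whose address is never taken and which is only called directly is switched
// to fastcc here, along with all of its call sites, so the backend can use a
// more aggressive argument-passing convention.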
bool GlobalOpt::OptimizeGlobalVars(Module &M) {
  bool Changed = false;
  for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
       GVI != E; ) {
    GlobalVariable *GV = GVI++;
    // Global variables without names cannot be referenced outside this module.
    if (!GV->hasName() && !GV->isDeclaration())
      GV->setLinkage(GlobalValue::InternalLinkage);
    // Simplify the initializer.
    if (GV->hasInitializer())
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GV->getInitializer())) {
        TargetData *TD = getAnalysisIfAvailable<TargetData>();
        Constant *New = ConstantFoldConstantExpression(CE, TD);
        if (New && New != CE)
          GV->setInitializer(New);
      }

    Changed |= ProcessGlobal(GV, GVI);
  }
  return Changed;
}
/// FindGlobalCtors - Find the llvm.global_ctors list, verifying that all
/// initializers have an init priority of 65535.
GlobalVariable *GlobalOpt::FindGlobalCtors(Module &M) {
  GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
  if (GV == 0) return 0;

  // Verify that the initializer is simple enough for us to handle. We are
  // only allowed to optimize the initializer if it is unique.
  if (!GV->hasUniqueInitializer()) return 0;

  if (isa<ConstantAggregateZero>(GV->getInitializer()))
    return GV;
  ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());

  for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) {
    if (isa<ConstantAggregateZero>(*i))
      continue;
    ConstantStruct *CS = cast<ConstantStruct>(*i);
    if (isa<ConstantPointerNull>(CS->getOperand(1)))
      continue;

    // Must have a function or null ptr.
    if (!isa<Function>(CS->getOperand(1)))
      return 0;

    // Init priority must be standard.
    ConstantInt *CI = cast<ConstantInt>(CS->getOperand(0));
    if (CI->getZExtValue() != 65535)
      return 0;
  }

  return GV;
}
/// ParseGlobalCtors - Given a llvm.global_ctors list that we can understand,
/// return a list of the functions and null terminator as a vector.
static std::vector<Function*> ParseGlobalCtors(GlobalVariable *GV) {
  if (GV->getInitializer()->isNullValue())
    return std::vector<Function*>();
  ConstantArray *CA = cast<ConstantArray>(GV->getInitializer());
  std::vector<Function*> Result;
  Result.reserve(CA->getNumOperands());
  for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i) {
    ConstantStruct *CS = cast<ConstantStruct>(*i);
    Result.push_back(dyn_cast<Function>(CS->getOperand(1)));
  }
  return Result;
}
/// InstallGlobalCtors - Given a specified llvm.global_ctors list, install the
/// specified array, returning the new global to use.
static GlobalVariable *InstallGlobalCtors(GlobalVariable *GCL,
                                          const std::vector<Function*> &Ctors) {
  // If we made a change, reassemble the initializer list.
  Constant *CSVals[2];
  CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()), 65535);
  CSVals[1] = 0;

  const StructType *StructTy =
    cast<StructType>(
      cast<ArrayType>(GCL->getType()->getElementType())->getElementType());

  // Create the new init list.
  std::vector<Constant*> CAList;
  for (unsigned i = 0, e = Ctors.size(); i != e; ++i) {
    if (Ctors[i]) {
      CSVals[1] = Ctors[i];
    } else {
      const Type *FTy = FunctionType::get(Type::getVoidTy(GCL->getContext()),
                                          false);
      const PointerType *PFTy = PointerType::getUnqual(FTy);
      CSVals[1] = Constant::getNullValue(PFTy);
      CSVals[0] = ConstantInt::get(Type::getInt32Ty(GCL->getContext()),
                                   0x7fffffff);
    }
    CAList.push_back(ConstantStruct::get(StructTy, CSVals));
  }

  // Create the array initializer.
  Constant *CA = ConstantArray::get(ArrayType::get(StructTy,
                                                   CAList.size()), CAList);

  // If we didn't change the number of elements, don't create a new GV.
  if (CA->getType() == GCL->getInitializer()->getType()) {
    GCL->setInitializer(CA);
    return GCL;
  }

  // Create the new global and insert it next to the existing list.
  GlobalVariable *NGV = new GlobalVariable(CA->getType(), GCL->isConstant(),
                                           GCL->getLinkage(), CA, "",
                                           GCL->isThreadLocal());
  GCL->getParent()->getGlobalList().insert(GCL, NGV);
  NGV->takeName(GCL);

  // Nuke the old list, replacing any uses with the new one.
  if (!GCL->use_empty()) {
    Constant *V = NGV;
    if (V->getType() != GCL->getType())
      V = ConstantExpr::getBitCast(V, GCL->getType());
    GCL->replaceAllUsesWith(V);
  }
  GCL->eraseFromParent();

  if (Ctors.size())
    return NGV;
  return 0;
}
static Constant *getVal(DenseMap<Value*, Constant*> &ComputedValues, Value *V) {
  if (Constant *CV = dyn_cast<Constant>(V)) return CV;
  Constant *R = ComputedValues[V];
  assert(R && "Reference to an uncomputed value!");
  return R;
}
static bool
isSimpleEnoughValueToCommit(Constant *C,
                            SmallPtrSet<Constant*, 8> &SimpleConstants);

/// isSimpleEnoughValueToCommit - Return true if the specified constant can be
/// handled by the code generator.  We don't want to generate something like:
///   void *X = &X/42;
/// because the code generator doesn't have a relocation that can handle that.
///
/// This function should be called if C was not found (but just got inserted)
/// in SimpleConstants to avoid having to rescan the same constants all the
/// time.
static bool isSimpleEnoughValueToCommitHelper(Constant *C,
                                   SmallPtrSet<Constant*, 8> &SimpleConstants) {
  // Simple integer, undef, constant aggregate zero, global addresses, etc are
  // all supported.
  if (C->getNumOperands() == 0 || isa<BlockAddress>(C) ||
      isa<GlobalValue>(C))
    return true;

  // Aggregate values are safe if all their elements are.
  if (isa<ConstantArray>(C) || isa<ConstantStruct>(C) ||
      isa<ConstantVector>(C)) {
    for (unsigned i = 0, e = C->getNumOperands(); i != e; ++i) {
      Constant *Op = cast<Constant>(C->getOperand(i));
      if (!isSimpleEnoughValueToCommit(Op, SimpleConstants))
        return false;
    }
    return true;
  }

  // We don't know exactly what relocations are allowed in constant expressions,
  // so we allow &global+constantoffset, which is safe and uniformly supported
  // across targets.
  ConstantExpr *CE = cast<ConstantExpr>(C);
  switch (CE->getOpcode()) {
  case Instruction::BitCast:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
    // These casts are always fine if the casted value is.
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants);

  // GEP is fine if it is simple + constant offset.
  case Instruction::GetElementPtr:
    for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
      if (!isa<ConstantInt>(CE->getOperand(i)))
        return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants);

  case Instruction::Add:
    // We allow simple+cst.
    if (!isa<ConstantInt>(CE->getOperand(1)))
      return false;
    return isSimpleEnoughValueToCommit(CE->getOperand(0), SimpleConstants);
  }
  return false;
}

static bool
isSimpleEnoughValueToCommit(Constant *C,
                            SmallPtrSet<Constant*, 8> &SimpleConstants) {
  // If we already checked this constant, we win.
  if (!SimpleConstants.insert(C)) return true;
  // Check the constant.
  return isSimpleEnoughValueToCommitHelper(C, SimpleConstants);
}
/// isSimpleEnoughPointerToCommit - Return true if this constant is simple
/// enough for us to understand.  In particular, if it is a cast to anything
/// other than from one pointer type to another pointer type, we punt.
/// We basically just support direct accesses to globals and GEP's of
/// globals.  This should be kept up to date with CommitValueTo.
static bool isSimpleEnoughPointerToCommit(Constant *C) {
  // Conservatively, avoid aggregate types. This is because we don't
  // want to worry about them partially overlapping other stores.
  if (!cast<PointerType>(C->getType())->getElementType()->isSingleValueType())
    return false;

  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(C))
    // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
    // external globals.
    return GV->hasUniqueInitializer();

  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
    // Handle a constantexpr gep.
    if (CE->getOpcode() == Instruction::GetElementPtr &&
        isa<GlobalVariable>(CE->getOperand(0)) &&
        cast<GEPOperator>(CE)->isInBounds()) {
      GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
      // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
      // external globals.
      if (!GV->hasUniqueInitializer())
        return false;

      // The first index must be zero.
      ConstantInt *CI = dyn_cast<ConstantInt>(*llvm::next(CE->op_begin()));
      if (!CI || !CI->isZero()) return false;

      // The remaining indices must be compile-time known integers within the
      // notional bounds of the corresponding static array types.
      if (!CE->isGEPWithNoNotionalOverIndexing())
        return false;

      return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE);

    // A constantexpr bitcast from a pointer to another pointer is a no-op,
    // and we know how to evaluate it by moving the bitcast from the pointer
    // operand to the value operand.
    } else if (CE->getOpcode() == Instruction::BitCast &&
               isa<GlobalVariable>(CE->getOperand(0))) {
      // Do not allow weak/*_odr/linkonce/dllimport/dllexport linkage or
      // external globals.
      return cast<GlobalVariable>(CE->getOperand(0))->hasUniqueInitializer();
    }
  }

  return false;
}
/// EvaluateStoreInto - Evaluate a piece of a constantexpr store into a global
/// initializer.  This returns 'Init' modified to reflect 'Val' stored into it.
/// At this point, the GEP operands of Addr [0, OpNo) have been stepped into.
static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
                                   ConstantExpr *Addr, unsigned OpNo) {
  // Base case of the recursion.
  if (OpNo == Addr->getNumOperands()) {
    assert(Val->getType() == Init->getType() && "Type mismatch!");
    return Val;
  }

  std::vector<Constant*> Elts;
  if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {

    // Break up the constant into its elements.
    if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
      for (User::op_iterator i = CS->op_begin(), e = CS->op_end(); i != e; ++i)
        Elts.push_back(cast<Constant>(*i));
    } else if (isa<ConstantAggregateZero>(Init)) {
      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
        Elts.push_back(Constant::getNullValue(STy->getElementType(i)));
    } else if (isa<UndefValue>(Init)) {
      for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
        Elts.push_back(UndefValue::get(STy->getElementType(i)));
    } else {
      llvm_unreachable("This code is out of sync with "
                       " ConstantFoldLoadThroughGEPConstantExpr");
    }

    // Replace the element that we are supposed to.
    ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo));
    unsigned Idx = CU->getZExtValue();
    assert(Idx < STy->getNumElements() && "Struct index out of range!");
    Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1);

    // Return the modified struct.
    return ConstantStruct::get(STy, Elts);
  }

  ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
  const SequentialType *InitTy = cast<SequentialType>(Init->getType());

  uint64_t NumElts;
  if (const ArrayType *ATy = dyn_cast<ArrayType>(InitTy))
    NumElts = ATy->getNumElements();
  else
    NumElts = cast<VectorType>(InitTy)->getNumElements();

  // Break up the array into elements.
  if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
    for (User::op_iterator i = CA->op_begin(), e = CA->op_end(); i != e; ++i)
      Elts.push_back(cast<Constant>(*i));
  } else if (ConstantVector *CV = dyn_cast<ConstantVector>(Init)) {
    for (User::op_iterator i = CV->op_begin(), e = CV->op_end(); i != e; ++i)
      Elts.push_back(cast<Constant>(*i));
  } else if (isa<ConstantAggregateZero>(Init)) {
    Elts.assign(NumElts, Constant::getNullValue(InitTy->getElementType()));
  } else {
    assert(isa<UndefValue>(Init) && "This code is out of sync with "
           " ConstantFoldLoadThroughGEPConstantExpr");
    Elts.assign(NumElts, UndefValue::get(InitTy->getElementType()));
  }

  assert(CI->getZExtValue() < NumElts);
  Elts[CI->getZExtValue()] =
    EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1);

  if (Init->getType()->isArrayTy())
    return ConstantArray::get(cast<ArrayType>(InitTy), Elts);
  return ConstantVector::get(Elts);
}
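// Illustrative example (not from the original source): storing i32 5 through
//   getelementptr ({ i32, i32 }* @S, i32 0, i32 1)
// rewrites an initializer of "{ i32 1, i32 2 }" into "{ i32 1, i32 5 }"; the
// recursion above rebuilds each enclosing aggregate with exactly one element
// replaced.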
/// CommitValueTo - We have decided that Addr (which satisfies the predicate
/// isSimpleEnoughPointerToCommit) should get Val as its value.  Make it happen.
static void CommitValueTo(Constant *Val, Constant *Addr) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
    assert(GV->hasInitializer());
    GV->setInitializer(Val);
    return;
  }

  ConstantExpr *CE = cast<ConstantExpr>(Addr);
  GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
  GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2));
}
/// ComputeLoadResult - Return the value that would be computed by a load from
/// P after the stores reflected by 'Memory' have been performed.  If we can't
/// decide, return null.
static Constant *ComputeLoadResult(Constant *P,
                                   const DenseMap<Constant*, Constant*> &Memory) {
  // If this memory location has been recently stored, use the stored value: it
  // is the most up-to-date.
  DenseMap<Constant*, Constant*>::const_iterator I = Memory.find(P);
  if (I != Memory.end()) return I->second;

  // Access it.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(P)) {
    if (GV->hasDefinitiveInitializer())
      return GV->getInitializer();
    return 0;
  }

  // Handle a constantexpr getelementptr.
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(P))
    if (CE->getOpcode() == Instruction::GetElementPtr &&
        isa<GlobalVariable>(CE->getOperand(0))) {
      GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
      if (GV->hasDefinitiveInitializer())
        return ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE);
    }

  return 0;  // don't know how to evaluate.
}
/// EvaluateFunction - Evaluate a call to function F, returning true if
/// successful, false if we can't evaluate it.  ActualArgs contains the
/// arguments passed to the function.
static bool EvaluateFunction(Function *F, Constant *&RetVal,
                             const SmallVectorImpl<Constant*> &ActualArgs,
                             std::vector<Function*> &CallStack,
                             DenseMap<Constant*, Constant*> &MutatedMemory,
                             std::vector<GlobalVariable*> &AllocaTmps,
                             SmallPtrSet<Constant*, 8> &SimpleConstants,
                             const TargetData *TD) {
  // Check to see if this function is already executing (recursion).  If so,
  // bail out.  TODO: we might want to accept limited recursion.
  if (std::find(CallStack.begin(), CallStack.end(), F) != CallStack.end())
    return false;

  CallStack.push_back(F);

  /// Values - As we compute SSA register values, we store their contents here.
  DenseMap<Value*, Constant*> Values;

  // Initialize arguments to the incoming values specified.
  unsigned ArgNo = 0;
  for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end(); AI != E;
       ++AI, ++ArgNo)
    Values[AI] = ActualArgs[ArgNo];

  /// ExecutedBlocks - We only handle non-looping, non-recursive code.  As such,
  /// we can only evaluate any one basic block at most once.  This set keeps
  /// track of what we have executed so we can detect recursive cases etc.
  SmallPtrSet<BasicBlock*, 32> ExecutedBlocks;

  // CurInst - The current instruction we're evaluating.
  BasicBlock::iterator CurInst = F->begin()->begin();

  // This is the main evaluation loop.
  while (1) {
    Constant *InstResult = 0;

    if (StoreInst *SI = dyn_cast<StoreInst>(CurInst)) {
      if (SI->isVolatile()) return false;  // no volatile accesses.
      Constant *Ptr = getVal(Values, SI->getOperand(1));
      if (!isSimpleEnoughPointerToCommit(Ptr))
        // If this is too complex for us to commit, reject it.
        return false;

      Constant *Val = getVal(Values, SI->getOperand(0));

      // If this might be too difficult for the backend to handle (e.g. the addr
      // of one global variable divided by another) then we can't commit it.
      if (!isSimpleEnoughValueToCommit(Val, SimpleConstants))
        return false;

      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
        if (CE->getOpcode() == Instruction::BitCast) {
          // If we're evaluating a store through a bitcast, then we need
          // to pull the bitcast off the pointer type and push it onto the
          // stored value.
          Ptr = CE->getOperand(0);

          const Type *NewTy = cast<PointerType>(Ptr->getType())->getElementType();

          // In order to push the bitcast onto the stored value, a bitcast
          // from NewTy to Val's type must be legal.  If it's not, we can try
          // introspecting NewTy to find a legal conversion.
          while (!Val->getType()->canLosslesslyBitCastTo(NewTy)) {
            // If NewTy is a struct, we can convert the pointer to the struct
            // into a pointer to its first member.
            // FIXME: This could be extended to support arrays as well.
            if (const StructType *STy = dyn_cast<StructType>(NewTy)) {
              NewTy = STy->getTypeAtIndex(0U);

              const IntegerType *IdxTy = IntegerType::get(NewTy->getContext(), 32);
              Constant *IdxZero = ConstantInt::get(IdxTy, 0, false);
              Constant * const IdxList[] = {IdxZero, IdxZero};

              Ptr = ConstantExpr::getGetElementPtr(Ptr, IdxList, 2);

            // If we can't improve the situation by introspecting NewTy,
            // we have to give up.
            } else {
              return false;
            }
          }

          // If we found compatible types, go ahead and push the bitcast
          // onto the stored value.
          Val = ConstantExpr::getBitCast(Val, NewTy);
        }

      MutatedMemory[Ptr] = Val;
    } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CurInst)) {
      InstResult = ConstantExpr::get(BO->getOpcode(),
                                     getVal(Values, BO->getOperand(0)),
                                     getVal(Values, BO->getOperand(1)));
    } else if (CmpInst *CI = dyn_cast<CmpInst>(CurInst)) {
      InstResult = ConstantExpr::getCompare(CI->getPredicate(),
                                            getVal(Values, CI->getOperand(0)),
                                            getVal(Values, CI->getOperand(1)));
    } else if (CastInst *CI = dyn_cast<CastInst>(CurInst)) {
      InstResult = ConstantExpr::getCast(CI->getOpcode(),
                                         getVal(Values, CI->getOperand(0)),
                                         CI->getType());
    } else if (SelectInst *SI = dyn_cast<SelectInst>(CurInst)) {
      InstResult = ConstantExpr::getSelect(getVal(Values, SI->getOperand(0)),
                                           getVal(Values, SI->getOperand(1)),
                                           getVal(Values, SI->getOperand(2)));
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(CurInst)) {
      Constant *P = getVal(Values, GEP->getOperand(0));
      SmallVector<Constant*, 8> GEPOps;
      for (User::op_iterator i = GEP->op_begin() + 1, e = GEP->op_end();
           i != e; ++i)
        GEPOps.push_back(getVal(Values, *i));
      InstResult = cast<GEPOperator>(GEP)->isInBounds() ?
        ConstantExpr::getInBoundsGetElementPtr(P, &GEPOps[0], GEPOps.size()) :
        ConstantExpr::getGetElementPtr(P, &GEPOps[0], GEPOps.size());
    } else if (LoadInst *LI = dyn_cast<LoadInst>(CurInst)) {
      if (LI->isVolatile()) return false;  // no volatile accesses.
      InstResult = ComputeLoadResult(getVal(Values, LI->getOperand(0)),
                                     MutatedMemory);
      if (InstResult == 0) return false; // Could not evaluate load.
    } else if (AllocaInst *AI = dyn_cast<AllocaInst>(CurInst)) {
      if (AI->isArrayAllocation()) return false;  // Cannot handle array allocs.
      const Type *Ty = AI->getType()->getElementType();
      AllocaTmps.push_back(new GlobalVariable(Ty, false,
                                              GlobalValue::InternalLinkage,
                                              UndefValue::get(Ty),
                                              AI->getName()));
      InstResult = AllocaTmps.back();
    } else if (CallInst *CI = dyn_cast<CallInst>(CurInst)) {

      // Debug info can safely be ignored here.
      if (isa<DbgInfoIntrinsic>(CI)) {
        ++CurInst;
        continue;
      }

      // Cannot handle inline asm.
      if (isa<InlineAsm>(CI->getCalledValue())) return false;

      if (MemSetInst *MSI = dyn_cast<MemSetInst>(CI)) {
        if (MSI->isVolatile()) return false;
        Constant *Ptr = getVal(Values, MSI->getDest());
        Constant *Val = getVal(Values, MSI->getValue());
        Constant *DestVal = ComputeLoadResult(getVal(Values, Ptr),
                                              MutatedMemory);
        if (Val->isNullValue() && DestVal && DestVal->isNullValue()) {
          // This memset is a no-op.
          ++CurInst;
          continue;
        }
      }

      // Resolve function pointers.
      Function *Callee = dyn_cast<Function>(getVal(Values,
                                                   CI->getCalledValue()));
      if (!Callee) return false;  // Cannot resolve.

      SmallVector<Constant*, 8> Formals;
      CallSite CS(CI);
      for (User::op_iterator i = CS.arg_begin(), e = CS.arg_end();
           i != e; ++i)
        Formals.push_back(getVal(Values, *i));

      if (Callee->isDeclaration()) {
        // If this is a function we can constant fold, do it.
        if (Constant *C = ConstantFoldCall(Callee, Formals.data(),
                                           Formals.size())) {
          InstResult = C;
        } else {
          return false;
        }
      } else {
        if (Callee->getFunctionType()->isVarArg())
          return false;

        // Execute the call, if successful, use the return value.
        if (!EvaluateFunction(Callee, RetVal, Formals, CallStack,
                              MutatedMemory, AllocaTmps, SimpleConstants, TD))
          return false;
        InstResult = RetVal;
      }
    } else if (isa<TerminatorInst>(CurInst)) {
      BasicBlock *NewBB = 0;
      if (BranchInst *BI = dyn_cast<BranchInst>(CurInst)) {
        if (BI->isUnconditional()) {
          NewBB = BI->getSuccessor(0);
        } else {
          ConstantInt *Cond =
            dyn_cast<ConstantInt>(getVal(Values, BI->getCondition()));
          if (!Cond) return false;  // Cannot determine.

          NewBB = BI->getSuccessor(!Cond->getZExtValue());
        }
      } else if (SwitchInst *SI = dyn_cast<SwitchInst>(CurInst)) {
        ConstantInt *Val =
          dyn_cast<ConstantInt>(getVal(Values, SI->getCondition()));
        if (!Val) return false;  // Cannot determine.
        NewBB = SI->getSuccessor(SI->findCaseValue(Val));
      } else if (IndirectBrInst *IBI = dyn_cast<IndirectBrInst>(CurInst)) {
        Value *Val = getVal(Values, IBI->getAddress())->stripPointerCasts();
        if (BlockAddress *BA = dyn_cast<BlockAddress>(Val))
          NewBB = BA->getBasicBlock();
        else
          return false;  // Cannot determine.
      } else if (ReturnInst *RI = dyn_cast<ReturnInst>(CurInst)) {
        if (RI->getNumOperands())
          RetVal = getVal(Values, RI->getOperand(0));

        CallStack.pop_back();  // return from fn.
        return true;  // We succeeded at evaluating this ctor!
      } else {
        // invoke, unwind, unreachable.
        return false;  // Cannot handle this terminator.
      }

      // Okay, we succeeded in evaluating this control flow.  See if we have
      // executed the new block before.  If so, we have a looping function,
      // which we cannot evaluate in reasonable time.
      if (!ExecutedBlocks.insert(NewBB))
        return false;  // looped!

      // Okay, we have never been in this block before.  Check to see if there
      // are any PHI nodes.  If so, evaluate them with information about where
      // we came from.
      BasicBlock *OldBB = CurInst->getParent();
      CurInst = NewBB->begin();
      PHINode *PN;
      for (; (PN = dyn_cast<PHINode>(CurInst)); ++CurInst)
        Values[PN] = getVal(Values, PN->getIncomingValueForBlock(OldBB));

      // Do NOT increment CurInst.  We know that the terminator had no value.
      continue;
    } else {
      // Did not know how to evaluate this!
      return false;
    }

    if (!CurInst->use_empty()) {
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(InstResult))
        InstResult = ConstantFoldConstantExpression(CE, TD);

      Values[CurInst] = InstResult;
    }

    // Advance program counter.
    ++CurInst;
  }
}
/// EvaluateStaticConstructor - Evaluate static constructors in the function, if
/// we can.  Return true if we can, false otherwise.
static bool EvaluateStaticConstructor(Function *F, const TargetData *TD) {
  /// MutatedMemory - For each store we execute, we update this map.  Loads
  /// check this to get the most up-to-date value.  If evaluation is successful,
  /// this state is committed to the process.
  DenseMap<Constant*, Constant*> MutatedMemory;

  /// AllocaTmps - To 'execute' an alloca, we create a temporary global variable
  /// to represent its body.  This vector is needed so we can delete the
  /// temporary globals when we are done.
  std::vector<GlobalVariable*> AllocaTmps;

  /// CallStack - This is used to detect recursion.  In pathological situations
  /// we could hit exponential behavior, but at least there is nothing
  /// unbounded.
  std::vector<Function*> CallStack;

  /// SimpleConstants - These are constants we have checked and know to be
  /// simple enough to live in a static initializer of a global.
  SmallPtrSet<Constant*, 8> SimpleConstants;

  // Call the function.
  Constant *RetValDummy;
  bool EvalSuccess = EvaluateFunction(F, RetValDummy,
                                      SmallVector<Constant*, 0>(), CallStack,
                                      MutatedMemory, AllocaTmps,
                                      SimpleConstants, TD);

  if (EvalSuccess) {
    // We succeeded at evaluation: commit the result.
    DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
                 << F->getName() << "' to " << MutatedMemory.size()
                 << " stores.\n");
    for (DenseMap<Constant*, Constant*>::iterator I = MutatedMemory.begin(),
         E = MutatedMemory.end(); I != E; ++I)
      CommitValueTo(I->second, I->first);
  }

  // At this point, we are done interpreting.  If we created any 'alloca'
  // temporaries, release them now.
  while (!AllocaTmps.empty()) {
    GlobalVariable *Tmp = AllocaTmps.back();
    AllocaTmps.pop_back();

    // If there are still users of the alloca, the program is doing something
    // silly, e.g. storing the address of the alloca somewhere and using it
    // later.  Since this is undefined, we'll just make it be null.
    if (!Tmp->use_empty())
      Tmp->replaceAllUsesWith(Constant::getNullValue(Tmp->getType()));
    delete Tmp;
  }

  return EvalSuccess;
}
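// Illustrative example (not from the original source): for C++ code such as
//   static int square(int x) { return x * x; }
//   int Table = square(7);
// the compiler emits a global ctor that just stores 49 into @Table.  Fully
// evaluating that ctor here lets us commit "i32 49" as @Table's initializer
// and drop the ctor from llvm.global_ctors.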
/// OptimizeGlobalCtorsList - Simplify and evaluate global ctors if possible.
/// Return true if anything changed.
bool GlobalOpt::OptimizeGlobalCtorsList(GlobalVariable *&GCL) {
  std::vector<Function*> Ctors = ParseGlobalCtors(GCL);
  bool MadeChange = false;
  if (Ctors.empty()) return false;

  const TargetData *TD = getAnalysisIfAvailable<TargetData>();
  // Loop over global ctors, optimizing them when we can.
  for (unsigned i = 0; i != Ctors.size(); ++i) {
    Function *F = Ctors[i];
    // Found a null terminator in the middle of the list, prune off the rest of
    // the list.
    if (F == 0) {
      if (i != Ctors.size()-1) {
        Ctors.resize(i+1);
        MadeChange = true;
      }
      break;
    }

    // We cannot simplify external ctor functions.
    if (F->empty()) continue;

    // If we can evaluate the ctor at compile time, do.
    if (EvaluateStaticConstructor(F, TD)) {
      Ctors.erase(Ctors.begin()+i);
      MadeChange = true;
      --i;
      ++NumCtorsEvaluated;
      continue;
    }
  }

  if (!MadeChange) return false;

  GCL = InstallGlobalCtors(GCL, Ctors);
  return true;
}
bool GlobalOpt::OptimizeGlobalAliases(Module &M) {
  bool Changed = false;

  for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
       I != E;) {
    Module::alias_iterator J = I++;
    // Aliases without names cannot be referenced outside this module.
    if (!J->hasName() && !J->isDeclaration())
      J->setLinkage(GlobalValue::InternalLinkage);
    // If the aliasee may change at link time, nothing can be done - bail out.
    if (J->mayBeOverridden())
      continue;

    Constant *Aliasee = J->getAliasee();
    GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
    Target->removeDeadConstantUsers();
    bool hasOneUse = Target->hasOneUse() && Aliasee->hasOneUse();

    // Make all users of the alias use the aliasee instead.
    if (!J->use_empty()) {
      J->replaceAllUsesWith(Aliasee);
      ++NumAliasesResolved;
      Changed = true;
    }

    // If the alias is externally visible, we may still be able to simplify it.
    if (!J->hasLocalLinkage()) {
      // If the aliasee has internal linkage, give it the name and linkage
      // of the alias, and delete the alias.  This turns:
      //   define internal ... @f(...)
      //   @a = alias ... @f
      // into:
      //   define ... @a(...)
      if (!Target->hasLocalLinkage())
        continue;

      // Do not perform the transform if multiple aliases potentially target the
      // aliasee. This check also ensures that it is safe to replace the section
      // and other attributes of the aliasee with those of the alias.
      if (!hasOneUse)
        continue;

      // Give the aliasee the name, linkage and other attributes of the alias.
      Target->takeName(J);
      Target->setLinkage(J->getLinkage());
      Target->GlobalValue::copyAttributesFrom(J);
    }

    // Delete the alias.
    M.getAliasList().erase(J);
    ++NumAliasesRemoved;
    Changed = true;
  }

  return Changed;
}
static Function *FindCXAAtExit(Module &M) {
  Function *Fn = M.getFunction("__cxa_atexit");

  if (!Fn)
    return 0;

  const FunctionType *FTy = Fn->getFunctionType();

  // Checking that the function has the right return type, the right number of
  // parameters and that they all have pointer types should be enough.
  if (!FTy->getReturnType()->isIntegerTy() ||
      FTy->getNumParams() != 3 ||
      !FTy->getParamType(0)->isPointerTy() ||
      !FTy->getParamType(1)->isPointerTy() ||
      !FTy->getParamType(2)->isPointerTy())
    return 0;

  return Fn;
}
/// cxxDtorIsEmpty - Returns whether the given function is an empty C++
/// destructor and can therefore be eliminated.
/// Note that we assume that other optimization passes have already simplified
/// the code so we only look for a function with a single basic block, where
/// the only allowed instructions are 'ret' or 'call' to an empty C++ dtor.
static bool cxxDtorIsEmpty(const Function &Fn,
                           SmallPtrSet<const Function *, 8> &CalledFunctions) {
  // FIXME: We could eliminate C++ destructors if they're readonly/readnone and
  // nounwind, but that doesn't seem worth doing.
  if (Fn.isDeclaration())
    return false;

  if (++Fn.begin() != Fn.end())
    return false;

  const BasicBlock &EntryBlock = Fn.getEntryBlock();
  for (BasicBlock::const_iterator I = EntryBlock.begin(), E = EntryBlock.end();
       I != E; ++I) {
    if (const CallInst *CI = dyn_cast<CallInst>(I)) {
      // Ignore debug intrinsics.
      if (isa<DbgInfoIntrinsic>(CI))
        continue;

      const Function *CalledFn = CI->getCalledFunction();
      if (!CalledFn)
        return false;

      SmallPtrSet<const Function *, 8> NewCalledFunctions(CalledFunctions);

      // Don't treat recursive functions as empty.
      if (!NewCalledFunctions.insert(CalledFn))
        return false;

      if (!cxxDtorIsEmpty(*CalledFn, NewCalledFunctions))
        return false;
    } else if (isa<ReturnInst>(*I))
      return true;
    else
      return false;
  }

  return false;
}
bool GlobalOpt::OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
  /// Itanium C++ ABI p3.3.5:
  ///
  ///   After constructing a global (or local static) object, that will require
  ///   destruction on exit, a termination function is registered as follows:
  ///
  ///   extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
  ///
  ///   This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the
  ///   call f(p) when DSO d is unloaded, before all such termination calls
  ///   registered before this one. It returns zero if registration is
  ///   successful, nonzero on failure.

  // This pass will look for calls to __cxa_atexit where the registered
  // function is trivially empty and remove them.
  bool Changed = false;

  for (Function::use_iterator I = CXAAtExitFn->use_begin(),
       E = CXAAtExitFn->use_end(); I != E;) {
    // We're only interested in calls. Theoretically, we could handle invoke
    // instructions as well, but neither llvm-gcc nor clang generate invokes
    // to __cxa_atexit.
    CallInst *CI = dyn_cast<CallInst>(*I++);
    if (!CI)
      continue;

    Function *DtorFn =
      dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts());
    if (!DtorFn)
      continue;

    SmallPtrSet<const Function *, 8> CalledFunctions;
    if (!cxxDtorIsEmpty(*DtorFn, CalledFunctions))
      continue;

    // Just remove the call.
    CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
    CI->eraseFromParent();

    ++NumCXXDtorsRemoved;
    Changed = true;
  }

  return Changed;
}
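// Illustrative example (not from the original source): for
//   struct S { ~S() {} };
//   static S s;
// the front end registers the destructor via
//   call i32 @__cxa_atexit(void (i8*)* @dtor, i8* @s, i8* @__dso_handle)
// and, because the destructor body reduces to a single 'ret', the registration
// call is deleted here.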
bool GlobalOpt::runOnModule(Module &M) {
  bool Changed = false;

  // Try to find the llvm.global_ctors list.
  GlobalVariable *GlobalCtors = FindGlobalCtors(M);

  Function *CXAAtExitFn = FindCXAAtExit(M);

  bool LocalChange = true;
  while (LocalChange) {
    LocalChange = false;

    // Delete functions that are trivially dead, ccc -> fastcc
    LocalChange |= OptimizeFunctions(M);

    // Optimize global_ctors list.
    if (GlobalCtors)
      LocalChange |= OptimizeGlobalCtorsList(GlobalCtors);

    // Optimize non-address-taken globals.
    LocalChange |= OptimizeGlobalVars(M);

    // Resolve aliases, when possible.
    LocalChange |= OptimizeGlobalAliases(M);

    // Try to remove trivial global destructors.
    if (CXAAtExitFn)
      LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn);

    Changed |= LocalChange;
  }

  // TODO: Move all global ctors functions to the end of the module for code
  // size reduction.

  return Changed;
}