//===- GlobalOpt.cpp - Optimize Global Variables --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass transforms simple global variables that never have their address
// taken.  If obviously true, it marks read/write globals as constant, deletes
// variables only stored to, etc.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/IPO/GlobalOpt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/BlockFrequencyInfo.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/BinaryFormat/Dwarf.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfoMetadata.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/IPO.h"
#include "llvm/Transforms/Utils/CtorUtils.h"
#include "llvm/Transforms/Utils/Evaluator.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "globalopt"

STATISTIC(NumMarked    , "Number of globals marked constant");
STATISTIC(NumUnnamed   , "Number of globals marked unnamed_addr");
STATISTIC(NumSRA       , "Number of aggregate globals broken into scalars");
STATISTIC(NumHeapSRA   , "Number of heap objects SRA'd");
STATISTIC(NumSubstitute,"Number of globals with initializers stored into them");
STATISTIC(NumDeleted   , "Number of globals deleted");
STATISTIC(NumGlobUses  , "Number of global uses devirtualized");
STATISTIC(NumLocalized , "Number of globals localized");
STATISTIC(NumShrunkToBool  , "Number of global vars shrunk to booleans");
STATISTIC(NumFastCallFns   , "Number of functions converted to fastcc");
STATISTIC(NumCtorsEvaluated, "Number of static ctors evaluated");
STATISTIC(NumNestRemoved   , "Number of nest attributes removed");
STATISTIC(NumAliasesResolved, "Number of global aliases resolved");
STATISTIC(NumAliasesRemoved, "Number of global aliases eliminated");
STATISTIC(NumCXXDtorsRemoved, "Number of global C++ destructors removed");
STATISTIC(NumInternalFunc, "Number of internal functions");
STATISTIC(NumColdCC, "Number of functions marked coldcc");

static cl::opt<bool>
    EnableColdCCStressTest("enable-coldcc-stress-test",
                           cl::desc("Enable stress test of coldcc by adding "
                                    "calling conv to all internal functions."),
                           cl::init(false), cl::Hidden);

static cl::opt<int> ColdCCRelFreq(
    "coldcc-rel-freq", cl::Hidden, cl::init(2), cl::ZeroOrMore,
    cl::desc(
        "Maximum block frequency, expressed as a percentage of caller's "
        "entry frequency, for a call site to be considered cold for enabling "
        "coldcc"));

/// Is this global variable possibly used by a leak checker as a root?  If so,
/// we might not really want to eliminate the stores to it.
static bool isLeakCheckerRoot(GlobalVariable *GV) {
  // A global variable is a root if it is a pointer, or could plausibly contain
  // a pointer.  There are two challenges; one is that we could have a struct
  // that has an inner member which is a pointer.  We recurse through the type
  // to detect these (up to a point).  The other is that we may actually be a
  // union of a pointer and another type, and so our LLVM type is an integer
  // which gets converted into a pointer, or our type is an [i8 x #] with a
  // pointer potentially contained here.

  if (GV->hasPrivateLinkage())
    return false;

  SmallVector<Type *, 4> Types;
  Types.push_back(GV->getValueType());

  unsigned Limit = 20;
  do {
    Type *Ty = Types.pop_back_val();
    switch (Ty->getTypeID()) {
      default: break;
      case Type::PointerTyID: return true;
      case Type::ArrayTyID:
      case Type::VectorTyID: {
        SequentialType *STy = cast<SequentialType>(Ty);
        Types.push_back(STy->getElementType());
        break;
      }
      case Type::StructTyID: {
        StructType *STy = cast<StructType>(Ty);
        if (STy->isOpaque()) return true;
        for (StructType::element_iterator I = STy->element_begin(),
             E = STy->element_end(); I != E; ++I) {
          Type *InnerTy = *I;
          if (isa<PointerType>(InnerTy)) return true;
          if (isa<CompositeType>(InnerTy))
            Types.push_back(InnerTy);
        }
        break;
      }
    }
    if (--Limit == 0) return true;
  } while (!Types.empty());
  return false;
}

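// Illustrative example (hypothetical globals, not from this file): a global
// such as
//   @list_head = internal global %node* null
// is treated as a leak-checker root, since heap objects may be reachable only
// through it at exit, whereas a pointer-free global like
//   @counter = internal global i32 0
// is not.
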
/// Given a value that is stored to a global but never read, determine whether
/// it's safe to remove the store and the chain of computation that feeds the
/// store.
static bool IsSafeComputationToRemove(Value *V, const TargetLibraryInfo *TLI) {
  do {
    if (isa<Constant>(V))
      return true;
    if (!V->hasOneUse())
      return false;
    if (isa<LoadInst>(V) || isa<InvokeInst>(V) || isa<Argument>(V) ||
        isa<GlobalValue>(V))
      return false;
    if (isAllocationFn(V, TLI))
      return true;

    Instruction *I = cast<Instruction>(V);
    if (I->mayHaveSideEffects())
      return false;
    if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      if (!GEP->hasAllConstantIndices())
        return false;
    } else if (I->getNumOperands() != 1) {
      return false;
    }

    V = I->getOperand(0);
  } while (true);
}

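// A sketch of a removable chain (hypothetical IR, not from the original
// source): given
//   %buf = call i8* @malloc(i64 16)
//   %p   = bitcast i8* %buf to i32*
//   store i32* %p, i32** @g          ; @g is never read
// the chain feeding the dead store is safe to delete: each value has a single
// use, the walk bottoms out at an allocation, and nothing in between has side
// effects.
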
/// This GV is a pointer root.  Loop over all users of the global and clean up
/// any that obviously don't assign the global a value that isn't dynamically
/// allocated.
static bool CleanupPointerRootUsers(GlobalVariable *GV,
                                    const TargetLibraryInfo *TLI) {
  // A brief explanation of leak checkers.  The goal is to find bugs where
  // pointers are forgotten, causing an accumulating growth in memory
  // usage over time.  The common strategy for leak checkers is to whitelist the
  // memory pointed to by globals at exit.  This is popular because it also
  // solves another problem where the main thread of a C++ program may shut down
  // before other threads that are still expecting to use those globals.  To
  // handle that case, we expect the program may create a singleton and never
  // destroy it.

  bool Changed = false;

  // If Dead[n].first is the only use of a malloc result, we can delete its
  // chain of computation and the store to the global in Dead[n].second.
  SmallVector<std::pair<Instruction *, Instruction *>, 32> Dead;

  // Constants can't be pointers to dynamically allocated memory.
  for (Value::user_iterator UI = GV->user_begin(), E = GV->user_end();
       UI != E;) {
    User *U = *UI++;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      Value *V = SI->getValueOperand();
      if (isa<Constant>(V)) {
        Changed = true;
        SI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(V)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, SI));
      }
    } else if (MemSetInst *MSI = dyn_cast<MemSetInst>(U)) {
      if (isa<Constant>(MSI->getValue())) {
        Changed = true;
        MSI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MSI->getValue())) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MSI));
      }
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(U)) {
      GlobalVariable *MemSrc = dyn_cast<GlobalVariable>(MTI->getSource());
      if (MemSrc && MemSrc->isConstant()) {
        Changed = true;
        MTI->eraseFromParent();
      } else if (Instruction *I = dyn_cast<Instruction>(MemSrc)) {
        if (I->hasOneUse())
          Dead.push_back(std::make_pair(I, MTI));
      }
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (Constant *C = dyn_cast<Constant>(U)) {
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        // This could have invalidated UI, start over from scratch.
        Dead.clear();
        CleanupPointerRootUsers(GV, TLI);
        return true;
      }
    }
  }

  for (int i = 0, e = Dead.size(); i != e; ++i) {
    if (IsSafeComputationToRemove(Dead[i].first, TLI)) {
      Dead[i].second->eraseFromParent();
      Instruction *I = Dead[i].first;
      do {
        if (isAllocationFn(I, TLI))
          break;
        Instruction *J = dyn_cast<Instruction>(I->getOperand(0));
        if (!J)
          break;
        I->eraseFromParent();
        I = J;
      } while (true);
      I->eraseFromParent();
    }
  }

  return Changed;
}

/// We just marked GV constant.  Loop over all users of the global, cleaning up
/// the obvious ones.  This is largely just a quick scan over the use list to
/// clean up the easy and obvious cruft.  This returns true if it made a change.
static bool CleanupConstantGlobalUsers(Value *V, Constant *Init,
                                       const DataLayout &DL,
                                       TargetLibraryInfo *TLI) {
  bool Changed = false;
  // Note that we need to use a weak value handle for the worklist items. When
  // we delete a constant array, we may also be holding a pointer to one of its
  // elements (or an element of one of its elements if we're dealing with an
  // array of arrays) in the worklist.
  SmallVector<WeakTrackingVH, 8> WorkList(V->user_begin(), V->user_end());
  while (!WorkList.empty()) {
    Value *UV = WorkList.pop_back_val();
    if (!UV)
      continue;

    User *U = cast<User>(UV);

    if (LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (Init) {
        // Replace the load with the initializer.
        LI->replaceAllUsesWith(Init);
        LI->eraseFromParent();
        Changed = true;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // Store must be unreachable or storing Init into the global.
      SI->eraseFromParent();
      Changed = true;
    } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U)) {
      if (CE->getOpcode() == Instruction::GetElementPtr) {
        Constant *SubInit = nullptr;
        if (Init)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);
        Changed |= CleanupConstantGlobalUsers(CE, SubInit, DL, TLI);
      } else if ((CE->getOpcode() == Instruction::BitCast &&
                  CE->getType()->isPointerTy()) ||
                 CE->getOpcode() == Instruction::AddrSpaceCast) {
        // Pointer cast, delete any stores and memsets to the global.
        Changed |= CleanupConstantGlobalUsers(CE, nullptr, DL, TLI);
      }

      if (CE->use_empty()) {
        CE->destroyConstant();
        Changed = true;
      }
    } else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(U)) {
      // Do not transform "gepinst (gep constexpr (GV))" here, because forming
      // "gepconstexpr (gep constexpr (GV))" will cause the two gep's to fold
      // and will invalidate our notion of what Init is.
      Constant *SubInit = nullptr;
      if (!isa<ConstantExpr>(GEP->getOperand(0))) {
        ConstantExpr *CE = dyn_cast_or_null<ConstantExpr>(
            ConstantFoldInstruction(GEP, DL, TLI));
        if (Init && CE && CE->getOpcode() == Instruction::GetElementPtr)
          SubInit = ConstantFoldLoadThroughGEPConstantExpr(Init, CE);

        // If the initializer is an all-null value and we have an inbounds GEP,
        // we already know what the result of any load from that GEP is.
        // TODO: Handle splats.
        if (Init && isa<ConstantAggregateZero>(Init) && GEP->isInBounds())
          SubInit = Constant::getNullValue(GEP->getResultElementType());
      }
      Changed |= CleanupConstantGlobalUsers(GEP, SubInit, DL, TLI);

      if (GEP->use_empty()) {
        GEP->eraseFromParent();
        Changed = true;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(U)) { // memset/cpy/mv
      if (MI->getRawDest() == V) {
        MI->eraseFromParent();
        Changed = true;
      }
    } else if (Constant *C = dyn_cast<Constant>(U)) {
      // If we have a chain of dead constantexprs or other things dangling from
      // us, and if they are all dead, nuke them without remorse.
      if (isSafeToDestroyConstant(C)) {
        C->destroyConstant();
        CleanupConstantGlobalUsers(V, Init, DL, TLI);
        return true;
      }
    }
  }
  return Changed;
}

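// E.g. (illustrative only): once @g is known constant with initializer
// i32 42, a use such as
//   %v = load i32, i32* @g
// is replaced by the constant 42 and the load is erased; any store to @g
// must be unreachable (or store the initializer) and is simply deleted.
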
static bool isSafeSROAElementUse(Value *V);

/// Return true if the specified GEP is a safe user of a derived
/// expression from a global that we want to SROA.
static bool isSafeSROAGEP(User *U) {
  // Check to see if this ConstantExpr GEP is SRA'able.  In particular, we
  // don't like < 3 operand CE's, and we don't like non-constant integer
  // indices.  This enforces that all uses are 'gep GV, 0, C, ...' for some
  // value of C.
  if (U->getNumOperands() < 3 || !isa<Constant>(U->getOperand(1)) ||
      !cast<Constant>(U->getOperand(1))->isNullValue())
    return false;

  gep_type_iterator GEPI = gep_type_begin(U), E = gep_type_end(U);
  ++GEPI; // Skip over the pointer index.

  // For all other levels we require that the indices are constant and in
  // range.  In particular, consider: A[0][i].  We cannot know that the user
  // isn't doing invalid things like allowing i to index an out-of-range
  // subscript that accesses A[1].  This can also happen between different
  // members of a struct in llvm IR.
  for (; GEPI != E; ++GEPI) {
    if (GEPI.isStruct())
      continue;

    ConstantInt *IdxVal = dyn_cast<ConstantInt>(GEPI.getOperand());
    if (!IdxVal || (GEPI.isBoundedSequential() &&
                    IdxVal->getZExtValue() >= GEPI.getSequentialNumElements()))
      return false;
  }

  return llvm::all_of(U->users(),
                      [](User *UU) { return isSafeSROAElementUse(UU); });
}

/// Return true if the specified instruction is a safe user of a derived
/// expression from a global that we want to SROA.
static bool isSafeSROAElementUse(Value *V) {
  // We might have a dead and dangling constant hanging off of here.
  if (Constant *C = dyn_cast<Constant>(V))
    return isSafeToDestroyConstant(C);

  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) return false;

  // Loads are ok.
  if (isa<LoadInst>(I)) return true;

  // Stores *to* the pointer are ok.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getOperand(0) != V;

  // Otherwise, it must be a GEP. Check it and its users are safe to SRA.
  return isa<GetElementPtrInst>(I) && isSafeSROAGEP(I);
}

/// Look at all uses of the global and decide whether it is safe for us to
/// perform this transformation.
static bool GlobalUsersSafeToSRA(GlobalValue *GV) {
  for (User *U : GV->users()) {
    // The user of the global must be a GEP Inst or a ConstantExpr GEP.
    if (!isa<GetElementPtrInst>(U) &&
        (!isa<ConstantExpr>(U) ||
         cast<ConstantExpr>(U)->getOpcode() != Instruction::GetElementPtr))
      return false;

    // Check that the GEP and its users are safe to SRA.
    if (!isSafeSROAGEP(U))
      return false;
  }

  return true;
}

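// Sketch of the accepted use shape (hypothetical IR): every user of the
// global must look like
//   getelementptr { i32, i32 }, { i32, i32 }* @g, i32 0, i32 C, ...
// with a constant, in-range C, so that each use can later be retargeted at
// exactly one of the per-element replacement globals.
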
/// Copy over the debug info for a variable to its SRA replacements.
static void transferSRADebugInfo(GlobalVariable *GV, GlobalVariable *NGV,
                                 uint64_t FragmentOffsetInBits,
                                 uint64_t FragmentSizeInBits,
                                 unsigned NumElements) {
  SmallVector<DIGlobalVariableExpression *, 1> GVs;
  GV->getDebugInfo(GVs);
  for (auto *GVE : GVs) {
    DIVariable *Var = GVE->getVariable();
    DIExpression *Expr = GVE->getExpression();
    if (NumElements > 1) {
      if (auto E = DIExpression::createFragmentExpression(
              Expr, FragmentOffsetInBits, FragmentSizeInBits))
        Expr = *E;
      else
        return;
    }
    auto *NGVE = DIGlobalVariableExpression::get(GVE->getContext(), Var, Expr);
    NGV->addDebugInfo(NGVE);
  }
}

/// Perform scalar replacement of aggregates on the specified global variable.
/// This opens the door for other optimizations by exposing the behavior of the
/// program in a more fine-grained way.  We have determined that this
/// transformation is safe already.  We return the first global variable we
/// insert so that the caller can reprocess it.
static GlobalVariable *SRAGlobal(GlobalVariable *GV, const DataLayout &DL) {
  // Make sure this global only has simple uses that we can SRA.
  if (!GlobalUsersSafeToSRA(GV))
    return nullptr;

  assert(GV->hasLocalLinkage());
  Constant *Init = GV->getInitializer();
  Type *Ty = Init->getType();

  std::vector<GlobalVariable *> NewGlobals;
  Module::GlobalListType &Globals = GV->getParent()->getGlobalList();

  // Get the alignment of the global, either explicit or target-specific.
  unsigned StartAlignment = GV->getAlignment();
  if (StartAlignment == 0)
    StartAlignment = DL.getABITypeAlignment(GV->getType());

  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    unsigned NumElements = STy->getNumElements();
    NewGlobals.reserve(NumElements);
    const StructLayout &Layout = *DL.getStructLayout(STy);
    for (unsigned i = 0, e = NumElements; i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");
      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(i), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      NGV->setExternallyInitialized(GV->isExternallyInitialized());
      NGV->copyAttributesFrom(GV);
      Globals.push_back(NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      uint64_t FieldOffset = Layout.getElementOffset(i);
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, FieldOffset);
      if (NewAlign > DL.getABITypeAlignment(STy->getElementType(i)))
        NGV->setAlignment(NewAlign);

      // Copy over the debug info for the variable.
      uint64_t Size = DL.getTypeAllocSizeInBits(NGV->getValueType());
      uint64_t FragmentOffsetInBits = Layout.getElementOffsetInBits(i);
      transferSRADebugInfo(GV, NGV, FragmentOffsetInBits, Size, NumElements);
    }
  } else if (SequentialType *STy = dyn_cast<SequentialType>(Ty)) {
    unsigned NumElements = STy->getNumElements();
    if (NumElements > 16 && GV->hasNUsesOrMore(16))
      return nullptr; // It's not worth it.
    NewGlobals.reserve(NumElements);
    auto ElTy = STy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(ElTy);
    unsigned EltAlign = DL.getABITypeAlignment(ElTy);
    uint64_t FragmentSizeInBits = DL.getTypeAllocSizeInBits(ElTy);
    for (unsigned i = 0, e = NumElements; i != e; ++i) {
      Constant *In = Init->getAggregateElement(i);
      assert(In && "Couldn't get element of initializer?");

      GlobalVariable *NGV = new GlobalVariable(STy->getElementType(), false,
                                               GlobalVariable::InternalLinkage,
                                               In, GV->getName()+"."+Twine(i),
                                               GV->getThreadLocalMode(),
                                              GV->getType()->getAddressSpace());
      NGV->setExternallyInitialized(GV->isExternallyInitialized());
      NGV->copyAttributesFrom(GV);
      Globals.push_back(NGV);
      NewGlobals.push_back(NGV);

      // Calculate the known alignment of the field.  If the original aggregate
      // had 256 byte alignment for example, something might depend on that:
      // propagate info to each field.
      unsigned NewAlign = (unsigned)MinAlign(StartAlignment, EltSize*i);
      if (NewAlign > EltAlign)
        NGV->setAlignment(NewAlign);
      transferSRADebugInfo(GV, NGV, FragmentSizeInBits * i, FragmentSizeInBits,
                           NumElements);
    }
  }

  if (NewGlobals.empty())
    return nullptr;

  LLVM_DEBUG(dbgs() << "PERFORMING GLOBAL SRA ON: " << *GV << "\n");

  Constant *NullInt =Constant::getNullValue(Type::getInt32Ty(GV->getContext()));

  // Loop over all of the uses of the global, replacing the constantexpr geps
  // with smaller constantexpr geps or direct references.
  while (!GV->use_empty()) {
    User *GEP = GV->user_back();
    assert(((isa<ConstantExpr>(GEP) &&
             cast<ConstantExpr>(GEP)->getOpcode()==Instruction::GetElementPtr)||
            isa<GetElementPtrInst>(GEP)) && "NonGEP CE's are not SRAable!");

    // Ignore the first index operand, which has to be zero or else the program
    // is quite broken (undefined).  Get the second operand, which is the
    // structure or array index.
    unsigned Val = cast<ConstantInt>(GEP->getOperand(2))->getZExtValue();
    if (Val >= NewGlobals.size()) Val = 0; // Out of bound array access.

    Value *NewPtr = NewGlobals[Val];
    Type *NewTy = NewGlobals[Val]->getValueType();

    // Form a shorter GEP if needed.
    if (GEP->getNumOperands() > 3) {
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(GEP)) {
        SmallVector<Constant*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = CE->getNumOperands(); i != e; ++i)
          Idxs.push_back(CE->getOperand(i));
        NewPtr =
            ConstantExpr::getGetElementPtr(NewTy, cast<Constant>(NewPtr), Idxs);
      } else {
        GetElementPtrInst *GEPI = cast<GetElementPtrInst>(GEP);
        SmallVector<Value*, 8> Idxs;
        Idxs.push_back(NullInt);
        for (unsigned i = 3, e = GEPI->getNumOperands(); i != e; ++i)
          Idxs.push_back(GEPI->getOperand(i));
        NewPtr = GetElementPtrInst::Create(
            NewTy, NewPtr, Idxs, GEPI->getName() + "." + Twine(Val), GEPI);
      }
    }
    GEP->replaceAllUsesWith(NewPtr);

    if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(GEP))
      GEPI->eraseFromParent();
    else
      cast<ConstantExpr>(GEP)->destroyConstant();
  }

  // Delete the old global, now that it is dead.
  Globals.erase(GV);
  ++NumSRA;

  // Loop over the new globals array deleting any globals that are obviously
  // dead.  This can arise due to scalarization of a structure or an array that
  // has elements that are dead.
  unsigned FirstGlobal = 0;
  for (unsigned i = 0, e = NewGlobals.size(); i != e; ++i)
    if (NewGlobals[i]->use_empty()) {
      Globals.erase(NewGlobals[i]);
      if (FirstGlobal == i) ++FirstGlobal;
    }

  return FirstGlobal != NewGlobals.size() ? NewGlobals[FirstGlobal] : nullptr;
}

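// Before/after sketch (hypothetical IR, names invented): SRA turns
//   @g = internal global { i32, double } zeroinitializer
//   %x = load i32, i32* getelementptr ({ i32, double }, { i32, double }* @g,
//                                      i32 0, i32 0)
// into two independent globals
//   @g.0 = internal global i32 0
//   @g.1 = internal global double 0.0
// with the load rewritten to address @g.0 directly.
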
/// Return true if all users of the specified value will trap if the value is
/// dynamically null.  PHIs keeps track of any phi nodes we've seen to avoid
/// reprocessing them.
static bool AllUsesOfValueWillTrapIfNull(const Value *V,
                                        SmallPtrSetImpl<const PHINode*> &PHIs) {
  for (const User *U : V->users()) {
    if (const Instruction *I = dyn_cast<Instruction>(U)) {
      // If null pointer is considered valid, then all uses are non-trapping.
      // Non address-space 0 globals have already been pruned by the caller.
      if (NullPointerIsDefined(I->getFunction()))
        return false;
    }
    if (isa<LoadInst>(U)) {
      // Will trap.
    } else if (const StoreInst *SI = dyn_cast<StoreInst>(U)) {
      if (SI->getOperand(0) == V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Storing the value.
      }
    } else if (const CallInst *CI = dyn_cast<CallInst>(U)) {
      if (CI->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(U)) {
      if (II->getCalledValue() != V) {
        //cerr << "NONTRAPPING USE: " << *U;
        return false;  // Not calling the ptr
      }
    } else if (const BitCastInst *CI = dyn_cast<BitCastInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(CI, PHIs)) return false;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      if (!AllUsesOfValueWillTrapIfNull(GEPI, PHIs)) return false;
    } else if (const PHINode *PN = dyn_cast<PHINode>(U)) {
      // If we've already seen this phi node, ignore it, it has already been
      // checked.
      if (PHIs.insert(PN).second && !AllUsesOfValueWillTrapIfNull(PN, PHIs))
        return false;
    } else if (isa<ICmpInst>(U) &&
               isa<ConstantPointerNull>(U->getOperand(1))) {
      // Ignore icmp X, null
    } else {
      //cerr << "NONTRAPPING USE: " << *U;
      return false;
    }
  }
  return true;
}

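// E.g. (illustrative only): a use such as "load i32, i32* %p" traps when %p
// is dynamically null (address space 0, null not defined), so it preserves
// the property; a use such as "store i32* %p, i32** @other" lets the value
// escape without trapping and defeats it.
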
/// Return true if all uses of any loads from GV will trap if the loaded value
/// is null.  Note that this also permits comparisons of the loaded value
/// against null, as a special case.
static bool AllUsesOfLoadedValueWillTrapIfNull(const GlobalVariable *GV) {
  for (const User *U : GV->users())
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      SmallPtrSet<const PHINode*, 8> PHIs;
      if (!AllUsesOfValueWillTrapIfNull(LI, PHIs))
        return false;
    } else if (isa<StoreInst>(U)) {
      // Ignore stores to the global.
    } else {
      // We don't know or understand this user, bail out.
      //cerr << "UNKNOWN USER OF GLOBAL!: " << *U;
      return false;
    }
  return true;
}

static bool OptimizeAwayTrappingUsesOfValue(Value *V, Constant *NewV) {
  bool Changed = false;
  for (auto UI = V->user_begin(), E = V->user_end(); UI != E; ) {
    Instruction *I = cast<Instruction>(*UI++);
    // Uses are non-trapping if null pointer is considered valid.
    // Non address-space 0 globals are already pruned by the caller.
    if (NullPointerIsDefined(I->getFunction()))
      return false;
    if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      LI->setOperand(0, NewV);
      Changed = true;
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      if (SI->getOperand(1) == V) {
        SI->setOperand(1, NewV);
        Changed = true;
      }
    } else if (isa<CallInst>(I) || isa<InvokeInst>(I)) {
      CallSite CS(I);
      if (CS.getCalledValue() == V) {
        // Calling through the pointer!  Turn into a direct call, but be careful
        // that the pointer is not also being passed as an argument.
        CS.setCalledFunction(NewV);
        Changed = true;
        bool PassedAsArg = false;
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.getArgument(i) == V) {
            PassedAsArg = true;
            CS.setArgument(i, NewV);
          }

        if (PassedAsArg) {
          // Being passed as an argument also.  Be careful to not invalidate UI!
          UI = V->user_begin();
        }
      }
    } else if (CastInst *CI = dyn_cast<CastInst>(I)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(CI,
                                ConstantExpr::getCast(CI->getOpcode(),
                                                      NewV, CI->getType()));
      if (CI->use_empty()) {
        Changed = true;
        CI->eraseFromParent();
      }
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(I)) {
      // Should handle GEP here.
      SmallVector<Constant*, 8> Idxs;
      Idxs.reserve(GEPI->getNumOperands()-1);
      for (User::op_iterator i = GEPI->op_begin() + 1, e = GEPI->op_end();
           i != e; ++i)
        if (Constant *C = dyn_cast<Constant>(*i))
          Idxs.push_back(C);
        else
          break;
      if (Idxs.size() == GEPI->getNumOperands()-1)
        Changed |= OptimizeAwayTrappingUsesOfValue(
            GEPI, ConstantExpr::getGetElementPtr(GEPI->getSourceElementType(),
                                                 NewV, Idxs));
      if (GEPI->use_empty()) {
        Changed = true;
        GEPI->eraseFromParent();
      }
    }
  }

  return Changed;
}

/// The specified global has only one non-null value stored into it.  If there
/// are uses of the loaded value that would trap if the loaded value is
/// dynamically null, then we know that they cannot be reachable with a null
/// value, so we can optimize away the load.
static bool OptimizeAwayTrappingUsesOfLoads(GlobalVariable *GV, Constant *LV,
                                            const DataLayout &DL,
                                            TargetLibraryInfo *TLI) {
  bool Changed = false;

  // Keep track of whether we are able to remove all the uses of the global
  // other than the store that defines it.
  bool AllNonStoreUsesGone = true;

  // Replace all uses of loads with uses of uses of the stored value.
  for (Value::user_iterator GUI = GV->user_begin(), E = GV->user_end();
       GUI != E;) {
    User *GlobalUser = *GUI++;
    if (LoadInst *LI = dyn_cast<LoadInst>(GlobalUser)) {
      Changed |= OptimizeAwayTrappingUsesOfValue(LI, LV);
      // If we were able to delete all uses of the loads
      if (LI->use_empty()) {
        LI->eraseFromParent();
        Changed = true;
      } else {
        AllNonStoreUsesGone = false;
      }
    } else if (isa<StoreInst>(GlobalUser)) {
      // Ignore the store that stores "LV" to the global.
      assert(GlobalUser->getOperand(1) == GV &&
             "Must be storing *to* the global");
    } else {
      AllNonStoreUsesGone = false;

      // If we get here we could have other crazy uses that are transitively
      // loaded.
      assert((isa<PHINode>(GlobalUser) || isa<SelectInst>(GlobalUser) ||
              isa<ConstantExpr>(GlobalUser) || isa<CmpInst>(GlobalUser) ||
              isa<BitCastInst>(GlobalUser) ||
              isa<GetElementPtrInst>(GlobalUser)) &&
             "Only expect loads and stores!");
    }
  }

  if (Changed) {
    LLVM_DEBUG(dbgs() << "OPTIMIZED LOADS FROM STORED ONCE POINTER: " << *GV
                      << "\n");
    ++NumGlobUses;
  }

  // If we nuked all of the loads, then none of the stores are needed either,
  // nor is the global.
  if (AllNonStoreUsesGone) {
    if (isLeakCheckerRoot(GV)) {
      Changed |= CleanupPointerRootUsers(GV, TLI);
    } else {
      Changed = true;
      CleanupConstantGlobalUsers(GV, nullptr, DL, TLI);
    }
    if (GV->use_empty()) {
      LLVM_DEBUG(dbgs() << "  *** GLOBAL NOW DEAD!\n");
      Changed = true;
      GV->eraseFromParent();
      ++NumDeleted;
    }
  }
  return Changed;
}

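// Illustrative scenario (hypothetical IR): if the only non-null value ever
// stored to @g is @f, then in
//   %p = load void()*, void()** @g
//   call void %p()
// the indirect call would trap if %p were null, so it must observe @f; the
// call becomes "call void @f()" and the load (and eventually @g) goes away.
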
/// Walk the use list of V, constant folding all of the instructions that are
/// foldable.
static void ConstantPropUsersOf(Value *V, const DataLayout &DL,
                                TargetLibraryInfo *TLI) {
  for (Value::user_iterator UI = V->user_begin(), E = V->user_end(); UI != E; )
    if (Instruction *I = dyn_cast<Instruction>(*UI++))
      if (Constant *NewC = ConstantFoldInstruction(I, DL, TLI)) {
        I->replaceAllUsesWith(NewC);

        // Advance UI to the next non-I use to avoid invalidating it!
        // Instructions could multiply use V.
        while (UI != E && *UI == I)
          ++UI;
        if (isInstructionTriviallyDead(I, TLI))
          I->eraseFromParent();
      }
}

/// This function takes the specified global variable, and transforms the
/// program as if it always contained the result of the specified malloc.
/// Because it is always the result of the specified malloc, there is no reason
/// to actually DO the malloc.  Instead, turn the malloc into a global, and
/// rewrite any loads of GV as uses of the new global.
static GlobalVariable *
OptimizeGlobalAddressOfMalloc(GlobalVariable *GV, CallInst *CI, Type *AllocTy,
                              ConstantInt *NElements, const DataLayout &DL,
                              TargetLibraryInfo *TLI) {
  LLVM_DEBUG(errs() << "PROMOTING GLOBAL: " << *GV << "  CALL = " << *CI
                    << '\n');

  Type *GlobalType;
  if (NElements->getZExtValue() == 1)
    GlobalType = AllocTy;
  else
    // If we have an array allocation, the global variable is of an array.
    GlobalType = ArrayType::get(AllocTy, NElements->getZExtValue());

  // Create the new global variable.  The contents of the malloc'd memory is
  // undefined, so initialize with an undef value.
  GlobalVariable *NewGV = new GlobalVariable(
      *GV->getParent(), GlobalType, false, GlobalValue::InternalLinkage,
      UndefValue::get(GlobalType), GV->getName() + ".body", nullptr,
      GV->getThreadLocalMode());

  // If there are bitcast users of the malloc (which is typical, usually we have
  // a malloc + bitcast) then replace them with uses of the new global.  Update
  // other users to use the global as well.
  BitCastInst *TheBC = nullptr;
  while (!CI->use_empty()) {
    Instruction *User = cast<Instruction>(CI->user_back());
    if (BitCastInst *BCI = dyn_cast<BitCastInst>(User)) {
      if (BCI->getType() == NewGV->getType()) {
        BCI->replaceAllUsesWith(NewGV);
        BCI->eraseFromParent();
      } else {
        BCI->setOperand(0, NewGV);
      }
    } else {
      if (!TheBC)
        TheBC = new BitCastInst(NewGV, CI->getType(), "newgv", CI);
      User->replaceUsesOfWith(CI, TheBC);
    }
  }

  Constant *RepValue = NewGV;
  if (NewGV->getType() != GV->getValueType())
    RepValue = ConstantExpr::getBitCast(RepValue, GV->getValueType());

  // If there is a comparison against null, we will insert a global bool to
  // keep track of whether the global was initialized yet or not.
  GlobalVariable *InitBool =
      new GlobalVariable(Type::getInt1Ty(GV->getContext()), false,
                         GlobalValue::InternalLinkage,
                         ConstantInt::getFalse(GV->getContext()),
                         GV->getName()+".init", GV->getThreadLocalMode());
  bool InitBoolUsed = false;

  // Loop over all uses of GV, processing them in turn.
  while (!GV->use_empty()) {
    if (StoreInst *SI = dyn_cast<StoreInst>(GV->user_back())) {
      // The global is initialized when the store to it occurs.
      new StoreInst(ConstantInt::getTrue(GV->getContext()), InitBool, false, 0,
                    SI->getOrdering(), SI->getSyncScopeID(), SI);
      SI->eraseFromParent();
      continue;
    }

    LoadInst *LI = cast<LoadInst>(GV->user_back());
    while (!LI->use_empty()) {
      Use &LoadUse = *LI->use_begin();
      ICmpInst *ICI = dyn_cast<ICmpInst>(LoadUse.getUser());
      if (!ICI) {
        LoadUse = RepValue;
        continue;
      }

      // Replace the cmp X, 0 with a use of the bool value.
      // Sink the load to where the compare was, if atomic rules allow us to.
      Value *LV = new LoadInst(InitBool->getValueType(), InitBool,
                               InitBool->getName() + ".val", false, 0,
                               LI->getOrdering(), LI->getSyncScopeID(),
                               LI->isUnordered() ? (Instruction *)ICI : LI);
      InitBoolUsed = true;
      switch (ICI->getPredicate()) {
      default: llvm_unreachable("Unknown ICmp Predicate!");
      case ICmpInst::ICMP_ULT:
      case ICmpInst::ICMP_SLT:   // X < null -> always false
        LV = ConstantInt::getFalse(GV->getContext());
        break;
      case ICmpInst::ICMP_ULE:
      case ICmpInst::ICMP_SLE:
      case ICmpInst::ICMP_EQ:
        LV = BinaryOperator::CreateNot(LV, "notinit", ICI);
        break;
      case ICmpInst::ICMP_NE:
      case ICmpInst::ICMP_UGE:
      case ICmpInst::ICMP_SGE:
      case ICmpInst::ICMP_UGT:
      case ICmpInst::ICMP_SGT:
        break;  // no change.
      }
      ICI->replaceAllUsesWith(LV);
      ICI->eraseFromParent();
    }
    LI->eraseFromParent();
  }

  // If the initialization boolean was used, insert it, otherwise delete it.
  if (!InitBoolUsed) {
    while (!InitBool->use_empty())  // Delete initializations
      cast<StoreInst>(InitBool->user_back())->eraseFromParent();
    delete InitBool;
  } else
    GV->getParent()->getGlobalList().insert(GV->getIterator(), InitBool);

  // Now the GV is dead, nuke it and the malloc.
  GV->eraseFromParent();
  CI->eraseFromParent();

  // To further other optimizations, loop over all users of NewGV and try to
  // constant prop them.  This will promote GEP instructions with constant
  // indices into GEP constant-exprs, which will allow global-opt to hack on it.
  ConstantPropUsersOf(NewGV, DL, TLI);
  if (RepValue != NewGV)
    ConstantPropUsersOf(RepValue, DL, TLI);

  return NewGV;
}

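// Before/after sketch (hypothetical IR, names invented): for a global that is
// initialized exactly once from a small fixed-size malloc,
//   store i32* %mem, i32** @g        ; %mem = result of malloc(4)
// the allocation becomes static storage,
//   @g.body = internal global i32 undef
// loads of @g then yield @g.body directly, and any "icmp eq %ld, null" tests
// are answered by an @g.init boolean that records whether the store has run.
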
/// Scan the use-list of V checking to make sure that there are no complex uses
/// of V.  We permit simple things like dereferencing the pointer, but not
/// storing through the address, unless it is to the specified global.
static bool ValueIsOnlyUsedLocallyOrStoredToOneGlobal(const Instruction *V,
                                                      const GlobalVariable *GV,
                                        SmallPtrSetImpl<const PHINode*> &PHIs) {
  for (const User *U : V->users()) {
    const Instruction *Inst = cast<Instruction>(U);

    if (isa<LoadInst>(Inst) || isa<CmpInst>(Inst)) {
      continue; // Fine, ignore.
    }

    if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (SI->getOperand(0) == V && SI->getOperand(1) != GV)
        return false;  // Storing the pointer itself... bad.
      continue; // Otherwise, storing through it, or storing into GV... fine.
    }

    // Must index into the array and into the struct.
    if (isa<GetElementPtrInst>(Inst) && Inst->getNumOperands() >= 3) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(Inst, GV, PHIs))
        return false;
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(Inst)) {
      // PHIs are ok if all uses are ok.  Don't infinitely recurse through PHI
      // cycles.
      if (PHIs.insert(PN).second)
        if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(PN, GV, PHIs))
          return false;
      continue;
    }

    if (const BitCastInst *BCI = dyn_cast<BitCastInst>(Inst)) {
      if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(BCI, GV, PHIs))
        return false;
      continue;
    }

    return false;
  }
  return true;
}

/// The Alloc pointer is stored into GV somewhere.  Transform all uses of the
/// allocation into loads from the global and uses of the resultant pointer.
/// Further, delete the store into GV.  This assumes that these values pass the
/// 'ValueIsOnlyUsedLocallyOrStoredToOneGlobal' predicate.
static void ReplaceUsesOfMallocWithGlobal(Instruction *Alloc,
                                          GlobalVariable *GV) {
  while (!Alloc->use_empty()) {
    Instruction *U = cast<Instruction>(*Alloc->user_begin());
    Instruction *InsertPt = U;
    if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
      // If this is the store of the allocation into the global, remove it.
      if (SI->getOperand(1) == GV) {
        SI->eraseFromParent();
        continue;
      }
    } else if (PHINode *PN = dyn_cast<PHINode>(U)) {
      // Insert the load in the corresponding predecessor, not right before the
      // PHI.
      InsertPt = PN->getIncomingBlock(*Alloc->use_begin())->getTerminator();
    } else if (isa<BitCastInst>(U)) {
      // Must be bitcast between the malloc and store to initialize the global.
      ReplaceUsesOfMallocWithGlobal(U, GV);
      U->eraseFromParent();
      continue;
    } else if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(U)) {
      // If this is a "GEP bitcast" and the user is a store to the global, then
      // just process it as a bitcast.
      if (GEPI->hasAllZeroIndices() && GEPI->hasOneUse())
        if (StoreInst *SI = dyn_cast<StoreInst>(GEPI->user_back()))
          if (SI->getOperand(1) == GV) {
            // Must be bitcast GEP between the malloc and store to initialize
            // the global.
            ReplaceUsesOfMallocWithGlobal(GEPI, GV);
            GEPI->eraseFromParent();
            continue;
          }
    }

    // Insert a load from the global, and use it instead of the malloc.
    Value *NL =
        new LoadInst(GV->getValueType(), GV, GV->getName() + ".val", InsertPt);
    U->replaceUsesOfWith(Alloc, NL);
  }
}

/// Verify that all uses of V (a load, or a phi of a load) are simple enough to
/// perform heap SRA on.  This permits GEP's that index through the array and
/// struct field, icmps of null, and PHIs.
static bool LoadUsesSimpleEnoughForHeapSRA(const Value *V,
                        SmallPtrSetImpl<const PHINode*> &LoadUsingPHIs,
                        SmallPtrSetImpl<const PHINode*> &LoadUsingPHIsPerLoad) {
  // We permit two users of the load: setcc comparing against the null
  // pointer, and a getelementptr of a specific form.
  for (const User *U : V->users()) {
    const Instruction *UI = cast<Instruction>(U);

    // Comparison against null is ok.
    if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UI)) {
      if (!isa<ConstantPointerNull>(ICI->getOperand(1)))
        return false;
      continue;
    }

    // getelementptr is also ok, but only a simple form.
    if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(UI)) {
      // Must index into the array and into the struct.
      if (GEPI->getNumOperands() < 3)
        return false;

      // Otherwise the GEP is ok.
      continue;
    }

    if (const PHINode *PN = dyn_cast<PHINode>(UI)) {
      if (!LoadUsingPHIsPerLoad.insert(PN).second)
        // This means some phi nodes are dependent on each other.
        // Avoid infinite looping!
        return false;
      if (!LoadUsingPHIs.insert(PN).second)
        // If we have already analyzed this PHI, then it is safe.
        continue;

      // Make sure all uses of the PHI are simple enough to transform.
      if (!LoadUsesSimpleEnoughForHeapSRA(PN,
                                          LoadUsingPHIs, LoadUsingPHIsPerLoad))
        return false;

      continue;
    }

    // Otherwise we don't know what this is, not ok.
    return false;
  }

  return true;
}

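// Hypothetical sketch of a "simple enough" use pattern for heap SRA:
//   %s   = load %pair*, %pair** @g
//   %f1p = getelementptr %pair, %pair* %s, i32 0, i32 1   ; array+struct index
//   %cmp = icmp eq %pair* %s, null                        ; null compare
// GEPs of this shape, comparisons against null, and PHIs of such loads are
// accepted; any other user rejects the transformation.
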
/// If all users of values loaded from GV are simple enough to perform HeapSRA,
/// return true.
static bool AllGlobalLoadUsesSimpleEnoughForHeapSRA(const GlobalVariable *GV,
                                                    Instruction *StoredVal) {
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIs;
  SmallPtrSet<const PHINode*, 32> LoadUsingPHIsPerLoad;
  for (const User *U : GV->users())
    if (const LoadInst *LI = dyn_cast<LoadInst>(U)) {
      if (!LoadUsesSimpleEnoughForHeapSRA(LI, LoadUsingPHIs,
                                          LoadUsingPHIsPerLoad))
        return false;
      LoadUsingPHIsPerLoad.clear();
    }

  // If we reach here, we know that all uses of the loads and transitive uses
  // (through PHI nodes) are simple enough to transform.  However, we don't know
  // that all inputs to the PHI nodes are in the same equivalence sets.
  // Check to verify that all operands of the PHIs are either PHIs that can be
  // transformed, loads from GV, or MI itself.
  for (const PHINode *PN : LoadUsingPHIs) {
    for (unsigned op = 0, e = PN->getNumIncomingValues(); op != e; ++op) {
      Value *InVal = PN->getIncomingValue(op);

      // PHI of the stored value itself is ok.
      if (InVal == StoredVal) continue;

      if (const PHINode *InPN = dyn_cast<PHINode>(InVal)) {
        // One of the PHIs in our set is (optimistically) ok.
        if (LoadUsingPHIs.count(InPN))
          continue;
        return false;
      }

      // Load from GV is ok.
      if (const LoadInst *LI = dyn_cast<LoadInst>(InVal))
        if (LI->getOperand(0) == GV)
          continue;

      // UNDEF? NULL?

      // Anything else is rejected.
      return false;
    }
  }

  return true;
}

static Value *GetHeapSROAValue(Value *V, unsigned FieldNo,
              DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
              std::vector<std::pair<PHINode *, unsigned>> &PHIsToRewrite) {
  std::vector<Value *> &FieldVals = InsertedScalarizedValues[V];

  if (FieldNo >= FieldVals.size())
    FieldVals.resize(FieldNo+1);

  // If we already have this value, just reuse the previously scalarized
  // version.
  if (Value *FieldVal = FieldVals[FieldNo])
    return FieldVal;

  // Depending on what instruction this is, we have several cases.
  Value *Result;
  if (LoadInst *LI = dyn_cast<LoadInst>(V)) {
    // This is a scalarized version of the load from the global.  Just create
    // a new Load of the scalarized global.
    Value *V = GetHeapSROAValue(LI->getOperand(0), FieldNo,
                                InsertedScalarizedValues, PHIsToRewrite);
    Result = new LoadInst(V->getType()->getPointerElementType(), V,
                          LI->getName() + ".f" + Twine(FieldNo), LI);
  } else {
    PHINode *PN = cast<PHINode>(V);
    // PN's type is pointer to struct.  Make a new PHI of pointer to struct
    // field.

    PointerType *PTy = cast<PointerType>(PN->getType());
    StructType *ST = cast<StructType>(PTy->getElementType());

    unsigned AS = PTy->getAddressSpace();
    PHINode *NewPN =
        PHINode::Create(PointerType::get(ST->getElementType(FieldNo), AS),
                        PN->getNumIncomingValues(),
                        PN->getName()+".f"+Twine(FieldNo), PN);
    Result = NewPN;
    PHIsToRewrite.push_back(std::make_pair(PN, FieldNo));
  }

  return FieldVals[FieldNo] = Result;
}

/// Given a load instruction and a value derived from the load, rewrite the
/// derived value to use the HeapSRoA'd load.
static void RewriteHeapSROALoadUser(Instruction *LoadUser,
              DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
              std::vector<std::pair<PHINode *, unsigned>> &PHIsToRewrite) {
  // If this is a comparison against null, handle it.
  if (ICmpInst *SCI = dyn_cast<ICmpInst>(LoadUser)) {
    assert(isa<ConstantPointerNull>(SCI->getOperand(1)));
    // If we have a setcc of the loaded pointer, we can use a setcc of any
    // field.
    Value *NPtr = GetHeapSROAValue(SCI->getOperand(0), 0,
                                   InsertedScalarizedValues, PHIsToRewrite);

    Value *New = new ICmpInst(SCI, SCI->getPredicate(), NPtr,
                              Constant::getNullValue(NPtr->getType()),
                              SCI->getName());
    SCI->replaceAllUsesWith(New);
    SCI->eraseFromParent();
    return;
  }

  // Handle 'getelementptr Ptr, Idx, i32 FieldNo ...'
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(LoadUser)) {
    assert(GEPI->getNumOperands() >= 3 && isa<ConstantInt>(GEPI->getOperand(2))
           && "Unexpected GEPI!");

    // Load the pointer for this field.
    unsigned FieldNo = cast<ConstantInt>(GEPI->getOperand(2))->getZExtValue();
    Value *NewPtr = GetHeapSROAValue(GEPI->getOperand(0), FieldNo,
                                     InsertedScalarizedValues, PHIsToRewrite);

    // Create the new GEP idx vector.
    SmallVector<Value*, 8> GEPIdx;
    GEPIdx.push_back(GEPI->getOperand(1));
    GEPIdx.append(GEPI->op_begin()+3, GEPI->op_end());

    Value *NGEPI = GetElementPtrInst::Create(GEPI->getResultElementType(),
                                             NewPtr, GEPIdx,
                                             GEPI->getName(), GEPI);
    GEPI->replaceAllUsesWith(NGEPI);
    GEPI->eraseFromParent();
    return;
  }

  // Recursively transform the users of PHI nodes.  This will lazily create the
  // PHIs that are needed for individual elements.  Keep track of what PHIs we
  // see in InsertedScalarizedValues so that we don't get infinite loops (very
  // antisocial).  If the PHI is already in InsertedScalarizedValues, it has
  // already been seen first by another load, so its uses have already been
  // processed.
  PHINode *PN = cast<PHINode>(LoadUser);
  if (!InsertedScalarizedValues.insert(std::make_pair(PN,
                                              std::vector<Value *>())).second)
    return;

  // If this is the first time we've seen this PHI, recursively process all
  // users.
  for (auto UI = PN->user_begin(), E = PN->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }
}

/// We are performing Heap SRoA on a global.  Ptr is a value loaded from the
/// global.  Eliminate all uses of Ptr, making them use FieldGlobals instead.
/// All uses of loaded values satisfy AllGlobalLoadUsesSimpleEnoughForHeapSRA.
static void RewriteUsesOfLoadForHeapSRoA(LoadInst *Load,
              DenseMap<Value *, std::vector<Value *>> &InsertedScalarizedValues,
              std::vector<std::pair<PHINode *, unsigned> > &PHIsToRewrite) {
  for (auto UI = Load->user_begin(), E = Load->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);
    RewriteHeapSROALoadUser(User, InsertedScalarizedValues, PHIsToRewrite);
  }

  if (Load->use_empty()) {
    Load->eraseFromParent();
    InsertedScalarizedValues.erase(Load);
  }
}

/// CI is an allocation of an array of structures.  Break it up into multiple
/// allocations of arrays of the fields.
static GlobalVariable *PerformHeapAllocSRoA(GlobalVariable *GV, CallInst *CI,
                                            Value *NElems, const DataLayout &DL,
                                            const TargetLibraryInfo *TLI) {
  LLVM_DEBUG(dbgs() << "SROA HEAP ALLOC: " << *GV << "  MALLOC = " << *CI
                    << '\n');
  Type *MAT = getMallocAllocatedType(CI, TLI);
  StructType *STy = cast<StructType>(MAT);

  // There is guaranteed to be at least one use of the malloc (storing
  // it into GV).  If there are other uses, change them to be uses of
  // the global to simplify later code.  This also deletes the store
  // into GV.
  ReplaceUsesOfMallocWithGlobal(CI, GV);

  // Okay, at this point, there are no users of the malloc.  Insert N
  // new mallocs at the same place as CI, and N globals.
  std::vector<Value *> FieldGlobals;
  std::vector<Value *> FieldMallocs;

  SmallVector<OperandBundleDef, 1> OpBundles;
  CI->getOperandBundlesAsDefs(OpBundles);

  unsigned AS = GV->getType()->getPointerAddressSpace();
  for (unsigned FieldNo = 0, e = STy->getNumElements(); FieldNo != e;++FieldNo){
    Type *FieldTy = STy->getElementType(FieldNo);
    PointerType *PFieldTy = PointerType::get(FieldTy, AS);

    GlobalVariable *NGV = new GlobalVariable(
        *GV->getParent(), PFieldTy, false, GlobalValue::InternalLinkage,
        Constant::getNullValue(PFieldTy), GV->getName() + ".f" + Twine(FieldNo),
        nullptr, GV->getThreadLocalMode());
    NGV->copyAttributesFrom(GV);
    FieldGlobals.push_back(NGV);

    unsigned TypeSize = DL.getTypeAllocSize(FieldTy);
    if (StructType *ST = dyn_cast<StructType>(FieldTy))
      TypeSize = DL.getStructLayout(ST)->getSizeInBytes();
    Type *IntPtrTy = DL.getIntPtrType(CI->getType());
    Value *NMI = CallInst::CreateMalloc(CI, IntPtrTy, FieldTy,
                                        ConstantInt::get(IntPtrTy, TypeSize),
                                        NElems, OpBundles, nullptr,
                                        CI->getName() + ".f" + Twine(FieldNo));
    FieldMallocs.push_back(NMI);
    new StoreInst(NMI, NGV, CI);
  }

  // The tricky aspect of this transformation is handling the case when malloc
  // fails.  In the original code, malloc failing would set the result pointer
  // of malloc to null.  In this case, some mallocs could succeed and others
  // could fail.  As such, we emit code that looks like this:
  //    F0 = malloc(field0)
  //    F1 = malloc(field1)
  //    F2 = malloc(field2)
  //    if (F0 == 0 || F1 == 0 || F2 == 0) {
  //      if (F0) { free(F0); F0 = 0; }
  //      if (F1) { free(F1); F1 = 0; }
  //      if (F2) { free(F2); F2 = 0; }
  //    }
  // The malloc can also fail if its argument is too large.
  Constant *ConstantZero = ConstantInt::get(CI->getArgOperand(0)->getType(), 0);
  Value *RunningOr = new ICmpInst(CI, ICmpInst::ICMP_SLT, CI->getArgOperand(0),
                                  ConstantZero, "isneg");
  for (unsigned i = 0, e = FieldMallocs.size(); i != e; ++i) {
    Value *Cond = new ICmpInst(CI, ICmpInst::ICMP_EQ, FieldMallocs[i],
                             Constant::getNullValue(FieldMallocs[i]->getType()),
                               "isnull");
    RunningOr = BinaryOperator::CreateOr(RunningOr, Cond, "tmp", CI);
  }

  // Split the basic block at the old malloc.
  BasicBlock *OrigBB = CI->getParent();
  BasicBlock *ContBB =
      OrigBB->splitBasicBlock(CI->getIterator(), "malloc_cont");

  // Create the block to check the first condition.  Put all these blocks at the
  // end of the function as they are unlikely to be executed.
  BasicBlock *NullPtrBlock = BasicBlock::Create(OrigBB->getContext(),
                                                "malloc_ret_null",
                                                OrigBB->getParent());

  // Remove the uncond branch from OrigBB to ContBB, turning it into a cond
  // branch on RunningOr.
  OrigBB->getTerminator()->eraseFromParent();
  BranchInst::Create(NullPtrBlock, ContBB, RunningOr, OrigBB);

  // Within the NullPtrBlock, we need to emit a comparison and branch for each
  // pointer, because some may be null while others are not.
  for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
    Value *GVVal =
        new LoadInst(cast<GlobalVariable>(FieldGlobals[i])->getValueType(),
                     FieldGlobals[i], "tmp", NullPtrBlock);
    Value *Cmp = new ICmpInst(*NullPtrBlock, ICmpInst::ICMP_NE, GVVal,
                              Constant::getNullValue(GVVal->getType()));
    BasicBlock *FreeBlock = BasicBlock::Create(Cmp->getContext(), "free_it",
                                               OrigBB->getParent());
    BasicBlock *NextBlock = BasicBlock::Create(Cmp->getContext(), "next",
                                               OrigBB->getParent());
    Instruction *BI = BranchInst::Create(FreeBlock, NextBlock,
                                         Cmp, NullPtrBlock);

    // Fill in FreeBlock.
    CallInst::CreateFree(GVVal, OpBundles, BI);
    new StoreInst(Constant::getNullValue(GVVal->getType()), FieldGlobals[i],
                  FreeBlock);
    BranchInst::Create(NextBlock, FreeBlock);

    NullPtrBlock = NextBlock;
  }

  BranchInst::Create(ContBB, NullPtrBlock);

  // CI is no longer needed, remove it.
  CI->eraseFromParent();

  /// As we process loads, if we can't immediately update all uses of the load,
  /// keep track of what scalarized loads are inserted for a given load.
  DenseMap<Value *, std::vector<Value *>> InsertedScalarizedValues;
  InsertedScalarizedValues[GV] = FieldGlobals;

  std::vector<std::pair<PHINode *, unsigned>> PHIsToRewrite;

  // Okay, the malloc site is completely handled.  All of the uses of GV are now
  // loads, and all uses of those loads are simple.  Rewrite them to use loads
  // of the per-field globals instead.
  for (auto UI = GV->user_begin(), E = GV->user_end(); UI != E;) {
    Instruction *User = cast<Instruction>(*UI++);

    if (LoadInst *LI = dyn_cast<LoadInst>(User)) {
      RewriteUsesOfLoadForHeapSRoA(LI, InsertedScalarizedValues, PHIsToRewrite);
      continue;
    }

    // Must be a store of null.
    StoreInst *SI = cast<StoreInst>(User);
    assert(isa<ConstantPointerNull>(SI->getOperand(0)) &&
           "Unexpected heap-sra user!");

    // Insert a store of null into each global.
    for (unsigned i = 0, e = FieldGlobals.size(); i != e; ++i) {
      Type *ValTy = cast<GlobalValue>(FieldGlobals[i])->getValueType();
      Constant *Null = Constant::getNullValue(ValTy);
      new StoreInst(Null, FieldGlobals[i], SI);
    }
    // Erase the original store.
    SI->eraseFromParent();
  }

  // While we have PHIs that are interesting to rewrite, do it.
  while (!PHIsToRewrite.empty()) {
    PHINode *PN = PHIsToRewrite.back().first;
    unsigned FieldNo = PHIsToRewrite.back().second;
    PHIsToRewrite.pop_back();
    PHINode *FieldPN = cast<PHINode>(InsertedScalarizedValues[PN][FieldNo]);
    assert(FieldPN->getNumIncomingValues() == 0 &&"Already processed this phi");

    // Add all the incoming values.  This can materialize more phis.
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *InVal = PN->getIncomingValue(i);
      InVal = GetHeapSROAValue(InVal, FieldNo, InsertedScalarizedValues,
                               PHIsToRewrite);
      FieldPN->addIncoming(InVal, PN->getIncomingBlock(i));
    }
  }

  // Drop all inter-phi links and any loads that made it this far.
  for (DenseMap<Value *, std::vector<Value *>>::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->dropAllReferences();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->dropAllReferences();
  }

  // Delete all the phis and loads now that inter-references are dead.
  for (DenseMap<Value *, std::vector<Value *>>::iterator
       I = InsertedScalarizedValues.begin(), E = InsertedScalarizedValues.end();
       I != E; ++I) {
    if (PHINode *PN = dyn_cast<PHINode>(I->first))
      PN->eraseFromParent();
    else if (LoadInst *LI = dyn_cast<LoadInst>(I->first))
      LI->eraseFromParent();
  }

  // The old global is now dead, remove it.
  GV->eraseFromParent();

  ++NumHeapSRA;
  return cast<GlobalVariable>(FieldGlobals[0]);
}

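// Field-split sketch (hypothetical IR, names invented): a single allocation
//   @g = internal global %pair* null       ; %pair = type { i32, i32* }
//   store %pair* %mem, %pair** @g          ; %mem = malloc of [N x %pair]
// becomes one malloc and one global per field,
//   @g.f0 = internal global i32* null
//   @g.f1 = internal global i32** null
// with GEP/load users rewritten to load from the matching field global, and
// cleanup code emitted in case any of the split mallocs fails.
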
1465 /// This function is called when we see a pointer global variable with a single
1466 /// value stored it that is a malloc or cast of malloc.
1467 static bool tryToOptimizeStoreOfMallocToGlobal(GlobalVariable *GV, CallInst *CI,
1468 Type *AllocTy,
1469 AtomicOrdering Ordering,
1470 const DataLayout &DL,
1471 TargetLibraryInfo *TLI) {
1472 // If this is a malloc of an abstract type, don't touch it.
1473 if (!AllocTy->isSized())
1474 return false;
1476 // We can't optimize this global unless all uses of it are *known* to be
1477 // of the malloc value, not of the null initializer value (consider a use
1478 // that compares the global's value against zero to see if the malloc has
1479 // been reached). To do this, we check to see if all uses of the global
1480 // would trap if the global were null: this proves that they must all
1481 // happen after the malloc.
1482 if (!AllUsesOfLoadedValueWillTrapIfNull(GV))
1483 return false;
// We can't optimize this if the malloc itself is used in a complex way,
// for example, being stored into multiple globals. This allows the
// malloc to be stored into the specified global, loaded, icmp'd, and
// GEP'd. These are all things we can transform to use the global
// instead.
1490 SmallPtrSet<const PHINode*, 8> PHIs;
1491 if (!ValueIsOnlyUsedLocallyOrStoredToOneGlobal(CI, GV, PHIs))
1492 return false;
1494 // If we have a global that is only initialized with a fixed size malloc,
1495 // transform the program to use global memory instead of malloc'd memory.
1496 // This eliminates dynamic allocation, avoids an indirection accessing the
1497 // data, and exposes the resultant global to further GlobalOpt.
1498 // We cannot optimize the malloc if we cannot determine malloc array size.
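//
// A hedged sketch of the rewrite (hypothetical IR; the ".body"/".init" names
// are illustrative):
//   @g = internal global i32* null
//   ...
//   %p = call i8* @malloc(i64 4)
//   %c = bitcast i8* %p to i32*
//   store i32* %c, i32** @g
// becomes a global carrying the storage directly, e.g.
//   @g.body = internal global i32 undef
//   @g.init = internal global i1 false   ; tracks "malloc has happened"
// with accesses through @g rewritten to use @g.body and the malloc deleted.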
1499 Value *NElems = getMallocArraySize(CI, DL, TLI, true);
1500 if (!NElems)
1501 return false;
1503 if (ConstantInt *NElements = dyn_cast<ConstantInt>(NElems))
1504 // Restrict this transformation to only working on small allocations
1505 // (2048 bytes currently), as we don't want to introduce a 16M global or
1506 // something.
1507 if (NElements->getZExtValue() * DL.getTypeAllocSize(AllocTy) < 2048) {
1508 OptimizeGlobalAddressOfMalloc(GV, CI, AllocTy, NElements, DL, TLI);
1509 return true;
1512 // If the allocation is an array of structures, consider transforming this
1513 // into multiple malloc'd arrays, one for each field. This is basically
1514 // SRoA for malloc'd memory.
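//
// A sketch of the idea (hypothetical IR; the field-global names are
// illustrative): a single allocation such as
//   %sz = mul i64 %n, 8
//   %p  = call i8* @malloc(i64 %sz)    ; array of {i32, i32}
// stored into one global is split so that each field gets its own
// allocation and its own global:
//   @g.f0 = internal global i32* null
//   @g.f1 = internal global i32* null
// and every load/GEP of the original pointer is rewritten to target the
// matching field global.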
1516 if (Ordering != AtomicOrdering::NotAtomic)
1517 return false;
1519 // If this is an allocation of a fixed size array of structs, analyze as a
1520 // variable size array. malloc [100 x struct],1 -> malloc struct, 100
1521 if (NElems == ConstantInt::get(CI->getArgOperand(0)->getType(), 1))
1522 if (ArrayType *AT = dyn_cast<ArrayType>(AllocTy))
1523 AllocTy = AT->getElementType();
1525 StructType *AllocSTy = dyn_cast<StructType>(AllocTy);
1526 if (!AllocSTy)
1527 return false;
// If the structure has an unreasonable number of fields, leave it
// alone.
1531 if (AllocSTy->getNumElements() <= 16 && AllocSTy->getNumElements() != 0 &&
1532 AllGlobalLoadUsesSimpleEnoughForHeapSRA(GV, CI)) {
// If this is a fixed size array, transform the malloc to be a malloc of
// structs. malloc [100 x struct],1 -> malloc struct, 100
1536 if (ArrayType *AT = dyn_cast<ArrayType>(getMallocAllocatedType(CI, TLI))) {
1537 Type *IntPtrTy = DL.getIntPtrType(CI->getType());
1538 unsigned TypeSize = DL.getStructLayout(AllocSTy)->getSizeInBytes();
1539 Value *AllocSize = ConstantInt::get(IntPtrTy, TypeSize);
1540 Value *NumElements = ConstantInt::get(IntPtrTy, AT->getNumElements());
1541 SmallVector<OperandBundleDef, 1> OpBundles;
1542 CI->getOperandBundlesAsDefs(OpBundles);
1543 Instruction *Malloc =
1544 CallInst::CreateMalloc(CI, IntPtrTy, AllocSTy, AllocSize, NumElements,
1545 OpBundles, nullptr, CI->getName());
1546 Instruction *Cast = new BitCastInst(Malloc, CI->getType(), "tmp", CI);
1547 CI->replaceAllUsesWith(Cast);
1548 CI->eraseFromParent();
1549 if (BitCastInst *BCI = dyn_cast<BitCastInst>(Malloc))
1550 CI = cast<CallInst>(BCI->getOperand(0));
1551 else
1552 CI = cast<CallInst>(Malloc);
1555 PerformHeapAllocSRoA(GV, CI, getMallocArraySize(CI, DL, TLI, true), DL,
1556 TLI);
1557 return true;
1560 return false;
1563 // Try to optimize globals based on the knowledge that only one value (besides
1564 // its initializer) is ever stored to the global.
1565 static bool optimizeOnceStoredGlobal(GlobalVariable *GV, Value *StoredOnceVal,
1566 AtomicOrdering Ordering,
1567 const DataLayout &DL,
1568 TargetLibraryInfo *TLI) {
1569 // Ignore no-op GEPs and bitcasts.
1570 StoredOnceVal = StoredOnceVal->stripPointerCasts();
1572 // If we are dealing with a pointer global that is initialized to null and
1573 // only has one (non-null) value stored into it, then we can optimize any
1574 // users of the loaded value (often calls and loads) that would trap if the
1575 // value was null.
1576 if (GV->getInitializer()->getType()->isPointerTy() &&
1577 GV->getInitializer()->isNullValue() &&
1578 !NullPointerIsDefined(
1579 nullptr /* F */,
1580 GV->getInitializer()->getType()->getPointerAddressSpace())) {
1581 if (Constant *SOVC = dyn_cast<Constant>(StoredOnceVal)) {
1582 if (GV->getInitializer()->getType() != SOVC->getType())
1583 SOVC = ConstantExpr::getBitCast(SOVC, GV->getInitializer()->getType());
1585 // Optimize away any trapping uses of the loaded value.
1586 if (OptimizeAwayTrappingUsesOfLoads(GV, SOVC, DL, TLI))
1587 return true;
1588 } else if (CallInst *CI = extractMallocCall(StoredOnceVal, TLI)) {
1589 Type *MallocType = getMallocAllocatedType(CI, TLI);
1590 if (MallocType && tryToOptimizeStoreOfMallocToGlobal(GV, CI, MallocType,
1591 Ordering, DL, TLI))
1592 return true;
1596 return false;
1599 /// At this point, we have learned that the only two values ever stored into GV
1600 /// are its initializer and OtherVal. See if we can shrink the global into a
1601 /// boolean and select between the two values whenever it is used. This exposes
1602 /// the values to other scalar optimizations.
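/// A minimal sketch of the rewrite (hypothetical IR for illustration):
///   @g = internal global i32 5        ; only 5 and 12 are ever stored
/// becomes
///   @g.b = internal global i1 false
/// with stores rewritten to store an i1 (false for 5, true for 12) and loads
/// rewritten to
///   %b = load i1, i1* @g.b
///   %v = select i1 %b, i32 12, i32 5
/// (or to a zext of the i1 when the two values happen to be 0 and 1).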
1603 static bool TryToShrinkGlobalToBoolean(GlobalVariable *GV, Constant *OtherVal) {
1604 Type *GVElType = GV->getValueType();
1606 // If GVElType is already i1, it is already shrunk. If the type of the GV is
1607 // an FP value, pointer or vector, don't do this optimization because a select
1608 // between them is very expensive and unlikely to lead to later
1609 // simplification. In these cases, we typically end up with "cond ? v1 : v2"
1610 // where v1 and v2 both require constant pool loads, a big loss.
1611 if (GVElType == Type::getInt1Ty(GV->getContext()) ||
1612 GVElType->isFloatingPointTy() ||
1613 GVElType->isPointerTy() || GVElType->isVectorTy())
1614 return false;
// Walk the use list of the global, checking that all the uses are loads or
// stores. If there is anything else, bail out.
1618 for (User *U : GV->users())
1619 if (!isa<LoadInst>(U) && !isa<StoreInst>(U))
1620 return false;
1622 LLVM_DEBUG(dbgs() << " *** SHRINKING TO BOOL: " << *GV << "\n");
1624 // Create the new global, initializing it to false.
1625 GlobalVariable *NewGV = new GlobalVariable(Type::getInt1Ty(GV->getContext()),
1626 false,
1627 GlobalValue::InternalLinkage,
1628 ConstantInt::getFalse(GV->getContext()),
1629 GV->getName()+".b",
1630 GV->getThreadLocalMode(),
1631 GV->getType()->getAddressSpace());
1632 NewGV->copyAttributesFrom(GV);
1633 GV->getParent()->getGlobalList().insert(GV->getIterator(), NewGV);
1635 Constant *InitVal = GV->getInitializer();
1636 assert(InitVal->getType() != Type::getInt1Ty(GV->getContext()) &&
1637 "No reason to shrink to bool!");
1639 SmallVector<DIGlobalVariableExpression *, 1> GVs;
1640 GV->getDebugInfo(GVs);
// If the global is initialized to zero and the only other value stored into
// it is one, we can use a cast instead of a select to synthesize the desired
// value.
1644 bool IsOneZero = false;
1645 bool EmitOneOrZero = true;
1646 auto *CI = dyn_cast<ConstantInt>(OtherVal);
1647 if (CI && CI->getValue().getActiveBits() <= 64) {
1648 IsOneZero = InitVal->isNullValue() && CI->isOne();
1650 auto *CIInit = dyn_cast<ConstantInt>(GV->getInitializer());
1651 if (CIInit && CIInit->getValue().getActiveBits() <= 64) {
1652 uint64_t ValInit = CIInit->getZExtValue();
1653 uint64_t ValOther = CI->getZExtValue();
1654 uint64_t ValMinus = ValOther - ValInit;
for (auto *GVe : GVs) {
1657 DIGlobalVariable *DGV = GVe->getVariable();
1658 DIExpression *E = GVe->getExpression();
1659 const DataLayout &DL = GV->getParent()->getDataLayout();
1660 unsigned SizeInOctets =
1661 DL.getTypeAllocSizeInBits(NewGV->getType()->getElementType()) / 8;
// It is expected that the address of the optimized global variable is on
// top of the DWARF expression stack. After the optimization, the value of
// that variable will be either 0 (for the initial value) or 1 (for the
// other value). The following expression recovers the original integer
// value from the value at the global object's address:
// val * (ValOther - ValInit) + ValInit:
//   DW_OP_deref_size <SizeInOctets> DW_OP_constu <ValMinus>
//   DW_OP_mul DW_OP_constu <ValInit> DW_OP_plus DW_OP_stack_value
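// As a hypothetical worked example (values chosen purely for illustration):
// with ValInit = 5 and ValOther = 12, ValMinus is 7 and the prepended
// expression reads
//   DW_OP_deref_size <SizeInOctets>, DW_OP_constu 7, DW_OP_mul,
//   DW_OP_constu 5, DW_OP_plus, DW_OP_stack_value
// so a stored boolean of 0 evaluates to 0 * 7 + 5 = 5 and a stored boolean
// of 1 evaluates to 1 * 7 + 5 = 12, recovering the two original values.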
1671 SmallVector<uint64_t, 12> Ops = {
1672 dwarf::DW_OP_deref_size, SizeInOctets,
1673 dwarf::DW_OP_constu, ValMinus,
1674 dwarf::DW_OP_mul, dwarf::DW_OP_constu, ValInit,
1675 dwarf::DW_OP_plus};
1676 bool WithStackValue = true;
1677 E = DIExpression::prependOpcodes(E, Ops, WithStackValue);
1678 DIGlobalVariableExpression *DGVE =
1679 DIGlobalVariableExpression::get(NewGV->getContext(), DGV, E);
1680 NewGV->addDebugInfo(DGVE);
1682 EmitOneOrZero = false;
1686 if (EmitOneOrZero) {
// FIXME: This only emits the variable's address for the debugger; the value
// written there will only ever be 0 or 1.
for (auto *GV : GVs)
  NewGV->addDebugInfo(GV);
1693 while (!GV->use_empty()) {
1694 Instruction *UI = cast<Instruction>(GV->user_back());
1695 if (StoreInst *SI = dyn_cast<StoreInst>(UI)) {
1696 // Change the store into a boolean store.
1697 bool StoringOther = SI->getOperand(0) == OtherVal;
1698 // Only do this if we weren't storing a loaded value.
1699 Value *StoreVal;
1700 if (StoringOther || SI->getOperand(0) == InitVal) {
1701 StoreVal = ConstantInt::get(Type::getInt1Ty(GV->getContext()),
1702 StoringOther);
1703 } else {
1704 // Otherwise, we are storing a previously loaded copy. To do this,
1705 // change the copy from copying the original value to just copying the
1706 // bool.
1707 Instruction *StoredVal = cast<Instruction>(SI->getOperand(0));
1709 // If we've already replaced the input, StoredVal will be a cast or
1710 // select instruction. If not, it will be a load of the original
1711 // global.
1712 if (LoadInst *LI = dyn_cast<LoadInst>(StoredVal)) {
1713 assert(LI->getOperand(0) == GV && "Not a copy!");
1714 // Insert a new load, to preserve the saved value.
1715 StoreVal = new LoadInst(NewGV->getValueType(), NewGV,
1716 LI->getName() + ".b", false, 0,
1717 LI->getOrdering(), LI->getSyncScopeID(), LI);
1718 } else {
1719 assert((isa<CastInst>(StoredVal) || isa<SelectInst>(StoredVal)) &&
1720 "This is not a form that we understand!");
1721 StoreVal = StoredVal->getOperand(0);
1722 assert(isa<LoadInst>(StoreVal) && "Not a load of NewGV!");
1725 StoreInst *NSI =
1726 new StoreInst(StoreVal, NewGV, false, 0, SI->getOrdering(),
1727 SI->getSyncScopeID(), SI);
1728 NSI->setDebugLoc(SI->getDebugLoc());
1729 } else {
1730 // Change the load into a load of bool then a select.
1731 LoadInst *LI = cast<LoadInst>(UI);
1732 LoadInst *NLI =
1733 new LoadInst(NewGV->getValueType(), NewGV, LI->getName() + ".b",
1734 false, 0, LI->getOrdering(), LI->getSyncScopeID(), LI);
1735 Instruction *NSI;
1736 if (IsOneZero)
1737 NSI = new ZExtInst(NLI, LI->getType(), "", LI);
1738 else
1739 NSI = SelectInst::Create(NLI, OtherVal, InitVal, "", LI);
1740 NSI->takeName(LI);
// Since LI is split into two instructions, NLI and NSI both inherit the
// same DebugLoc.
1743 NLI->setDebugLoc(LI->getDebugLoc());
1744 NSI->setDebugLoc(LI->getDebugLoc());
1745 LI->replaceAllUsesWith(NSI);
1747 UI->eraseFromParent();
1750 // Retain the name of the old global variable. People who are debugging their
1751 // programs may expect these variables to be named the same.
1752 NewGV->takeName(GV);
1753 GV->eraseFromParent();
1754 return true;
1757 static bool deleteIfDead(
1758 GlobalValue &GV, SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
1759 GV.removeDeadConstantUsers();
1761 if (!GV.isDiscardableIfUnused() && !GV.isDeclaration())
1762 return false;
1764 if (const Comdat *C = GV.getComdat())
1765 if (!GV.hasLocalLinkage() && NotDiscardableComdats.count(C))
1766 return false;
1768 bool Dead;
1769 if (auto *F = dyn_cast<Function>(&GV))
1770 Dead = (F->isDeclaration() && F->use_empty()) || F->isDefTriviallyDead();
1771 else
1772 Dead = GV.use_empty();
1773 if (!Dead)
1774 return false;
1776 LLVM_DEBUG(dbgs() << "GLOBAL DEAD: " << GV << "\n");
1777 GV.eraseFromParent();
1778 ++NumDeleted;
1779 return true;
1782 static bool isPointerValueDeadOnEntryToFunction(
1783 const Function *F, GlobalValue *GV,
1784 function_ref<DominatorTree &(Function &)> LookupDomTree) {
// Find all uses of GV. We expect them all to be in F, and we bail out on any
// use we cannot identify.
1788 // On each of these uses, identify if the memory that GV points to is
1789 // used/required/live at the start of the function. If it is not, for example
1790 // if the first thing the function does is store to the GV, the GV can
1791 // possibly be demoted.
1793 // We don't do an exhaustive search for memory operations - simply look
1794 // through bitcasts as they're quite common and benign.
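//
// For example (an illustrative sketch, not from a test case), in
//   define void @f() {
//     store i32 0, i32* @g             ; overwrites @g before any read
//     %v = load i32, i32* @g           ; dominated by the store above
//     ...
//   }
// every load of @g is dominated by a store that is at least as wide, so the
// value @g holds on entry to @f is dead and the global can be demoted.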
1795 const DataLayout &DL = GV->getParent()->getDataLayout();
1796 SmallVector<LoadInst *, 4> Loads;
1797 SmallVector<StoreInst *, 4> Stores;
1798 for (auto *U : GV->users()) {
1799 if (Operator::getOpcode(U) == Instruction::BitCast) {
1800 for (auto *UU : U->users()) {
1801 if (auto *LI = dyn_cast<LoadInst>(UU))
1802 Loads.push_back(LI);
1803 else if (auto *SI = dyn_cast<StoreInst>(UU))
1804 Stores.push_back(SI);
1805 else
1806 return false;
1808 continue;
1811 Instruction *I = dyn_cast<Instruction>(U);
1812 if (!I)
1813 return false;
1814 assert(I->getParent()->getParent() == F);
1816 if (auto *LI = dyn_cast<LoadInst>(I))
1817 Loads.push_back(LI);
1818 else if (auto *SI = dyn_cast<StoreInst>(I))
1819 Stores.push_back(SI);
1820 else
1821 return false;
1824 // We have identified all uses of GV into loads and stores. Now check if all
1825 // of them are known not to depend on the value of the global at the function
1826 // entry point. We do this by ensuring that every load is dominated by at
1827 // least one store.
1828 auto &DT = LookupDomTree(*const_cast<Function *>(F));
// The check below is quadratic. Make sure we're not going to do too many
// tests.
1831 // FIXME: Even though this will always have worst-case quadratic time, we
1832 // could put effort into minimizing the average time by putting stores that
1833 // have been shown to dominate at least one load at the beginning of the
1834 // Stores array, making subsequent dominance checks more likely to succeed
1835 // early.
1837 // The threshold here is fairly large because global->local demotion is a
1838 // very powerful optimization should it fire.
1839 const unsigned Threshold = 100;
1840 if (Loads.size() * Stores.size() > Threshold)
1841 return false;
1843 for (auto *L : Loads) {
1844 auto *LTy = L->getType();
1845 if (none_of(Stores, [&](const StoreInst *S) {
1846 auto *STy = S->getValueOperand()->getType();
1847 // The load is only dominated by the store if DomTree says so
1848 // and the number of bits loaded in L is less than or equal to
1849 // the number of bits stored in S.
1850 return DT.dominates(S, L) &&
1851 DL.getTypeStoreSize(LTy) <= DL.getTypeStoreSize(STy);
1853 return false;
1855 // All loads have known dependences inside F, so the global can be localized.
1856 return true;
1859 /// C may have non-instruction users. Can all of those users be turned into
1860 /// instructions?
1861 static bool allNonInstructionUsersCanBeMadeInstructions(Constant *C) {
// We don't do this exhaustively. The most common pattern that we really need
// to care about is a constant GEP or constant bitcast - so we just look
// through a single ConstantExpr.
1866 // The set of constants that this function returns true for must be able to be
1867 // handled by makeAllConstantUsesInstructions.
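//
// For instance (hypothetical IR), a user such as
//   %v = load i32, i32* getelementptr inbounds ([4 x i32], [4 x i32]* @g,
//                                               i64 0, i64 1)
// qualifies: the ConstantExpr GEP is used only by an instruction, so it can
// later be rematerialized as a GEP instruction in front of that load.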
1868 for (auto *U : C->users()) {
1869 if (isa<Instruction>(U))
1870 continue;
1871 if (!isa<ConstantExpr>(U))
1872 // Non instruction, non-constantexpr user; cannot convert this.
1873 return false;
1874 for (auto *UU : U->users())
1875 if (!isa<Instruction>(UU))
// A constantexpr used by another constant. We don't try to recurse any
// further, but just bail out at this point.
1878 return false;
1881 return true;
1884 /// C may have non-instruction users, and
1885 /// allNonInstructionUsersCanBeMadeInstructions has returned true. Convert the
1886 /// non-instruction users to instructions.
1887 static void makeAllConstantUsesInstructions(Constant *C) {
1888 SmallVector<ConstantExpr*,4> Users;
1889 for (auto *U : C->users()) {
1890 if (isa<ConstantExpr>(U))
1891 Users.push_back(cast<ConstantExpr>(U));
1892 else
1893 // We should never get here; allNonInstructionUsersCanBeMadeInstructions
1894 // should not have returned true for C.
1895 assert(
1896 isa<Instruction>(U) &&
1897 "Can't transform non-constantexpr non-instruction to instruction!");
1900 SmallVector<Value*,4> UUsers;
1901 for (auto *U : Users) {
1902 UUsers.clear();
1903 for (auto *UU : U->users())
1904 UUsers.push_back(UU);
1905 for (auto *UU : UUsers) {
1906 Instruction *UI = cast<Instruction>(UU);
1907 Instruction *NewU = U->getAsInstruction();
1908 NewU->insertBefore(UI);
1909 UI->replaceUsesOfWith(U, NewU);
1911 // We've replaced all the uses, so destroy the constant. (destroyConstant
1912 // will update value handles and metadata.)
1913 U->destroyConstant();
1917 /// Analyze the specified global variable and optimize
1918 /// it if possible. If we make a change, return true.
1919 static bool processInternalGlobal(
1920 GlobalVariable *GV, const GlobalStatus &GS, TargetLibraryInfo *TLI,
1921 function_ref<DominatorTree &(Function &)> LookupDomTree) {
1922 auto &DL = GV->getParent()->getDataLayout();
1923 // If this is a first class global and has only one accessing function and
1924 // this function is non-recursive, we replace the global with a local alloca
1925 // in this function.
// NOTE: It doesn't make sense to promote non-single-value types since we
// are just replacing static memory with stack memory.

// If the global is in a different address space, don't bring it onto the
// stack.
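//
// A sketch of the rewrite (illustrative IR only, names invented):
//   @g = internal global i32 7        ; accessed only from @main
// becomes, in @main's entry block:
//   %g = alloca i32
//   store i32 7, i32* %g
// after which all former uses of @g go through the alloca and @g is erased.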
1931 if (!GS.HasMultipleAccessingFunctions &&
1932 GS.AccessingFunction &&
1933 GV->getValueType()->isSingleValueType() &&
1934 GV->getType()->getAddressSpace() == 0 &&
1935 !GV->isExternallyInitialized() &&
1936 allNonInstructionUsersCanBeMadeInstructions(GV) &&
1937 GS.AccessingFunction->doesNotRecurse() &&
1938 isPointerValueDeadOnEntryToFunction(GS.AccessingFunction, GV,
1939 LookupDomTree)) {
1940 const DataLayout &DL = GV->getParent()->getDataLayout();
1942 LLVM_DEBUG(dbgs() << "LOCALIZING GLOBAL: " << *GV << "\n");
1943 Instruction &FirstI = const_cast<Instruction&>(*GS.AccessingFunction
1944 ->getEntryBlock().begin());
1945 Type *ElemTy = GV->getValueType();
1946 // FIXME: Pass Global's alignment when globals have alignment
1947 AllocaInst *Alloca = new AllocaInst(ElemTy, DL.getAllocaAddrSpace(), nullptr,
1948 GV->getName(), &FirstI);
1949 if (!isa<UndefValue>(GV->getInitializer()))
1950 new StoreInst(GV->getInitializer(), Alloca, &FirstI);
1952 makeAllConstantUsesInstructions(GV);
1954 GV->replaceAllUsesWith(Alloca);
1955 GV->eraseFromParent();
1956 ++NumLocalized;
1957 return true;
1960 // If the global is never loaded (but may be stored to), it is dead.
1961 // Delete it now.
1962 if (!GS.IsLoaded) {
1963 LLVM_DEBUG(dbgs() << "GLOBAL NEVER LOADED: " << *GV << "\n");
1965 bool Changed;
1966 if (isLeakCheckerRoot(GV)) {
1967 // Delete any constant stores to the global.
1968 Changed = CleanupPointerRootUsers(GV, TLI);
1969 } else {
1970 // Delete any stores we can find to the global. We may not be able to
1971 // make it completely dead though.
1972 Changed = CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
1975 // If the global is dead now, delete it.
1976 if (GV->use_empty()) {
1977 GV->eraseFromParent();
1978 ++NumDeleted;
1979 Changed = true;
1981 return Changed;
1984 if (GS.StoredType <= GlobalStatus::InitializerStored) {
1985 LLVM_DEBUG(dbgs() << "MARKING CONSTANT: " << *GV << "\n");
1987 // Don't actually mark a global constant if it's atomic because atomic loads
1988 // are implemented by a trivial cmpxchg in some edge-cases and that usually
1989 // requires write access to the variable even if it's not actually changed.
1990 if (GS.Ordering == AtomicOrdering::NotAtomic)
1991 GV->setConstant(true);
1993 // Clean up any obviously simplifiable users now.
1994 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
1996 // If the global is dead now, just nuke it.
1997 if (GV->use_empty()) {
1998 LLVM_DEBUG(dbgs() << " *** Marking constant allowed us to simplify "
1999 << "all users and delete global!\n");
2000 GV->eraseFromParent();
2001 ++NumDeleted;
2002 return true;
2005 // Fall through to the next check; see if we can optimize further.
2006 ++NumMarked;
2008 if (!GV->getInitializer()->getType()->isSingleValueType()) {
2009 const DataLayout &DL = GV->getParent()->getDataLayout();
2010 if (SRAGlobal(GV, DL))
2011 return true;
2013 if (GS.StoredType == GlobalStatus::StoredOnce && GS.StoredOnceValue) {
2014 // If the initial value for the global was an undef value, and if only
2015 // one other value was stored into it, we can just change the
2016 // initializer to be the stored value, then delete all stores to the
2017 // global. This allows us to mark it constant.
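// For example (hypothetical IR):
//   @g = internal global i32 undef
//   ...
//   store i32 42, i32* @g              ; the only store, ever
// can become
//   @g = internal global i32 42
// with the store deleted, after which @g may also be marked constant.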
2018 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue))
2019 if (isa<UndefValue>(GV->getInitializer())) {
2020 // Change the initial value here.
2021 GV->setInitializer(SOVConstant);
2023 // Clean up any obviously simplifiable users now.
2024 CleanupConstantGlobalUsers(GV, GV->getInitializer(), DL, TLI);
2026 if (GV->use_empty()) {
2027 LLVM_DEBUG(dbgs() << " *** Substituting initializer allowed us to "
2028 << "simplify all users and delete global!\n");
2029 GV->eraseFromParent();
2030 ++NumDeleted;
2032 ++NumSubstitute;
2033 return true;
2036 // Try to optimize globals based on the knowledge that only one value
2037 // (besides its initializer) is ever stored to the global.
2038 if (optimizeOnceStoredGlobal(GV, GS.StoredOnceValue, GS.Ordering, DL, TLI))
2039 return true;
2041 // Otherwise, if the global was not a boolean, we can shrink it to be a
2042 // boolean.
2043 if (Constant *SOVConstant = dyn_cast<Constant>(GS.StoredOnceValue)) {
2044 if (GS.Ordering == AtomicOrdering::NotAtomic) {
2045 if (TryToShrinkGlobalToBoolean(GV, SOVConstant)) {
2046 ++NumShrunkToBool;
2047 return true;
2053 return false;
2056 /// Analyze the specified global variable and optimize it if possible. If we
2057 /// make a change, return true.
2058 static bool
2059 processGlobal(GlobalValue &GV, TargetLibraryInfo *TLI,
2060 function_ref<DominatorTree &(Function &)> LookupDomTree) {
2061 if (GV.getName().startswith("llvm."))
2062 return false;
2064 GlobalStatus GS;
2066 if (GlobalStatus::analyzeGlobal(&GV, GS))
2067 return false;
2069 bool Changed = false;
2070 if (!GS.IsCompared && !GV.hasGlobalUnnamedAddr()) {
2071 auto NewUnnamedAddr = GV.hasLocalLinkage() ? GlobalValue::UnnamedAddr::Global
2072 : GlobalValue::UnnamedAddr::Local;
2073 if (NewUnnamedAddr != GV.getUnnamedAddr()) {
2074 GV.setUnnamedAddr(NewUnnamedAddr);
2075 NumUnnamed++;
2076 Changed = true;
2080 // Do more involved optimizations if the global is internal.
2081 if (!GV.hasLocalLinkage())
2082 return Changed;
2084 auto *GVar = dyn_cast<GlobalVariable>(&GV);
2085 if (!GVar)
2086 return Changed;
2088 if (GVar->isConstant() || !GVar->hasInitializer())
2089 return Changed;
2091 return processInternalGlobal(GVar, GS, TLI, LookupDomTree) || Changed;
2094 /// Walk all of the direct calls of the specified function, changing them to
2095 /// FastCC.
2096 static void ChangeCalleesToFastCall(Function *F) {
2097 for (User *U : F->users()) {
2098 if (isa<BlockAddress>(U))
2099 continue;
2100 CallSite CS(cast<Instruction>(U));
2101 CS.setCallingConv(CallingConv::Fast);
2105 static AttributeList StripAttr(LLVMContext &C, AttributeList Attrs,
2106 Attribute::AttrKind A) {
2107 unsigned AttrIndex;
2108 if (Attrs.hasAttrSomewhere(A, &AttrIndex))
2109 return Attrs.removeAttribute(C, AttrIndex, A);
2110 return Attrs;
2113 static void RemoveAttribute(Function *F, Attribute::AttrKind A) {
2114 F->setAttributes(StripAttr(F->getContext(), F->getAttributes(), A));
2115 for (User *U : F->users()) {
2116 if (isa<BlockAddress>(U))
2117 continue;
2118 CallSite CS(cast<Instruction>(U));
2119 CS.setAttributes(StripAttr(F->getContext(), CS.getAttributes(), A));
2123 /// Return true if this is a calling convention that we'd like to change. The
2124 /// idea here is that we don't want to mess with the convention if the user
2125 /// explicitly requested something with performance implications like coldcc,
2126 /// GHC, or anyregcc.
2127 static bool hasChangeableCC(Function *F) {
2128 CallingConv::ID CC = F->getCallingConv();
2130 // FIXME: Is it worth transforming x86_stdcallcc and x86_fastcallcc?
2131 if (CC != CallingConv::C && CC != CallingConv::X86_ThisCall)
2132 return false;
// FIXME: Change CC for the whole chain of musttail calls when possible.

// We can't change the CC of a function that either has musttail calls, or is
// itself a musttail callee.
2138 for (User *U : F->users()) {
2139 if (isa<BlockAddress>(U))
2140 continue;
2141 CallInst* CI = dyn_cast<CallInst>(U);
2142 if (!CI)
2143 continue;
2145 if (CI->isMustTailCall())
2146 return false;
2149 for (BasicBlock &BB : *F)
2150 if (BB.getTerminatingMustTailCall())
2151 return false;
2153 return true;
2156 /// Return true if the block containing the call site has a BlockFrequency of
2157 /// less than ColdCCRelFreq% of the entry block.
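/// As a worked example (numbers invented for illustration): with
/// ColdCCRelFreq = 2, a call site whose block frequency is 1 in a caller
/// whose entry block frequency is 100 satisfies 1 < 100 * 2/100 = 2, so the
/// call site is considered cold.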
2158 static bool isColdCallSite(CallSite CS, BlockFrequencyInfo &CallerBFI) {
2159 const BranchProbability ColdProb(ColdCCRelFreq, 100);
2160 auto CallSiteBB = CS.getInstruction()->getParent();
2161 auto CallSiteFreq = CallerBFI.getBlockFreq(CallSiteBB);
2162 auto CallerEntryFreq =
2163 CallerBFI.getBlockFreq(&(CS.getCaller()->getEntryBlock()));
2164 return CallSiteFreq < CallerEntryFreq * ColdProb;
// This function checks if the input function F is cold at all call sites. It
// also looks at each call site's containing function, returning false if the
// caller function contains other non-cold calls. The input vector AllCallsCold
// contains a list of functions that only have call sites in cold blocks.
2171 static bool
2172 isValidCandidateForColdCC(Function &F,
2173 function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2174 const std::vector<Function *> &AllCallsCold) {
2176 if (F.user_empty())
2177 return false;
2179 for (User *U : F.users()) {
2180 if (isa<BlockAddress>(U))
2181 continue;
2183 CallSite CS(cast<Instruction>(U));
2184 Function *CallerFunc = CS.getInstruction()->getParent()->getParent();
2185 BlockFrequencyInfo &CallerBFI = GetBFI(*CallerFunc);
2186 if (!isColdCallSite(CS, CallerBFI))
2187 return false;
2188 auto It = std::find(AllCallsCold.begin(), AllCallsCold.end(), CallerFunc);
2189 if (It == AllCallsCold.end())
2190 return false;
2192 return true;
2195 static void changeCallSitesToColdCC(Function *F) {
2196 for (User *U : F->users()) {
2197 if (isa<BlockAddress>(U))
2198 continue;
2199 CallSite CS(cast<Instruction>(U));
2200 CS.setCallingConv(CallingConv::Cold);
2204 // This function iterates over all the call instructions in the input Function
2205 // and checks that all call sites are in cold blocks and are allowed to use the
2206 // coldcc calling convention.
2207 static bool
2208 hasOnlyColdCalls(Function &F,
2209 function_ref<BlockFrequencyInfo &(Function &)> GetBFI) {
2210 for (BasicBlock &BB : F) {
2211 for (Instruction &I : BB) {
2212 if (CallInst *CI = dyn_cast<CallInst>(&I)) {
2213 CallSite CS(cast<Instruction>(CI));
// Skip over inline asm instructions since they aren't function calls.
2215 if (CI->isInlineAsm())
2216 continue;
2217 Function *CalledFn = CI->getCalledFunction();
2218 if (!CalledFn)
2219 return false;
2220 if (!CalledFn->hasLocalLinkage())
2221 return false;
// Skip over intrinsics since they won't remain as function calls.
2223 if (CalledFn->getIntrinsicID() != Intrinsic::not_intrinsic)
2224 continue;
2225 // Check if it's valid to use coldcc calling convention.
2226 if (!hasChangeableCC(CalledFn) || CalledFn->isVarArg() ||
2227 CalledFn->hasAddressTaken())
2228 return false;
2229 BlockFrequencyInfo &CallerBFI = GetBFI(F);
2230 if (!isColdCallSite(CS, CallerBFI))
2231 return false;
2235 return true;
2238 static bool
2239 OptimizeFunctions(Module &M, TargetLibraryInfo *TLI,
2240 function_ref<TargetTransformInfo &(Function &)> GetTTI,
2241 function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2242 function_ref<DominatorTree &(Function &)> LookupDomTree,
2243 SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
2245 bool Changed = false;
2247 std::vector<Function *> AllCallsCold;
2248 for (Module::iterator FI = M.begin(), E = M.end(); FI != E;) {
2249 Function *F = &*FI++;
2250 if (hasOnlyColdCalls(*F, GetBFI))
2251 AllCallsCold.push_back(F);
2254 // Optimize functions.
2255 for (Module::iterator FI = M.begin(), E = M.end(); FI != E; ) {
2256 Function *F = &*FI++;
2258 // Don't perform global opt pass on naked functions; we don't want fast
2259 // calling conventions for naked functions.
2260 if (F->hasFnAttribute(Attribute::Naked))
2261 continue;
2263 // Functions without names cannot be referenced outside this module.
2264 if (!F->hasName() && !F->isDeclaration() && !F->hasLocalLinkage())
2265 F->setLinkage(GlobalValue::InternalLinkage);
2267 if (deleteIfDead(*F, NotDiscardableComdats)) {
2268 Changed = true;
2269 continue;
2272 // LLVM's definition of dominance allows instructions that are cyclic
2273 // in unreachable blocks, e.g.:
2274 // %pat = select i1 %condition, @global, i16* %pat
2275 // because any instruction dominates an instruction in a block that's
2276 // not reachable from entry.
2277 // So, remove unreachable blocks from the function, because a) there's
2278 // no point in analyzing them and b) GlobalOpt should otherwise grow
2279 // some more complicated logic to break these cycles.
2280 // Removing unreachable blocks might invalidate the dominator so we
2281 // recalculate it.
2282 if (!F->isDeclaration()) {
2283 if (removeUnreachableBlocks(*F)) {
2284 auto &DT = LookupDomTree(*F);
2285 DT.recalculate(*F);
2286 Changed = true;
2290 Changed |= processGlobal(*F, TLI, LookupDomTree);
2292 if (!F->hasLocalLinkage())
2293 continue;
2295 // If we have an inalloca parameter that we can safely remove the
2296 // inalloca attribute from, do so. This unlocks optimizations that
2297 // wouldn't be safe in the presence of inalloca.
2298 // FIXME: We should also hoist alloca affected by this to the entry
2299 // block if possible.
2300 if (F->getAttributes().hasAttrSomewhere(Attribute::InAlloca) &&
2301 !F->hasAddressTaken()) {
2302 RemoveAttribute(F, Attribute::InAlloca);
2303 Changed = true;
2306 if (hasChangeableCC(F) && !F->isVarArg() && !F->hasAddressTaken()) {
2307 NumInternalFunc++;
2308 TargetTransformInfo &TTI = GetTTI(*F);
2309 // Change the calling convention to coldcc if either stress testing is
2310 // enabled or the target would like to use coldcc on functions which are
2311 // cold at all call sites and the callers contain no other non coldcc
2312 // calls.
2313 if (EnableColdCCStressTest ||
2314 (TTI.useColdCCForColdCall(*F) &&
2315 isValidCandidateForColdCC(*F, GetBFI, AllCallsCold))) {
2316 F->setCallingConv(CallingConv::Cold);
2317 changeCallSitesToColdCC(F);
2318 Changed = true;
2319 NumColdCC++;
2323 if (hasChangeableCC(F) && !F->isVarArg() &&
2324 !F->hasAddressTaken()) {
2325 // If this function has a calling convention worth changing, is not a
2326 // varargs function, and is only called directly, promote it to use the
2327 // Fast calling convention.
2328 F->setCallingConv(CallingConv::Fast);
2329 ChangeCalleesToFastCall(F);
2330 ++NumFastCallFns;
2331 Changed = true;
2334 if (F->getAttributes().hasAttrSomewhere(Attribute::Nest) &&
2335 !F->hasAddressTaken()) {
2336 // The function is not used by a trampoline intrinsic, so it is safe
2337 // to remove the 'nest' attribute.
2338 RemoveAttribute(F, Attribute::Nest);
2339 ++NumNestRemoved;
2340 Changed = true;
2343 return Changed;
2346 static bool
2347 OptimizeGlobalVars(Module &M, TargetLibraryInfo *TLI,
2348 function_ref<DominatorTree &(Function &)> LookupDomTree,
2349 SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
2350 bool Changed = false;
2352 for (Module::global_iterator GVI = M.global_begin(), E = M.global_end();
2353 GVI != E; ) {
2354 GlobalVariable *GV = &*GVI++;
2355 // Global variables without names cannot be referenced outside this module.
2356 if (!GV->hasName() && !GV->isDeclaration() && !GV->hasLocalLinkage())
2357 GV->setLinkage(GlobalValue::InternalLinkage);
2358 // Simplify the initializer.
2359 if (GV->hasInitializer())
2360 if (auto *C = dyn_cast<Constant>(GV->getInitializer())) {
2361 auto &DL = M.getDataLayout();
2362 Constant *New = ConstantFoldConstant(C, DL, TLI);
2363 if (New && New != C)
2364 GV->setInitializer(New);
2367 if (deleteIfDead(*GV, NotDiscardableComdats)) {
2368 Changed = true;
2369 continue;
2372 Changed |= processGlobal(*GV, TLI, LookupDomTree);
2374 return Changed;
2377 /// Evaluate a piece of a constantexpr store into a global initializer. This
2378 /// returns 'Init' modified to reflect 'Val' stored into it. At this point, the
2379 /// GEP operands of Addr [0, OpNo) have been stepped into.
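/// A small illustrative case (hypothetical IR): committing i32 7 through
///   getelementptr ({ i32, [2 x i32] }, { i32, [2 x i32] }* @g, i64 0,
///                  i32 1, i64 0)
/// recurses once per remaining GEP index, first rebuilding the [2 x i32]
/// element with its first slot replaced, then rebuilding the outer struct
/// around the new array.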
2380 static Constant *EvaluateStoreInto(Constant *Init, Constant *Val,
2381 ConstantExpr *Addr, unsigned OpNo) {
2382 // Base case of the recursion.
2383 if (OpNo == Addr->getNumOperands()) {
2384 assert(Val->getType() == Init->getType() && "Type mismatch!");
2385 return Val;
2388 SmallVector<Constant*, 32> Elts;
2389 if (StructType *STy = dyn_cast<StructType>(Init->getType())) {
2390 // Break up the constant into its elements.
2391 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
2392 Elts.push_back(Init->getAggregateElement(i));
2394 // Replace the element that we are supposed to.
2395 ConstantInt *CU = cast<ConstantInt>(Addr->getOperand(OpNo));
2396 unsigned Idx = CU->getZExtValue();
2397 assert(Idx < STy->getNumElements() && "Struct index out of range!");
2398 Elts[Idx] = EvaluateStoreInto(Elts[Idx], Val, Addr, OpNo+1);
2400 // Return the modified struct.
2401 return ConstantStruct::get(STy, Elts);
2404 ConstantInt *CI = cast<ConstantInt>(Addr->getOperand(OpNo));
2405 SequentialType *InitTy = cast<SequentialType>(Init->getType());
2406 uint64_t NumElts = InitTy->getNumElements();
2408 // Break up the array into elements.
2409 for (uint64_t i = 0, e = NumElts; i != e; ++i)
2410 Elts.push_back(Init->getAggregateElement(i));
2412 assert(CI->getZExtValue() < NumElts);
2413 Elts[CI->getZExtValue()] =
2414 EvaluateStoreInto(Elts[CI->getZExtValue()], Val, Addr, OpNo+1);
2416 if (Init->getType()->isArrayTy())
2417 return ConstantArray::get(cast<ArrayType>(InitTy), Elts);
2418 return ConstantVector::get(Elts);
2421 /// We have decided that Addr (which satisfies the predicate
2422 /// isSimpleEnoughPointerToCommit) should get Val as its value. Make it happen.
2423 static void CommitValueTo(Constant *Val, Constant *Addr) {
2424 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(Addr)) {
2425 assert(GV->hasInitializer());
2426 GV->setInitializer(Val);
2427 return;
2430 ConstantExpr *CE = cast<ConstantExpr>(Addr);
2431 GlobalVariable *GV = cast<GlobalVariable>(CE->getOperand(0));
2432 GV->setInitializer(EvaluateStoreInto(GV->getInitializer(), Val, CE, 2));
2435 /// Given a map of address -> value, where addresses are expected to be some form
2436 /// of either a global or a constant GEP, set the initializer for the address to
2437 /// be the value. This performs mostly the same function as CommitValueTo()
2438 /// and EvaluateStoreInto() but is optimized to be more efficient for the common
2439 /// case where the set of addresses are GEPs sharing the same underlying global,
2440 /// processing the GEPs in batches rather than individually.
2442 /// To give an example, consider the following C++ code adapted from the clang
2443 /// regression tests:
2444 /// struct S {
2445 /// int n = 10;
2446 /// int m = 2 * n;
2447 /// S(int a) : n(a) {}
2448 /// };
2450 /// template<typename T>
2451 /// struct U {
2452 /// T *r = &q;
2453 /// T q = 42;
2454 /// U *p = this;
2455 /// };
2457 /// U<S> e;
/// The global static constructor for 'e' will need to initialize 'r' and 'p'
/// of the outer struct, while also initializing the inner 'q' struct's 'n' and
/// 'm' members. This batch algorithm will simply use the general
/// CommitValueTo() method to handle the complex nested S struct initialization
/// of 'q', before processing the outermost members in a single batch. Using
/// CommitValueTo() to handle members in the outer struct is inefficient when
/// the struct/array is very large, as we end up creating and destroying
/// constant arrays for each initialization.
2467 /// For the above case, we expect the following IR to be generated:
2469 /// %struct.U = type { %struct.S*, %struct.S, %struct.U* }
2470 /// %struct.S = type { i32, i32 }
2471 /// @e = global %struct.U { %struct.S* gep inbounds (%struct.U, %struct.U* @e,
2472 /// i64 0, i32 1),
2473 /// %struct.S { i32 42, i32 84 }, %struct.U* @e }
2474 /// The %struct.S { i32 42, i32 84 } inner initializer is treated as a complex
2475 /// constant expression, while the other two elements of @e are "simple".
2476 static void BatchCommitValueTo(const DenseMap<Constant*, Constant*> &Mem) {
2477 SmallVector<std::pair<GlobalVariable*, Constant*>, 32> GVs;
2478 SmallVector<std::pair<ConstantExpr*, Constant*>, 32> ComplexCEs;
2479 SmallVector<std::pair<ConstantExpr*, Constant*>, 32> SimpleCEs;
2480 SimpleCEs.reserve(Mem.size());
2482 for (const auto &I : Mem) {
2483 if (auto *GV = dyn_cast<GlobalVariable>(I.first)) {
2484 GVs.push_back(std::make_pair(GV, I.second));
2485 } else {
2486 ConstantExpr *GEP = cast<ConstantExpr>(I.first);
2487 // We don't handle the deeply recursive case using the batch method.
2488 if (GEP->getNumOperands() > 3)
2489 ComplexCEs.push_back(std::make_pair(GEP, I.second));
2490 else
2491 SimpleCEs.push_back(std::make_pair(GEP, I.second));
2495 // The algorithm below doesn't handle cases like nested structs, so use the
2496 // slower fully general method if we have to.
2497 for (auto ComplexCE : ComplexCEs)
2498 CommitValueTo(ComplexCE.second, ComplexCE.first);
2500 for (auto GVPair : GVs) {
2501 assert(GVPair.first->hasInitializer());
2502 GVPair.first->setInitializer(GVPair.second);
2505 if (SimpleCEs.empty())
2506 return;
// We cache a single global's initializer elements in the case where
// subsequent address/value pairs use the same global. This avoids throwing
// away and rebuilding the constant struct/vector/array just because one
// element is modified at a time.
2512 SmallVector<Constant *, 32> Elts;
2513 Elts.reserve(SimpleCEs.size());
2514 GlobalVariable *CurrentGV = nullptr;
2516 auto commitAndSetupCache = [&](GlobalVariable *GV, bool Update) {
2517 Constant *Init = GV->getInitializer();
2518 Type *Ty = Init->getType();
2519 if (Update) {
2520 if (CurrentGV) {
2521 assert(CurrentGV && "Expected a GV to commit to!");
2522 Type *CurrentInitTy = CurrentGV->getInitializer()->getType();
2523 // We have a valid cache that needs to be committed.
2524 if (StructType *STy = dyn_cast<StructType>(CurrentInitTy))
2525 CurrentGV->setInitializer(ConstantStruct::get(STy, Elts));
2526 else if (ArrayType *ArrTy = dyn_cast<ArrayType>(CurrentInitTy))
2527 CurrentGV->setInitializer(ConstantArray::get(ArrTy, Elts));
2528 else
2529 CurrentGV->setInitializer(ConstantVector::get(Elts));
2531 if (CurrentGV == GV)
2532 return;
2533 // Need to clear and set up cache for new initializer.
2534 CurrentGV = GV;
2535 Elts.clear();
2536 unsigned NumElts;
2537 if (auto *STy = dyn_cast<StructType>(Ty))
2538 NumElts = STy->getNumElements();
2539 else
2540 NumElts = cast<SequentialType>(Ty)->getNumElements();
2541 for (unsigned i = 0, e = NumElts; i != e; ++i)
2542 Elts.push_back(Init->getAggregateElement(i));
2546 for (auto CEPair : SimpleCEs) {
2547 ConstantExpr *GEP = CEPair.first;
2548 Constant *Val = CEPair.second;
2550 GlobalVariable *GV = cast<GlobalVariable>(GEP->getOperand(0));
2551 commitAndSetupCache(GV, GV != CurrentGV);
2552 ConstantInt *CI = cast<ConstantInt>(GEP->getOperand(2));
2553 Elts[CI->getZExtValue()] = Val;
// The last initializer in the list needs to be committed explicitly; the
// others are committed as each new initializer is encountered.
2557 commitAndSetupCache(CurrentGV, true);
2560 /// Evaluate static constructors in the function, if we can. Return true if we
2561 /// can, false otherwise.
2562 static bool EvaluateStaticConstructor(Function *F, const DataLayout &DL,
2563 TargetLibraryInfo *TLI) {
2564 // Call the function.
2565 Evaluator Eval(DL, TLI);
2566 Constant *RetValDummy;
2567 bool EvalSuccess = Eval.EvaluateFunction(F, RetValDummy,
2568 SmallVector<Constant*, 0>());
2570 if (EvalSuccess) {
2571 ++NumCtorsEvaluated;
2573 // We succeeded at evaluation: commit the result.
2574 LLVM_DEBUG(dbgs() << "FULLY EVALUATED GLOBAL CTOR FUNCTION '"
2575 << F->getName() << "' to "
2576 << Eval.getMutatedMemory().size() << " stores.\n");
2577 BatchCommitValueTo(Eval.getMutatedMemory());
2578 for (GlobalVariable *GV : Eval.getInvariants())
2579 GV->setConstant(true);
2582 return EvalSuccess;
2585 static int compareNames(Constant *const *A, Constant *const *B) {
2586 Value *AStripped = (*A)->stripPointerCasts();
2587 Value *BStripped = (*B)->stripPointerCasts();
2588 return AStripped->getName().compare(BStripped->getName());
2591 static void setUsedInitializer(GlobalVariable &V,
2592 const SmallPtrSetImpl<GlobalValue *> &Init) {
2593 if (Init.empty()) {
2594 V.eraseFromParent();
2595 return;
2598 // Type of pointer to the array of pointers.
2599 PointerType *Int8PtrTy = Type::getInt8PtrTy(V.getContext(), 0);
2601 SmallVector<Constant *, 8> UsedArray;
2602 for (GlobalValue *GV : Init) {
2603 Constant *Cast
2604 = ConstantExpr::getPointerBitCastOrAddrSpaceCast(GV, Int8PtrTy);
2605 UsedArray.push_back(Cast);
2607 // Sort to get deterministic order.
2608 array_pod_sort(UsedArray.begin(), UsedArray.end(), compareNames);
2609 ArrayType *ATy = ArrayType::get(Int8PtrTy, UsedArray.size());
2611 Module *M = V.getParent();
2612 V.removeFromParent();
2613 GlobalVariable *NV =
2614 new GlobalVariable(*M, ATy, false, GlobalValue::AppendingLinkage,
2615 ConstantArray::get(ATy, UsedArray), "");
2616 NV->takeName(&V);
2617 NV->setSection("llvm.metadata");
2618 delete &V;
2621 namespace {
/// An easy-to-access representation of llvm.used and llvm.compiler.used.
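/// For reference, these module-level arrays look like (illustrative IR):
///   @llvm.used = appending global [1 x i8*]
///       [i8* bitcast (void ()* @f to i8*)], section "llvm.metadata"
/// This class mirrors their elements into sets so that membership tests and
/// updates are cheap.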
2624 class LLVMUsed {
2625 SmallPtrSet<GlobalValue *, 8> Used;
2626 SmallPtrSet<GlobalValue *, 8> CompilerUsed;
2627 GlobalVariable *UsedV;
2628 GlobalVariable *CompilerUsedV;
2630 public:
2631 LLVMUsed(Module &M) {
2632 UsedV = collectUsedGlobalVariables(M, Used, false);
2633 CompilerUsedV = collectUsedGlobalVariables(M, CompilerUsed, true);
2636 using iterator = SmallPtrSet<GlobalValue *, 8>::iterator;
2637 using used_iterator_range = iterator_range<iterator>;
2639 iterator usedBegin() { return Used.begin(); }
2640 iterator usedEnd() { return Used.end(); }
2642 used_iterator_range used() {
2643 return used_iterator_range(usedBegin(), usedEnd());
2646 iterator compilerUsedBegin() { return CompilerUsed.begin(); }
2647 iterator compilerUsedEnd() { return CompilerUsed.end(); }
2649 used_iterator_range compilerUsed() {
2650 return used_iterator_range(compilerUsedBegin(), compilerUsedEnd());
2653 bool usedCount(GlobalValue *GV) const { return Used.count(GV); }
2655 bool compilerUsedCount(GlobalValue *GV) const {
2656 return CompilerUsed.count(GV);
2659 bool usedErase(GlobalValue *GV) { return Used.erase(GV); }
2660 bool compilerUsedErase(GlobalValue *GV) { return CompilerUsed.erase(GV); }
2661 bool usedInsert(GlobalValue *GV) { return Used.insert(GV).second; }
2663 bool compilerUsedInsert(GlobalValue *GV) {
2664 return CompilerUsed.insert(GV).second;
2667 void syncVariablesAndSets() {
2668 if (UsedV)
2669 setUsedInitializer(*UsedV, Used);
2670 if (CompilerUsedV)
2671 setUsedInitializer(*CompilerUsedV, CompilerUsed);
2675 } // end anonymous namespace
2677 static bool hasUseOtherThanLLVMUsed(GlobalAlias &GA, const LLVMUsed &U) {
2678 if (GA.use_empty()) // No use at all.
2679 return false;
2681 assert((!U.usedCount(&GA) || !U.compilerUsedCount(&GA)) &&
2682 "We should have removed the duplicated "
2683 "element from llvm.compiler.used");
2684 if (!GA.hasOneUse())
// Strictly more than one use. So at least one is not in llvm.used or
// llvm.compiler.used.
2687 return true;
2689 // Exactly one use. Check if it is in llvm.used or llvm.compiler.used.
2690 return !U.usedCount(&GA) && !U.compilerUsedCount(&GA);
2693 static bool hasMoreThanOneUseOtherThanLLVMUsed(GlobalValue &V,
2694 const LLVMUsed &U) {
2695 unsigned N = 2;
2696 assert((!U.usedCount(&V) || !U.compilerUsedCount(&V)) &&
2697 "We should have removed the duplicated "
2698 "element from llvm.compiler.used");
2699 if (U.usedCount(&V) || U.compilerUsedCount(&V))
2700 ++N;
2701 return V.hasNUsesOrMore(N);
2704 static bool mayHaveOtherReferences(GlobalAlias &GA, const LLVMUsed &U) {
2705 if (!GA.hasLocalLinkage())
2706 return true;
2708 return U.usedCount(&GA) || U.compilerUsedCount(&GA);
2711 static bool hasUsesToReplace(GlobalAlias &GA, const LLVMUsed &U,
2712 bool &RenameTarget) {
2713 RenameTarget = false;
2714 bool Ret = false;
2715 if (hasUseOtherThanLLVMUsed(GA, U))
2716 Ret = true;
2718 // If the alias is externally visible, we may still be able to simplify it.
2719 if (!mayHaveOtherReferences(GA, U))
2720 return Ret;
2722 // If the aliasee has internal linkage, give it the name and linkage
2723 // of the alias, and delete the alias. This turns:
2724 // define internal ... @f(...)
2725 // @a = alias ... @f
2726 // into:
2727 // define ... @a(...)
2728 Constant *Aliasee = GA.getAliasee();
2729 GlobalValue *Target = cast<GlobalValue>(Aliasee->stripPointerCasts());
2730 if (!Target->hasLocalLinkage())
2731 return Ret;
2733 // Do not perform the transform if multiple aliases potentially target the
2734 // aliasee. This check also ensures that it is safe to replace the section
2735 // and other attributes of the aliasee with those of the alias.
2736 if (hasMoreThanOneUseOtherThanLLVMUsed(*Target, U))
2737 return Ret;
2739 RenameTarget = true;
2740 return true;
2743 static bool
2744 OptimizeGlobalAliases(Module &M,
2745 SmallPtrSetImpl<const Comdat *> &NotDiscardableComdats) {
2746 bool Changed = false;
2747 LLVMUsed Used(M);
2749 for (GlobalValue *GV : Used.used())
2750 Used.compilerUsedErase(GV);
2752 for (Module::alias_iterator I = M.alias_begin(), E = M.alias_end();
2753 I != E;) {
2754 GlobalAlias *J = &*I++;
2756 // Aliases without names cannot be referenced outside this module.
2757 if (!J->hasName() && !J->isDeclaration() && !J->hasLocalLinkage())
2758 J->setLinkage(GlobalValue::InternalLinkage);
2760 if (deleteIfDead(*J, NotDiscardableComdats)) {
2761 Changed = true;
2762 continue;
2765 // If the alias can change at link time, nothing can be done - bail out.
2766 if (J->isInterposable())
2767 continue;
2769 Constant *Aliasee = J->getAliasee();
2770 GlobalValue *Target = dyn_cast<GlobalValue>(Aliasee->stripPointerCasts());
2771 // We can't trivially replace the alias with the aliasee if the aliasee is
2772 // non-trivial in some way.
2773 // TODO: Try to handle non-zero GEPs of local aliasees.
2774 if (!Target)
2775 continue;
2776 Target->removeDeadConstantUsers();
2778 // Make all users of the alias use the aliasee instead.
2779 bool RenameTarget;
2780 if (!hasUsesToReplace(*J, Used, RenameTarget))
2781 continue;
2783 J->replaceAllUsesWith(ConstantExpr::getBitCast(Aliasee, J->getType()));
2784 ++NumAliasesResolved;
2785 Changed = true;
2787 if (RenameTarget) {
2788 // Give the aliasee the name, linkage and other attributes of the alias.
2789 Target->takeName(&*J);
2790 Target->setLinkage(J->getLinkage());
2791 Target->setDSOLocal(J->isDSOLocal());
2792 Target->setVisibility(J->getVisibility());
2793 Target->setDLLStorageClass(J->getDLLStorageClass());
2795 if (Used.usedErase(&*J))
2796 Used.usedInsert(Target);
2798 if (Used.compilerUsedErase(&*J))
2799 Used.compilerUsedInsert(Target);
2800 } else if (mayHaveOtherReferences(*J, Used))
2801 continue;
2803 // Delete the alias.
2804 M.getAliasList().erase(J);
2805 ++NumAliasesRemoved;
2806 Changed = true;
2809 Used.syncVariablesAndSets();
2811 return Changed;
2814 static Function *FindCXAAtExit(Module &M, TargetLibraryInfo *TLI) {
2815 LibFunc F = LibFunc_cxa_atexit;
2816 if (!TLI->has(F))
2817 return nullptr;
2819 Function *Fn = M.getFunction(TLI->getName(F));
2820 if (!Fn)
2821 return nullptr;
2823 // Make sure that the function has the correct prototype.
2824 if (!TLI->getLibFunc(*Fn, F) || F != LibFunc_cxa_atexit)
2825 return nullptr;
2827 return Fn;
2830 /// Returns whether the given function is an empty C++ destructor and can
2831 /// therefore be eliminated.
/// Note that we assume that other optimization passes have already simplified
/// the code, so we simply check for 'ret'.
2834 static bool cxxDtorIsEmpty(const Function &Fn) {
2835 // FIXME: We could eliminate C++ destructors if they're readonly/readnone and
2836 // nounwind, but that doesn't seem worth doing.
2837 if (Fn.isDeclaration())
2838 return false;
2840 for (auto &I : Fn.getEntryBlock()) {
2841 if (isa<DbgInfoIntrinsic>(I))
2842 continue;
2843 if (isa<ReturnInst>(I))
2844 return true;
2845 break;
2847 return false;
2850 static bool OptimizeEmptyGlobalCXXDtors(Function *CXAAtExitFn) {
2851 /// Itanium C++ ABI p3.3.5:
2853 /// After constructing a global (or local static) object, that will require
2854 /// destruction on exit, a termination function is registered as follows:
2856 /// extern "C" int __cxa_atexit ( void (*f)(void *), void *p, void *d );
2858 /// This registration, e.g. __cxa_atexit(f,p,d), is intended to cause the
2859 /// call f(p) when DSO d is unloaded, before all such termination calls
2860 /// registered before this one. It returns zero if registration is
2861 /// successful, nonzero on failure.
2863 // This pass will look for calls to __cxa_atexit where the function is trivial
2864 // and remove them.
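//
// A sketch of the pattern being removed (hypothetical IR):
//   %r = call i32 @__cxa_atexit(void (i8*)* @dtor, i8* %obj,
//                               i8* @__dso_handle)
// where @dtor's entry block does nothing but 'ret'. Uses of %r are replaced
// with 0 (successful registration) and the call is erased.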
2865 bool Changed = false;
2867 for (auto I = CXAAtExitFn->user_begin(), E = CXAAtExitFn->user_end();
2868 I != E;) {
2869 // We're only interested in calls. Theoretically, we could handle invoke
2870 // instructions as well, but neither llvm-gcc nor clang generate invokes
2871 // to __cxa_atexit.
2872 CallInst *CI = dyn_cast<CallInst>(*I++);
2873 if (!CI)
2874 continue;
2876 Function *DtorFn =
2877 dyn_cast<Function>(CI->getArgOperand(0)->stripPointerCasts());
2878 if (!DtorFn || !cxxDtorIsEmpty(*DtorFn))
2879 continue;
2881 // Just remove the call.
2882 CI->replaceAllUsesWith(Constant::getNullValue(CI->getType()));
2883 CI->eraseFromParent();
2885 ++NumCXXDtorsRemoved;
Changed = true;
2890 return Changed;
2893 static bool optimizeGlobalsInModule(
2894 Module &M, const DataLayout &DL, TargetLibraryInfo *TLI,
2895 function_ref<TargetTransformInfo &(Function &)> GetTTI,
2896 function_ref<BlockFrequencyInfo &(Function &)> GetBFI,
2897 function_ref<DominatorTree &(Function &)> LookupDomTree) {
2898 SmallPtrSet<const Comdat *, 8> NotDiscardableComdats;
2899 bool Changed = false;
2900 bool LocalChange = true;
2901 while (LocalChange) {
2902 LocalChange = false;
2904 NotDiscardableComdats.clear();
2905 for (const GlobalVariable &GV : M.globals())
2906 if (const Comdat *C = GV.getComdat())
2907 if (!GV.isDiscardableIfUnused() || !GV.use_empty())
2908 NotDiscardableComdats.insert(C);
2909 for (Function &F : M)
2910 if (const Comdat *C = F.getComdat())
2911 if (!F.isDefTriviallyDead())
2912 NotDiscardableComdats.insert(C);
2913 for (GlobalAlias &GA : M.aliases())
2914 if (const Comdat *C = GA.getComdat())
2915 if (!GA.isDiscardableIfUnused() || !GA.use_empty())
2916 NotDiscardableComdats.insert(C);
2918 // Delete functions that are trivially dead, ccc -> fastcc
2919 LocalChange |= OptimizeFunctions(M, TLI, GetTTI, GetBFI, LookupDomTree,
2920 NotDiscardableComdats);
2922 // Optimize global_ctors list.
2923 LocalChange |= optimizeGlobalCtorsList(M, [&](Function *F) {
2924 return EvaluateStaticConstructor(F, DL, TLI);
2927 // Optimize non-address-taken globals.
2928 LocalChange |= OptimizeGlobalVars(M, TLI, LookupDomTree,
2929 NotDiscardableComdats);
2931 // Resolve aliases, when possible.
2932 LocalChange |= OptimizeGlobalAliases(M, NotDiscardableComdats);
2934 // Try to remove trivial global destructors if they are not removed
2935 // already.
2936 Function *CXAAtExitFn = FindCXAAtExit(M, TLI);
2937 if (CXAAtExitFn)
2938 LocalChange |= OptimizeEmptyGlobalCXXDtors(CXAAtExitFn);
2940 Changed |= LocalChange;
2943 // TODO: Move all global ctors functions to the end of the module for code
2944 // layout.
2946 return Changed;
2949 PreservedAnalyses GlobalOptPass::run(Module &M, ModuleAnalysisManager &AM) {
2950 auto &DL = M.getDataLayout();
2951 auto &TLI = AM.getResult<TargetLibraryAnalysis>(M);
2952 auto &FAM =
2953 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
2954 auto LookupDomTree = [&FAM](Function &F) -> DominatorTree &{
2955 return FAM.getResult<DominatorTreeAnalysis>(F);
2957 auto GetTTI = [&FAM](Function &F) -> TargetTransformInfo & {
2958 return FAM.getResult<TargetIRAnalysis>(F);
2961 auto GetBFI = [&FAM](Function &F) -> BlockFrequencyInfo & {
2962 return FAM.getResult<BlockFrequencyAnalysis>(F);
2965 if (!optimizeGlobalsInModule(M, DL, &TLI, GetTTI, GetBFI, LookupDomTree))
2966 return PreservedAnalyses::all();
2967 return PreservedAnalyses::none();
2970 namespace {
2972 struct GlobalOptLegacyPass : public ModulePass {
2973 static char ID; // Pass identification, replacement for typeid
2975 GlobalOptLegacyPass() : ModulePass(ID) {
2976 initializeGlobalOptLegacyPassPass(*PassRegistry::getPassRegistry());
2979 bool runOnModule(Module &M) override {
2980 if (skipModule(M))
2981 return false;
2983 auto &DL = M.getDataLayout();
2984 auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();
2985 auto LookupDomTree = [this](Function &F) -> DominatorTree & {
2986 return this->getAnalysis<DominatorTreeWrapperPass>(F).getDomTree();
2988 auto GetTTI = [this](Function &F) -> TargetTransformInfo & {
2989 return this->getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
2992 auto GetBFI = [this](Function &F) -> BlockFrequencyInfo & {
2993 return this->getAnalysis<BlockFrequencyInfoWrapperPass>(F).getBFI();
2996 return optimizeGlobalsInModule(M, DL, TLI, GetTTI, GetBFI, LookupDomTree);
2999 void getAnalysisUsage(AnalysisUsage &AU) const override {
3000 AU.addRequired<TargetLibraryInfoWrapperPass>();
3001 AU.addRequired<TargetTransformInfoWrapperPass>();
3002 AU.addRequired<DominatorTreeWrapperPass>();
3003 AU.addRequired<BlockFrequencyInfoWrapperPass>();
3007 } // end anonymous namespace
3009 char GlobalOptLegacyPass::ID = 0;
3011 INITIALIZE_PASS_BEGIN(GlobalOptLegacyPass, "globalopt",
3012 "Global Variable Optimizer", false, false)
3013 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
3014 INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
3015 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
3016 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
3017 INITIALIZE_PASS_END(GlobalOptLegacyPass, "globalopt",
3018 "Global Variable Optimizer", false, false)
3020 ModulePass *llvm::createGlobalOptimizerPass() {
3021 return new GlobalOptLegacyPass();