//===- GlobalMerge.cpp - Internal globals merging -------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass merges globals with internal linkage into one. This way all the
// merged globals can be addressed using offsets from the same base pointer
// (there is no need for a separate base pointer for each of them). Such a
// transformation can significantly reduce register pressure when many globals
// are involved.
//
// For example, consider the code which touches several global variables at
// once:
//
//  static int foo[N], bar[N], baz[N];
//
//  for (i = 0; i < N; ++i) {
//    foo[i] = bar[i] * baz[i];
//  }
//
//  On ARM the addresses of all 3 arrays must be kept in registers, so this
//  code has fairly high register pressure (loop body shown):
//
//  ldr     r1, [r5], #4
//  ldr     r2, [r6], #4
//  mul     r1, r2, r1
//  str     r1, [r0], #4
//
//  The pass converts the code to something like:
//
//  static struct {
//    int foo[N];
//    int bar[N];
//    int baz[N];
//  } merged;
//
//  for (i = 0; i < N; ++i) {
//    merged.foo[i] = merged.bar[i] * merged.baz[i];
//  }
//
//  and in ARM code this becomes:
//
//  ldr     r0, [r5, #40]
//  ldr     r1, [r5, #80]
//  mul     r0, r1, r0
//  str     r0, [r5], #4
//
//  Note that we saved 2 registers here almost "for free".
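//
//  (At the IR level, the code below implements this by emitting a single
//  struct-typed global (named "_MergedGlobals", possibly suffixed), rewriting
//  every use of the original globals into constant GEPs into that struct,
//  and, where it is safe to do so, emitting aliases that carry the original
//  names so that external references keep resolving.)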
//
// However, merging globals can have tradeoffs:
// - it confuses debuggers, tools, and users
// - it makes linker optimizations less useful (order files, LOHs, ...)
// - it forces usage of indexed addressing (which isn't necessarily "free")
// - it can increase register pressure when the uses are disparate enough.
//
// We use heuristics to discover the best global grouping we can (cf cl::opts).
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "global-merge"

// FIXME: This is only useful as a last-resort way to disable the pass.
static cl::opt<bool>
EnableGlobalMerge("enable-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"),
                  cl::init(true));

static cl::opt<unsigned>
GlobalMergeMaxOffset("global-merge-max-offset", cl::Hidden,
                     cl::desc("Set maximum offset for global merge pass"),
                     cl::init(0));
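// Note: with the default of 0, the size check in doInitialization() rejects
// every candidate, so running the pass standalone merges nothing unless this
// option is set; targets normally supply their own limit through
// createGlobalMergePass() instead.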

static cl::opt<bool> GlobalMergeGroupByUse(
    "global-merge-group-by-use", cl::Hidden,
    cl::desc("Improve global merge pass to look at uses"), cl::init(true));

static cl::opt<bool> GlobalMergeIgnoreSingleUse(
    "global-merge-ignore-single-use", cl::Hidden,
    cl::desc("Improve global merge pass to ignore globals only used alone"),
    cl::init(true));

static cl::opt<bool>
EnableGlobalMergeOnConst("global-merge-on-const", cl::Hidden,
                         cl::desc("Enable global merge pass on constants"),
                         cl::init(false));

// FIXME: this could be a transitional option, and we should remove it once we
// are sure this optimization benefits all targets.
static cl::opt<cl::boolOrDefault>
EnableGlobalMergeOnExternal("global-merge-on-external", cl::Hidden,
    cl::desc("Enable global merge pass on external linkage"));

STATISTIC(NumMerged, "Number of globals merged");

namespace {

class GlobalMerge : public FunctionPass {
  const TargetMachine *TM = nullptr;

  // FIXME: Infer the maximum possible offset depending on the actual users
  // (these max offsets are different for the users inside Thumb or ARM
  // functions), see the code that passes in the offset in the ARM backend
  // for more information.
  unsigned MaxOffset;

  /// Whether we should try to optimize for size only.
  /// Currently, this applies a dead simple heuristic: only consider globals
  /// used in minsize functions for merging.
  /// FIXME: This could learn about optsize, and be used in the cost model.
  bool OnlyOptimizeForSize = false;

  /// Whether we should merge global variables that have external linkage.
  bool MergeExternalGlobals = false;

  bool IsMachO;

  bool doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
               Module &M, bool isConst, unsigned AddrSpace) const;

  /// Merge everything in \p Globals for which the corresponding bit
  /// in \p GlobalSet is set.
  bool doMerge(const SmallVectorImpl<GlobalVariable *> &Globals,
               const BitVector &GlobalSet, Module &M, bool isConst,
               unsigned AddrSpace) const;

  /// Check if the given variable has been identified as one that must be kept.
  /// \pre setMustKeepGlobalVariables must have been called on the Module that
  /// contains GV.
  bool isMustKeepGlobalVariable(const GlobalVariable *GV) const {
    return MustKeepGlobalVariables.count(GV);
  }

  /// Collect every variable marked as "used" or used in a landing pad
  /// instruction for this Module.
  void setMustKeepGlobalVariables(Module &M);

  /// Collect every variable marked as "used".
  void collectUsedGlobalVariables(Module &M, StringRef Name);

  /// Keep track of the GlobalVariables that must not be merged away.
  SmallPtrSet<const GlobalVariable *, 16> MustKeepGlobalVariables;

public:
  static char ID; // Pass identification, replacement for typeid.

  explicit GlobalMerge()
      : FunctionPass(ID), MaxOffset(GlobalMergeMaxOffset) {
    initializeGlobalMergePass(*PassRegistry::getPassRegistry());
  }

  explicit GlobalMerge(const TargetMachine *TM, unsigned MaximalOffset,
                       bool OnlyOptimizeForSize, bool MergeExternalGlobals)
      : FunctionPass(ID), TM(TM), MaxOffset(MaximalOffset),
        OnlyOptimizeForSize(OnlyOptimizeForSize),
        MergeExternalGlobals(MergeExternalGlobals) {
    initializeGlobalMergePass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;
  bool doFinalization(Module &M) override;

  StringRef getPassName() const override { return "Merge internal globals"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char GlobalMerge::ID = 0;

INITIALIZE_PASS(GlobalMerge, DEBUG_TYPE, "Merge global variables", false, false)
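
// The pass is registered under the command-line name "global-merge" (the
// DEBUG_TYPE above), which is also the tag to use with -debug-only=.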

bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
                          Module &M, bool isConst, unsigned AddrSpace) const {
  auto &DL = M.getDataLayout();
  // FIXME: Find better heuristics
  std::stable_sort(Globals.begin(), Globals.end(),
                   [&DL](const GlobalVariable *GV1, const GlobalVariable *GV2) {
                     return DL.getTypeAllocSize(GV1->getValueType()) <
                            DL.getTypeAllocSize(GV2->getValueType());
                   });

  // If we want to just blindly group all globals together, do so.
  if (!GlobalMergeGroupByUse) {
    BitVector AllGlobals(Globals.size());
    AllGlobals.set();
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // If we want to be smarter, look at all uses of each global, to try to
  // discover all sets of globals used together, and how many times each of
  // these sets occurred.
  //
  // Keep this reasonably efficient, by having an append-only list of all sets
  // discovered so far (UsedGlobalSet), and mapping each "together-ness" unit of
  // code (currently, a Function) to the set of globals seen so far that are
  // used together in that unit (GlobalUsesByFunction).
  //
  // When we look at the Nth global, we know that any new set is either:
  // - the singleton set {N}, containing this global only, or
  // - the union of {N} and a previously-discovered set, containing some
  //   combination of the previous N-1 globals.
  // Using that knowledge, when looking at the Nth global, we can keep:
  // - a reference to the singleton set {N} (CurGVOnlySetIdx)
  // - a list mapping each previous set to its union with {N} (EncounteredUGS),
  //   if it actually occurs.

  // We keep track of the sets of globals used together "close enough".
  struct UsedGlobalSet {
    BitVector Globals;
    unsigned UsageCount = 1;

    UsedGlobalSet(size_t Size) : Globals(Size) {}
  };

  // Each set is unique in UsedGlobalSets.
  std::vector<UsedGlobalSet> UsedGlobalSets;

  // Avoid repeating the create-global-set pattern.
  auto CreateGlobalSet = [&]() -> UsedGlobalSet & {
    UsedGlobalSets.emplace_back(Globals.size());
    return UsedGlobalSets.back();
  };

  // The first set is the empty set.
  CreateGlobalSet().UsageCount = 0;

  // We define "close enough" to be "in the same function".
  // FIXME: Grouping uses by function is way too aggressive, so we should have
  // a better metric for distance between uses.
  // The obvious alternative would be to group by BasicBlock, but that's in
  // turn too conservative.
  // Anything in between wouldn't be trivial to compute, so just stick with
  // per-function grouping.

  // The value type is an index into UsedGlobalSets.
  // The default (0) conveniently points to the empty set.
  DenseMap<Function *, size_t /*UsedGlobalSetIdx*/> GlobalUsesByFunction;

  // Now, look at each merge-eligible global in turn.

  // Keep track of the sets we already encountered to which we added the
  // current global.
  // Each element matches the same-index element in UsedGlobalSets.
  // This lets us efficiently tell whether a set has already been expanded to
  // include the current global.
  std::vector<size_t> EncounteredUGS;

  for (size_t GI = 0, GE = Globals.size(); GI != GE; ++GI) {
    GlobalVariable *GV = Globals[GI];

    // Reset the encountered sets for this global...
    std::fill(EncounteredUGS.begin(), EncounteredUGS.end(), 0);
    // ...and grow it in case we created new sets for the previous global.
    EncounteredUGS.resize(UsedGlobalSets.size());

    // We might need to create a set that only consists of the current global.
    // Keep track of its index into UsedGlobalSets.
    size_t CurGVOnlySetIdx = 0;

    // For each global, look at all its Uses.
    for (auto &U : GV->uses()) {
      // This Use might be a ConstantExpr. We're interested in Instruction
      // users, so look through ConstantExpr...
      Use *UI, *UE;
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
        if (CE->use_empty())
          continue;
        UI = &*CE->use_begin();
        UE = nullptr;
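        // (For the ConstantExpr case we walk its entire use list; the use
        // list is terminated by a null sentinel, hence UE = nullptr.)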
      } else if (isa<Instruction>(U.getUser())) {
        UI = &U;
        UE = UI->getNext();
      } else {
        continue;
      }

      // ...to iterate on all the instruction users of the global.
      // Note that we iterate on Uses and not on Users to be able to getNext().
      for (; UI != UE; UI = UI->getNext()) {
        Instruction *I = dyn_cast<Instruction>(UI->getUser());
        if (!I)
          continue;

        Function *ParentFn = I->getParent()->getParent();

        // If we're only optimizing for size, ignore non-minsize functions.
        if (OnlyOptimizeForSize && !ParentFn->optForMinSize())
          continue;

        size_t UGSIdx = GlobalUsesByFunction[ParentFn];

        // If this is the first merge-eligible global this function uses, map
        // it to the set consisting of this global only.
        if (!UGSIdx) {
          // If that set doesn't exist yet, create it.
          if (!CurGVOnlySetIdx) {
            CurGVOnlySetIdx = UsedGlobalSets.size();
            CreateGlobalSet().Globals.set(GI);
          } else {
            ++UsedGlobalSets[CurGVOnlySetIdx].UsageCount;
          }

          GlobalUsesByFunction[ParentFn] = CurGVOnlySetIdx;
          continue;
        }

        // If the set this function maps to already includes the current
        // global, just increment the counter.
        if (UsedGlobalSets[UGSIdx].Globals.test(GI)) {
          ++UsedGlobalSets[UGSIdx].UsageCount;
          continue;
        }

        // If not, the previous set wasn't actually used in this function.
        --UsedGlobalSets[UGSIdx].UsageCount;

        // If we already expanded the previous set to include this global, just
        // reuse that expanded set.
        if (size_t ExpandedIdx = EncounteredUGS[UGSIdx]) {
          ++UsedGlobalSets[ExpandedIdx].UsageCount;
          GlobalUsesByFunction[ParentFn] = ExpandedIdx;
          continue;
        }

        // If not, create a new set consisting of the union of the previous set
        // and this global. Mark it as encountered, so we can reuse it later.
        GlobalUsesByFunction[ParentFn] = EncounteredUGS[UGSIdx] =
            UsedGlobalSets.size();

        UsedGlobalSet &NewUGS = CreateGlobalSet();
        NewUGS.Globals.set(GI);
        NewUGS.Globals |= UsedGlobalSets[UGSIdx].Globals;
      }
    }
  }

  // Now we have found a bunch of sets of globals used together. We accumulated
  // the number of times we encountered each set (i.e., the number of functions
  // that use that exact set of globals).
  //
  // Multiply that by the size of the set to give us a crude profitability
  // metric.
  std::stable_sort(UsedGlobalSets.begin(), UsedGlobalSets.end(),
                   [](const UsedGlobalSet &UGS1, const UsedGlobalSet &UGS2) {
                     return UGS1.Globals.count() * UGS1.UsageCount <
                            UGS2.Globals.count() * UGS2.UsageCount;
                   });
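
  // For example, a set of 3 globals used together in 5 functions scores
  // 3 * 5 = 15 and is considered before a 2-global set seen in 4 functions
  // (score 8), because the loops below walk the sorted vector from the back.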

  // We can choose to merge all globals together, but ignore globals never used
  // with another global. This catches the obviously non-profitable cases of
  // having a single global, but is aggressive enough for any other case.
  if (GlobalMergeIgnoreSingleUse) {
    BitVector AllGlobals(Globals.size());
    for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) {
      const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1];
      if (UGS.UsageCount == 0)
        continue;
      if (UGS.Globals.count() > 1)
        AllGlobals |= UGS.Globals;
    }
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // Starting from the sets with the best (=biggest) profitability, find a
  // good combination.
  // The ideal (and expensive) solution can only be found by trying all
  // combinations, looking for the one with the best profitability.
  // Don't be smart about it, and just pick the first compatible combination,
  // starting with the sets with the best profitability.
  BitVector PickedGlobals(Globals.size());
  bool Changed = false;

  for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) {
    const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1];
    if (UGS.UsageCount == 0)
      continue;
    if (PickedGlobals.anyCommon(UGS.Globals))
      continue;
    PickedGlobals |= UGS.Globals;
    // If the set only contains one global, there's no point in merging.
    // Ignore the global for inclusion in other sets though, so keep it in
    // PickedGlobals.
    if (UGS.Globals.count() < 2)
      continue;
    Changed |= doMerge(Globals, UGS.Globals, M, isConst, AddrSpace);
  }

  return Changed;
}

bool GlobalMerge::doMerge(const SmallVectorImpl<GlobalVariable *> &Globals,
                          const BitVector &GlobalSet, Module &M, bool isConst,
                          unsigned AddrSpace) const {
  assert(Globals.size() > 1);

  Type *Int32Ty = Type::getInt32Ty(M.getContext());
  Type *Int8Ty = Type::getInt8Ty(M.getContext());
  auto &DL = M.getDataLayout();

  LLVM_DEBUG(dbgs() << " Trying to merge set, starts with #"
                    << GlobalSet.find_first() << "\n");

  bool Changed = false;
  ssize_t i = GlobalSet.find_first();
  while (i != -1) {
    ssize_t j = 0;
    uint64_t MergedSize = 0;
    std::vector<Type*> Tys;
    std::vector<Constant*> Inits;
    std::vector<unsigned> StructIdxs;

    bool HasExternal = false;
    StringRef FirstExternalName;
    unsigned MaxAlign = 1;
    unsigned CurIdx = 0;
    for (j = i; j != -1; j = GlobalSet.find_next(j)) {
      Type *Ty = Globals[j]->getValueType();

      // Make sure we use the same alignment AsmPrinter would use.
      unsigned Align = DL.getPreferredAlignment(Globals[j]);
      unsigned Padding = alignTo(MergedSize, Align) - MergedSize;
      MergedSize += Padding;
      MergedSize += DL.getTypeAllocSize(Ty);
      if (MergedSize > MaxOffset) {
        break;
      }
      if (Padding) {
        Tys.push_back(ArrayType::get(Int8Ty, Padding));
        Inits.push_back(ConstantAggregateZero::get(Tys.back()));
        ++CurIdx;
      }
      Tys.push_back(Ty);
      Inits.push_back(Globals[j]->getInitializer());
      StructIdxs.push_back(CurIdx++);

      MaxAlign = std::max(MaxAlign, Align);

      if (Globals[j]->hasExternalLinkage() && !HasExternal) {
        HasExternal = true;
        FirstExternalName = Globals[j]->getName();
      }
    }

    // Skip merging if the set degenerates to a single global; continue with
    // the next group, which starts at j.
    if (Tys.size() < 2) {
      i = j;
      continue;
    }

    // If none of the merged variables have external linkage, we don't need to
    // expose the symbol after merging.
    GlobalValue::LinkageTypes Linkage = HasExternal
                                            ? GlobalValue::ExternalLinkage
                                            : GlobalValue::InternalLinkage;
    // Use a packed struct so we can control alignment.
    StructType *MergedTy = StructType::get(M.getContext(), Tys, true);
    Constant *MergedInit = ConstantStruct::get(MergedTy, Inits);

    // On Darwin external linkage needs to be preserved, otherwise
    // dsymutil cannot preserve the debug info for the merged
    // variables. If they have external linkage, use the symbol name
    // of the first variable merged as the suffix of global symbol
    // name. This avoids a link-time naming conflict for the
    // _MergedGlobals symbols.
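    // For instance (illustrative name only): if the first external-linkage
    // global in the set is called "foo", the Mach-O symbol below becomes
    // "_MergedGlobals_foo"; otherwise it is plain "_MergedGlobals".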
    Twine MergedName =
        (IsMachO && HasExternal)
            ? "_MergedGlobals_" + FirstExternalName
            : "_MergedGlobals";
    auto MergedLinkage = IsMachO ? Linkage : GlobalValue::PrivateLinkage;
    auto *MergedGV = new GlobalVariable(
        M, MergedTy, isConst, MergedLinkage, MergedInit, MergedName, nullptr,
        GlobalVariable::NotThreadLocal, AddrSpace);

    MergedGV->setAlignment(MaxAlign);
    MergedGV->setSection(Globals[i]->getSection());

    const StructLayout *MergedLayout = DL.getStructLayout(MergedTy);
    for (ssize_t k = i, idx = 0; k != j; k = GlobalSet.find_next(k), ++idx) {
      GlobalValue::LinkageTypes Linkage = Globals[k]->getLinkage();
      std::string Name = Globals[k]->getName();
      GlobalValue::DLLStorageClassTypes DLLStorage =
          Globals[k]->getDLLStorageClass();

      // Copy metadata while adjusting any debug info metadata by the original
      // global's offset within the merged global.
      MergedGV->copyMetadata(Globals[k],
                             MergedLayout->getElementOffset(StructIdxs[idx]));

      Constant *Idx[2] = {
          ConstantInt::get(Int32Ty, 0),
          ConstantInt::get(Int32Ty, StructIdxs[idx]),
      };
      Constant *GEP =
          ConstantExpr::getInBoundsGetElementPtr(MergedTy, MergedGV, Idx);
      Globals[k]->replaceAllUsesWith(GEP);
      Globals[k]->eraseFromParent();

      // When the linkage is not internal we must emit an alias for the original
      // variable name as it may be accessed from another object. On non-Mach-O
      // we can also emit an alias for internal linkage as it's safe to do so.
      // It's not safe on Mach-O as the alias (and thus the portion of the
      // MergedGlobals variable) may be dead stripped at link time.
      if (Linkage != GlobalValue::InternalLinkage || !IsMachO) {
        GlobalAlias *GA = GlobalAlias::create(Tys[StructIdxs[idx]], AddrSpace,
                                              Linkage, Name, GEP, &M);
        GA->setDLLStorageClass(DLLStorage);
      }

      NumMerged++;
    }
    Changed = true;
    i = j;
  }

  return Changed;
}

void GlobalMerge::collectUsedGlobalVariables(Module &M, StringRef Name) {
  // Extract global variables from the array named by \p Name
  // (llvm.used or llvm.compiler.used).
  const GlobalVariable *GV = M.getGlobalVariable(Name);
  if (!GV || !GV->hasInitializer()) return;

  // Should be an array of 'i8*'.
  const ConstantArray *InitList = cast<ConstantArray>(GV->getInitializer());

  for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
    if (const GlobalVariable *G =
            dyn_cast<GlobalVariable>(InitList->getOperand(i)->stripPointerCasts()))
      MustKeepGlobalVariables.insert(G);
}

void GlobalMerge::setMustKeepGlobalVariables(Module &M) {
  collectUsedGlobalVariables(M, "llvm.used");
  collectUsedGlobalVariables(M, "llvm.compiler.used");

  for (Function &F : M) {
    for (BasicBlock &BB : F) {
      Instruction *Pad = BB.getFirstNonPHI();
      if (!Pad->isEHPad())
        continue;

      // Keep globals used by landingpads and catchpads.
      for (const Use &U : Pad->operands()) {
        if (const GlobalVariable *GV =
                dyn_cast<GlobalVariable>(U->stripPointerCasts()))
          MustKeepGlobalVariables.insert(GV);
      }
    }
  }
}

bool GlobalMerge::doInitialization(Module &M) {
  if (!EnableGlobalMerge)
    return false;

  IsMachO = Triple(M.getTargetTriple()).isOSBinFormatMachO();

  auto &DL = M.getDataLayout();
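  // Candidates are bucketed by (address space, section) so that only globals
  // living in the same address space and section are ever merged into the
  // same struct.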
  DenseMap<std::pair<unsigned, StringRef>, SmallVector<GlobalVariable *, 16>>
      Globals, ConstGlobals, BSSGlobals;
  bool Changed = false;
  setMustKeepGlobalVariables(M);

  // Grab all non-const globals.
  for (auto &GV : M.globals()) {
    // Merge is safe for "normal" internal or external globals only.
    if (GV.isDeclaration() || GV.isThreadLocal() || GV.hasImplicitSection())
      continue;

    // It's not safe to merge globals that may be preempted.
    if (TM && !TM->shouldAssumeDSOLocal(M, &GV))
      continue;

    if (!(MergeExternalGlobals && GV.hasExternalLinkage()) &&
        !GV.hasInternalLinkage())
      continue;

    PointerType *PT = dyn_cast<PointerType>(GV.getType());
    assert(PT && "Global variable is not a pointer!");

    unsigned AddressSpace = PT->getAddressSpace();
    StringRef Section = GV.getSection();

    // Ignore all 'special' globals.
    if (GV.getName().startswith("llvm.") ||
        GV.getName().startswith(".llvm."))
      continue;

    // Ignore all "required" globals:
    if (isMustKeepGlobalVariable(&GV))
      continue;

    Type *Ty = GV.getValueType();
    if (DL.getTypeAllocSize(Ty) < MaxOffset) {
      if (TM &&
          TargetLoweringObjectFile::getKindForGlobal(&GV, *TM).isBSS())
        BSSGlobals[{AddressSpace, Section}].push_back(&GV);
      else if (GV.isConstant())
        ConstGlobals[{AddressSpace, Section}].push_back(&GV);
      else
        Globals[{AddressSpace, Section}].push_back(&GV);
    }
  }

  for (auto &P : Globals)
    if (P.second.size() > 1)
      Changed |= doMerge(P.second, M, false, P.first.first);

  for (auto &P : BSSGlobals)
    if (P.second.size() > 1)
      Changed |= doMerge(P.second, M, false, P.first.first);

  if (EnableGlobalMergeOnConst)
    for (auto &P : ConstGlobals)
      if (P.second.size() > 1)
        Changed |= doMerge(P.second, M, true, P.first.first);

  return Changed;
}

bool GlobalMerge::runOnFunction(Function &F) {
  return false;
}

bool GlobalMerge::doFinalization(Module &M) {
  MustKeepGlobalVariables.clear();
  return false;
}

Pass *llvm::createGlobalMergePass(const TargetMachine *TM, unsigned Offset,
                                  bool OnlyOptimizeForSize,
                                  bool MergeExternalByDefault) {
  bool MergeExternal = (EnableGlobalMergeOnExternal == cl::BOU_UNSET) ?
    MergeExternalByDefault : (EnableGlobalMergeOnExternal == cl::BOU_TRUE);
  return new GlobalMerge(TM, Offset, OnlyOptimizeForSize, MergeExternal);
}
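
// How a backend typically uses this: the pass is created before instruction
// selection with a target-specific maximum offset. A minimal sketch, assuming
// a hypothetical MyTargetPassConfig and an arbitrary example offset (neither
// taken from a real backend):
//
//   bool MyTargetPassConfig::addPreISel() {
//     // Offset should reflect the addressing range of the target's
//     // load/store instructions; 4095 is just an illustrative value.
//     addPass(createGlobalMergePass(TM, /*Offset=*/4095,
//                                   /*OnlyOptimizeForSize=*/false,
//                                   /*MergeExternalByDefault=*/true));
//     return false;
//   }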