//===- GlobalMerge.cpp - Internal globals merging -------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass merges globals with internal linkage into one. This way all the
// globals which were merged into the biggest one can be addressed using offsets
// from the same base pointer (there is no need for a separate base pointer for
// each global). Such a transformation can significantly reduce the register
// pressure when many globals are involved.
//
// For example, consider code which touches several global variables at
// once:
//
//    static int foo[N], bar[N], baz[N];
//
//    for (i = 0; i < N; ++i) {
//       foo[i] = bar[i] * baz[i];
//    }
//
// On ARM the addresses of the 3 arrays must each be kept in a register, so
// this code has quite high register pressure (loop body):
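//
//    (Illustrative ARM only -- the exact registers and codegen will differ;
//    the point is that three separate base registers stay live:)
//
//      ldr     r1, [r5], #4
//      ldr     r2, [r6], #4
//      mul     r1, r2, r1
//      str     r1, [r0], #4
//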
// The pass converts the code to something like:
//
//    static struct { int foo[N], bar[N], baz[N]; } merged;
//
//    for (i = 0; i < N; ++i) {
//       merged.foo[i] = merged.bar[i] * merged.baz[i];
//    }
//
// and in ARM code this becomes:
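//
//    (Again illustrative only -- all three arrays are now addressed as
//    offsets from a single base register:)
//
//      ldr     r0, [r5, #40]
//      ldr     r1, [r5, #80]
//      mul     r0, r1, r0
//      str     r0, [r5], #4
//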
// Note that we saved 2 registers here almost "for free".
//
// However, merging globals can have tradeoffs:
//  - it confuses debuggers, tools, and users
//  - it makes linker optimizations less useful (order files, LOHs, ...)
//  - it forces usage of indexed addressing (which isn't necessarily "free")
//  - it can increase register pressure when the uses are disparate enough.
//
// We use heuristics to discover the best global grouping we can (cf. cl::opts).
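//
// For experimentation, these heuristics are exposed as the cl::opts declared
// below, and can be toggled from tools that run this pass, e.g. (illustrative
// llc invocation; <N> is target-dependent):
//   llc -global-merge-group-by-use=false -global-merge-max-offset=<N> ...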
//
//===----------------------------------------------------------------------===//
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/SectionKind.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "global-merge"

// FIXME: This is only useful as a last-resort way to disable the pass.
static cl::opt<bool>
EnableGlobalMerge("enable-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"),
                  cl::init(true));

static cl::opt<unsigned>
GlobalMergeMaxOffset("global-merge-max-offset", cl::Hidden,
                     cl::desc("Set maximum offset for global merge pass"),
                     cl::init(0));

static cl::opt<bool> GlobalMergeGroupByUse(
    "global-merge-group-by-use", cl::Hidden,
    cl::desc("Improve global merge pass to look at uses"), cl::init(true));

static cl::opt<bool> GlobalMergeIgnoreSingleUse(
    "global-merge-ignore-single-use", cl::Hidden,
    cl::desc("Improve global merge pass to ignore globals only used alone"),
    cl::init(true));

static cl::opt<bool>
EnableGlobalMergeOnConst("global-merge-on-const", cl::Hidden,
                         cl::desc("Enable global merge pass on constants"),
                         cl::init(false));

// FIXME: this could be a transitional option; we should remove it once we are
// sure this optimization always benefits all targets.
static cl::opt<cl::boolOrDefault>
EnableGlobalMergeOnExternal("global-merge-on-external", cl::Hidden,
     cl::desc("Enable global merge pass on external linkage"));

STATISTIC(NumMerged, "Number of globals merged");

namespace {

class GlobalMerge : public FunctionPass {
  const TargetMachine *TM = nullptr;

  // FIXME: Infer the maximum possible offset depending on the actual users
  // (these max offsets are different for the users inside Thumb or ARM
  // functions), see the code that passes in the offset in the ARM backend
  // for more information.
  unsigned MaxOffset;

  /// Whether we should try to optimize for size only.
  /// Currently, this applies a dead simple heuristic: only consider globals
  /// used in minsize functions for merging.
  /// FIXME: This could learn about optsize, and be used in the cost model.
  bool OnlyOptimizeForSize = false;

  /// Whether we should merge global variables that have external linkage.
  bool MergeExternalGlobals = false;

  /// Whether the module targets Mach-O (aliases on merged globals are
  /// handled differently there).
  bool IsMachO = false;

  bool doMerge(SmallVectorImpl<GlobalVariable *> &Globals, Module &M,
               bool isConst, unsigned AddrSpace) const;

  /// Merge everything in \p Globals for which the corresponding bit
  /// in \p GlobalSet is set.
  bool doMerge(const SmallVectorImpl<GlobalVariable *> &Globals,
               const BitVector &GlobalSet, Module &M, bool isConst,
               unsigned AddrSpace) const;

  /// Check if the given variable has been identified as must keep.
  /// \pre setMustKeepGlobalVariables must have been called on the Module
  /// that contains \p GV.
  bool isMustKeepGlobalVariable(const GlobalVariable *GV) const {
    return MustKeepGlobalVariables.count(GV);
  }

  /// Collect every variable marked as "used" or used in a landing pad
  /// instruction for this Module.
  void setMustKeepGlobalVariables(Module &M);

  /// Collect every variable marked as "used".
  void collectUsedGlobalVariables(Module &M, StringRef Name);

  /// Keep track of the GlobalVariables that must not be merged away.
  SmallPtrSet<const GlobalVariable *, 16> MustKeepGlobalVariables;

public:
  static char ID; // Pass identification, replacement for typeid.

  explicit GlobalMerge()
      : FunctionPass(ID), MaxOffset(GlobalMergeMaxOffset) {
    initializeGlobalMergePass(*PassRegistry::getPassRegistry());
  }

  explicit GlobalMerge(const TargetMachine *TM, unsigned MaximalOffset,
                       bool OnlyOptimizeForSize, bool MergeExternalGlobals)
      : FunctionPass(ID), TM(TM), MaxOffset(MaximalOffset),
        OnlyOptimizeForSize(OnlyOptimizeForSize),
        MergeExternalGlobals(MergeExternalGlobals) {
    initializeGlobalMergePass(*PassRegistry::getPassRegistry());
  }

  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;
  bool doFinalization(Module &M) override;

  StringRef getPassName() const override { return "Merge internal globals"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    FunctionPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char GlobalMerge::ID = 0;

INITIALIZE_PASS(GlobalMerge, DEBUG_TYPE, "Merge global variables", false, false)

bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable *> &Globals,
                          Module &M, bool isConst, unsigned AddrSpace) const {
  auto &DL = M.getDataLayout();
  // FIXME: Find better heuristics
  llvm::stable_sort(
      Globals, [&DL](const GlobalVariable *GV1, const GlobalVariable *GV2) {
        // We don't support scalable global variables.
        return DL.getTypeAllocSize(GV1->getValueType()).getFixedSize() <
               DL.getTypeAllocSize(GV2->getValueType()).getFixedSize();
      });

  // If we want to just blindly group all globals together, do so.
  if (!GlobalMergeGroupByUse) {
    BitVector AllGlobals(Globals.size());
    AllGlobals.set();
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // If we want to be smarter, look at all uses of each global, to try to
  // discover all sets of globals used together, and how many times each of
  // these sets occurred.
  //
  // Keep this reasonably efficient, by having an append-only list of all sets
  // discovered so far (UsedGlobalSets), and mapping each "together-ness" unit
  // of code (currently, a Function) to the set of globals seen so far that are
  // used together in that unit (GlobalUsesByFunction).
  //
  // When we look at the Nth global, we know that any new set is either:
  // - the singleton set {N}, containing this global only, or
  // - the union of {N} and a previously-discovered set, containing some
  //   combination of the previous N-1 globals.
  // Using that knowledge, when looking at the Nth global, we can keep:
  // - a reference to the singleton set {N} (CurGVOnlySetIdx)
  // - a list mapping each previous set to its union with {N} (EncounteredUGS),
  //   if it actually occurs.
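  //
  // Illustrative walk-through (an example, not taken from the code below):
  // with globals A, B, C, if f() uses A and B while g() uses A and C, we
  // first create the singleton {A} (seen in both f and g); then, while
  // processing B and C, we retire {A}'s counts in favor of the expanded
  // sets {A,B} (for f) and {A,C} (for g), each ending with UsageCount 1.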

  // We keep track of the sets of globals used together "close enough".
  struct UsedGlobalSet {
    BitVector Globals;
    unsigned UsageCount = 1;

    UsedGlobalSet(size_t Size) : Globals(Size) {}
  };

  // Each set is unique in UsedGlobalSets.
  std::vector<UsedGlobalSet> UsedGlobalSets;

  // Avoid repeating the create-global-set pattern.
  auto CreateGlobalSet = [&]() -> UsedGlobalSet & {
    UsedGlobalSets.emplace_back(Globals.size());
    return UsedGlobalSets.back();
  };

  // The first set is the empty set.
  CreateGlobalSet().UsageCount = 0;

  // We define "close enough" to be "in the same function".
  // FIXME: Grouping uses by function is way too aggressive, so we should have
  // a better metric for distance between uses.
  // The obvious alternative would be to group by BasicBlock, but that's in
  // turn too conservative.
  // Anything in between wouldn't be trivial to compute, so just stick with
  // per-function grouping.
  //
  // The value type is an index into UsedGlobalSets.
  // The default (0) conveniently points to the empty set.
  DenseMap<Function *, size_t /*UsedGlobalSetIdx*/> GlobalUsesByFunction;

  // Now, look at each merge-eligible global in turn.

  // Keep track of the sets we already encountered to which we added the
  // current global.
  // Each element matches the same-index element in UsedGlobalSets.
  // This lets us efficiently tell whether a set has already been expanded to
  // include the current global.
  std::vector<size_t> EncounteredUGS;

  for (size_t GI = 0, GE = Globals.size(); GI != GE; ++GI) {
    GlobalVariable *GV = Globals[GI];

    // Reset the encountered sets for this global...
    std::fill(EncounteredUGS.begin(), EncounteredUGS.end(), 0);
    // ...and grow it in case we created new sets for the previous global.
    EncounteredUGS.resize(UsedGlobalSets.size());

    // We might need to create a set that only consists of the current global.
    // Keep track of its index into UsedGlobalSets.
    size_t CurGVOnlySetIdx = 0;

    // For each global, look at all its Uses.
    for (auto &U : GV->uses()) {
      // This Use might be a ConstantExpr. We're interested in Instruction
      // users, so look through ConstantExpr...
      Use *UI, *UE;
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
        if (CE->use_empty())
          continue;
        UI = &*CE->use_begin();
        UE = nullptr;
      } else if (isa<Instruction>(U.getUser())) {
        UI = &U;
        UE = UI->getNext();
      } else {
        continue;
      }

      // ...to iterate on all the instruction users of the global.
      // Note that we iterate on Uses and not on Users to be able to getNext().
      for (; UI != UE; UI = UI->getNext()) {
        Instruction *I = dyn_cast<Instruction>(UI->getUser());
        if (!I)
          continue;

        Function *ParentFn = I->getParent()->getParent();

        // If we're only optimizing for size, ignore non-minsize functions.
        if (OnlyOptimizeForSize && !ParentFn->hasMinSize())
          continue;

        size_t UGSIdx = GlobalUsesByFunction[ParentFn];

        // If this is the first global this function uses, map it to the set
        // consisting of this global only.
        if (!UGSIdx) {
          // If that set doesn't exist yet, create it.
          if (!CurGVOnlySetIdx) {
            CurGVOnlySetIdx = UsedGlobalSets.size();
            CreateGlobalSet().Globals.set(GI);
          } else {
            ++UsedGlobalSets[CurGVOnlySetIdx].UsageCount;
          }

          GlobalUsesByFunction[ParentFn] = CurGVOnlySetIdx;
          continue;
        }

        // If the function's set already contains this global, just increment
        // the counter.
        if (UsedGlobalSets[UGSIdx].Globals.test(GI)) {
          ++UsedGlobalSets[UGSIdx].UsageCount;
          continue;
        }

        // If not, the previous set wasn't actually used in this function.
        --UsedGlobalSets[UGSIdx].UsageCount;

        // If we already expanded the previous set to include this global, just
        // reuse that expanded set.
        if (size_t ExpandedIdx = EncounteredUGS[UGSIdx]) {
          ++UsedGlobalSets[ExpandedIdx].UsageCount;
          GlobalUsesByFunction[ParentFn] = ExpandedIdx;
          continue;
        }

        // If not, create a new set consisting of the union of the previous set
        // and this global. Mark it as encountered, so we can reuse it later.
        GlobalUsesByFunction[ParentFn] = EncounteredUGS[UGSIdx] =
            UsedGlobalSets.size();

        UsedGlobalSet &NewUGS = CreateGlobalSet();
        NewUGS.Globals.set(GI);
        NewUGS.Globals |= UsedGlobalSets[UGSIdx].Globals;
      }
    }
  }

  // Now we found a bunch of sets of globals used together. We accumulated
  // the number of times we encountered each set (roughly, how often that
  // exact set of globals is used together).
  //
  // Multiply that by the size of the set to give us a crude profitability
  // metric.
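  //
  // For example, a set of 3 globals with UsageCount 4 scores 3 * 4 = 12 and
  // beats a set of 2 globals with UsageCount 5 (score 10); the loops below
  // walk the sorted array from the back, i.e. best score first.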
  llvm::stable_sort(UsedGlobalSets,
                    [](const UsedGlobalSet &UGS1, const UsedGlobalSet &UGS2) {
                      return UGS1.Globals.count() * UGS1.UsageCount <
                             UGS2.Globals.count() * UGS2.UsageCount;
                    });

  // We can choose to merge all globals together, but ignore globals never used
  // with another global. This catches the obviously non-profitable case of
  // having a single global, but is aggressive enough for any other case.
  if (GlobalMergeIgnoreSingleUse) {
    BitVector AllGlobals(Globals.size());
    for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) {
      const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1];
      if (UGS.UsageCount == 0)
        continue;
      if (UGS.Globals.count() > 1)
        AllGlobals |= UGS.Globals;
    }
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // Starting from the sets with the best (=biggest) profitability, find a
  // good combination.
  // The ideal (and expensive) solution can only be found by trying all
  // combinations, looking for the one with the best profitability.
  // Don't be smart about it, and just pick the first compatible combination,
  // starting with the sets with the best profitability.
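  //
  // For instance (illustrative): if the most profitable set is {A,B,C} and
  // the next one is {B,D}, the latter is skipped because B was already
  // picked, and we move on to the next set that is disjoint from everything
  // picked so far.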
  BitVector PickedGlobals(Globals.size());
  bool Changed = false;

  for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) {
    const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1];
    if (UGS.UsageCount == 0)
      continue;
    if (PickedGlobals.anyCommon(UGS.Globals))
      continue;
    PickedGlobals |= UGS.Globals;
    // If the set only contains one global, there's no point in merging.
    // Ignore the global for inclusion in other sets though, so keep it in
    // PickedGlobals.
    if (UGS.Globals.count() < 2)
      continue;
    Changed |= doMerge(Globals, UGS.Globals, M, isConst, AddrSpace);
  }

  return Changed;
}

bool GlobalMerge::doMerge(const SmallVectorImpl<GlobalVariable *> &Globals,
                          const BitVector &GlobalSet, Module &M, bool isConst,
                          unsigned AddrSpace) const {
  assert(Globals.size() > 1);

  Type *Int32Ty = Type::getInt32Ty(M.getContext());
  Type *Int8Ty = Type::getInt8Ty(M.getContext());
  auto &DL = M.getDataLayout();

  LLVM_DEBUG(dbgs() << " Trying to merge set, starts with #"
                    << GlobalSet.find_first() << "\n");

  bool Changed = false;
  ssize_t i = GlobalSet.find_first();
  while (i != -1) {
    ssize_t j = 0;
    uint64_t MergedSize = 0;
    std::vector<Type *> Tys;
    std::vector<Constant *> Inits;
    std::vector<unsigned> StructIdxs;

    bool HasExternal = false;
    StringRef FirstExternalName;
    Align MaxAlign;
    unsigned CurIdx = 0;
    for (j = i; j != -1; j = GlobalSet.find_next(j)) {
      Type *Ty = Globals[j]->getValueType();

      // Make sure we use the same alignment AsmPrinter would use.
      Align Alignment = DL.getPreferredAlign(Globals[j]);
      unsigned Padding = alignTo(MergedSize, Alignment) - MergedSize;
      MergedSize += Padding;
      MergedSize += DL.getTypeAllocSize(Ty);
      if (MergedSize > MaxOffset) {
        break;
      }
      if (Padding) {
        Tys.push_back(ArrayType::get(Int8Ty, Padding));
        Inits.push_back(ConstantAggregateZero::get(Tys.back()));
        ++CurIdx;
      }
      Tys.push_back(Ty);
      Inits.push_back(Globals[j]->getInitializer());
      StructIdxs.push_back(CurIdx++);

      MaxAlign = std::max(MaxAlign, Alignment);

      if (Globals[j]->hasExternalLinkage() && !HasExternal) {
        HasExternal = true;
        FirstExternalName = Globals[j]->getName();
      }
    }

    // Exit early if there is only one global to merge.
    if (Tys.size() < 2) {
      i = j;
      continue;
    }

    // If none of the merged variables has external linkage, we don't need to
    // expose the symbol after merging.
    GlobalValue::LinkageTypes Linkage = HasExternal
                                            ? GlobalValue::ExternalLinkage
                                            : GlobalValue::InternalLinkage;
    // Use a packed struct so we can control alignment.
    StructType *MergedTy = StructType::get(M.getContext(), Tys, true);
    Constant *MergedInit = ConstantStruct::get(MergedTy, Inits);

    // On Darwin external linkage needs to be preserved, otherwise
    // dsymutil cannot preserve the debug info for the merged
    // variables. If they have external linkage, use the symbol name
    // of the first variable merged as the suffix of the global symbol
    // name. This avoids a link-time naming conflict for the
    // _MergedGlobals symbols.
    Twine MergedName =
        (IsMachO && HasExternal)
            ? "_MergedGlobals_" + FirstExternalName
            : "_MergedGlobals";
    auto MergedLinkage = IsMachO ? Linkage : GlobalValue::PrivateLinkage;
    auto *MergedGV = new GlobalVariable(
        M, MergedTy, isConst, MergedLinkage, MergedInit, MergedName, nullptr,
        GlobalVariable::NotThreadLocal, AddrSpace);

    MergedGV->setAlignment(MaxAlign);
    MergedGV->setSection(Globals[i]->getSection());

    const StructLayout *MergedLayout = DL.getStructLayout(MergedTy);
    for (ssize_t k = i, idx = 0; k != j; k = GlobalSet.find_next(k), ++idx) {
      GlobalValue::LinkageTypes Linkage = Globals[k]->getLinkage();
      std::string Name(Globals[k]->getName());
      GlobalValue::VisibilityTypes Visibility = Globals[k]->getVisibility();
      GlobalValue::DLLStorageClassTypes DLLStorage =
          Globals[k]->getDLLStorageClass();

      // Copy metadata while adjusting any debug info metadata by the original
      // global's offset within the merged global.
      MergedGV->copyMetadata(Globals[k],
                             MergedLayout->getElementOffset(StructIdxs[idx]));

      Constant *Idx[2] = {
          ConstantInt::get(Int32Ty, 0),
          ConstantInt::get(Int32Ty, StructIdxs[idx]),
      };
      Constant *GEP =
          ConstantExpr::getInBoundsGetElementPtr(MergedTy, MergedGV, Idx);
      Globals[k]->replaceAllUsesWith(GEP);
      Globals[k]->eraseFromParent();

      // When the linkage is not internal we must emit an alias for the original
      // variable name as it may be accessed from another object. On non-Mach-O
      // we can also emit an alias for internal linkage as it's safe to do so.
      // It's not safe on Mach-O as the alias (and thus the portion of the
      // MergedGlobals variable) may be dead stripped at link time.
      if (Linkage != GlobalValue::InternalLinkage || !IsMachO) {
        GlobalAlias *GA = GlobalAlias::create(Tys[StructIdxs[idx]], AddrSpace,
                                              Linkage, Name, GEP, &M);
        GA->setVisibility(Visibility);
        GA->setDLLStorageClass(DLLStorage);
      }
    }

    NumMerged++;
    Changed = true;
    i = j;
  }

  return Changed;
}

void GlobalMerge::collectUsedGlobalVariables(Module &M, StringRef Name) {
  // Extract global variables from the llvm.used / llvm.compiler.used array.
  const GlobalVariable *GV = M.getGlobalVariable(Name);
  if (!GV || !GV->hasInitializer()) return;

  // Should be an array of 'i8*'.
  const ConstantArray *InitList = cast<ConstantArray>(GV->getInitializer());

  for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
    if (const GlobalVariable *G = dyn_cast<GlobalVariable>(
            InitList->getOperand(i)->stripPointerCasts()))
      MustKeepGlobalVariables.insert(G);
}

void GlobalMerge::setMustKeepGlobalVariables(Module &M) {
  collectUsedGlobalVariables(M, "llvm.used");
  collectUsedGlobalVariables(M, "llvm.compiler.used");

  for (Function &F : M) {
    for (BasicBlock &BB : F) {
      Instruction *Pad = BB.getFirstNonPHI();
      if (!Pad->isEHPad())
        continue;

      // Keep globals used by landingpads and catchpads.
      for (const Use &U : Pad->operands()) {
        if (const GlobalVariable *GV =
                dyn_cast<GlobalVariable>(U->stripPointerCasts()))
          MustKeepGlobalVariables.insert(GV);
      }
    }
  }
}

bool GlobalMerge::doInitialization(Module &M) {
  if (!EnableGlobalMerge)
    return false;

  IsMachO = Triple(M.getTargetTriple()).isOSBinFormatMachO();

  auto &DL = M.getDataLayout();
  DenseMap<std::pair<unsigned, StringRef>, SmallVector<GlobalVariable *, 16>>
      Globals, ConstGlobals, BSSGlobals;
  bool Changed = false;
  setMustKeepGlobalVariables(M);

  // Grab all non-const globals.
  for (auto &GV : M.globals()) {
    // Merge is safe for "normal" internal or external globals only.
    if (GV.isDeclaration() || GV.isThreadLocal() || GV.hasImplicitSection())
      continue;

    // It's not safe to merge globals that may be preempted.
    if (TM && !TM->shouldAssumeDSOLocal(M, &GV))
      continue;

    if (!(MergeExternalGlobals && GV.hasExternalLinkage()) &&
        !GV.hasInternalLinkage())
      continue;

    PointerType *PT = dyn_cast<PointerType>(GV.getType());
    assert(PT && "Global variable is not a pointer!");

    unsigned AddressSpace = PT->getAddressSpace();
    StringRef Section = GV.getSection();

    // Ignore all 'special' globals.
    if (GV.getName().startswith("llvm.") ||
        GV.getName().startswith(".llvm."))
      continue;

    // Ignore all "required" globals:
    if (isMustKeepGlobalVariable(&GV))
      continue;

    Type *Ty = GV.getValueType();
    if (DL.getTypeAllocSize(Ty) < MaxOffset) {
      if (TM &&
          TargetLoweringObjectFile::getKindForGlobal(&GV, *TM).isBSS())
        BSSGlobals[{AddressSpace, Section}].push_back(&GV);
      else if (GV.isConstant())
        ConstGlobals[{AddressSpace, Section}].push_back(&GV);
      else
        Globals[{AddressSpace, Section}].push_back(&GV);
    }
  }

  for (auto &P : Globals)
    if (P.second.size() > 1)
      Changed |= doMerge(P.second, M, false, P.first.first);

  for (auto &P : BSSGlobals)
    if (P.second.size() > 1)
      Changed |= doMerge(P.second, M, false, P.first.first);

  if (EnableGlobalMergeOnConst)
    for (auto &P : ConstGlobals)
      if (P.second.size() > 1)
        Changed |= doMerge(P.second, M, true, P.first.first);

  return Changed;
}

bool GlobalMerge::runOnFunction(Function &F) {
  return false;
}

bool GlobalMerge::doFinalization(Module &M) {
  MustKeepGlobalVariables.clear();
  return false;
}

Pass *llvm::createGlobalMergePass(const TargetMachine *TM, unsigned Offset,
                                  bool OnlyOptimizeForSize,
                                  bool MergeExternalByDefault) {
  bool MergeExternal = (EnableGlobalMergeOnExternal == cl::BOU_UNSET) ?
      MergeExternalByDefault : (EnableGlobalMergeOnExternal == cl::BOU_TRUE);
  return new GlobalMerge(TM, Offset, OnlyOptimizeForSize, MergeExternal);
}