//===- ReducerWorkItem.cpp - Wrapper for Module and MachineFunction -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "ReducerWorkItem.h"
#include "TestRunner.h"
#include "llvm/Analysis/ModuleSummaryAnalysis.h"
#include "llvm/Analysis/ProfileSummaryInfo.h"
#include "llvm/Bitcode/BitcodeReader.h"
#include "llvm/Bitcode/BitcodeWriter.h"
#include "llvm/CodeGen/CommandFlags.h"
#include "llvm/CodeGen/MIRParser/MIRParser.h"
#include "llvm/CodeGen/MIRPrinter.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/PseudoSourceValueManager.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Verifier.h"
#include "llvm/IRReader/IRReader.h"
#include "llvm/MC/TargetRegistry.h"
#include "llvm/Passes/PassBuilder.h"
#include "llvm/Support/MemoryBufferRef.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetSelect.h"
#include "llvm/Support/ToolOutputFile.h"
#include "llvm/Support/WithColor.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/TargetParser/Host.h"
#include "llvm/Transforms/IPO/ThinLTOBitcodeWriter.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include <optional>

using namespace llvm;

ReducerWorkItem::ReducerWorkItem() = default;
ReducerWorkItem::~ReducerWorkItem() = default;

extern cl::OptionCategory LLVMReduceOptions;
static cl::opt<std::string> TargetTriple("mtriple",
                                         cl::desc("Set the target triple"),
                                         cl::cat(LLVMReduceOptions));

static cl::opt<bool> PrintInvalidMachineReductions(
    "print-invalid-reduction-machine-verifier-errors",
    cl::desc(
        "Print machine verifier errors on invalid reduction attempts"),
    cl::cat(LLVMReduceOptions));

static cl::opt<bool> TmpFilesAsBitcode(
    "write-tmp-files-as-bitcode",
    cl::desc("Always write temporary files as bitcode instead of textual IR"),
    cl::init(false), cl::cat(LLVMReduceOptions));

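/// Copy MachineFrameInfo state from \p SrcMFI into \p DstMFI, remapping the
/// save/restore point blocks through \p Src2DstMBB. Stack objects are
/// recreated in the same order so frame index numbering is preserved.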
static void cloneFrameInfo(
    MachineFrameInfo &DstMFI, const MachineFrameInfo &SrcMFI,
    const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB) {
  DstMFI.setFrameAddressIsTaken(SrcMFI.isFrameAddressTaken());
  DstMFI.setReturnAddressIsTaken(SrcMFI.isReturnAddressTaken());
  DstMFI.setHasStackMap(SrcMFI.hasStackMap());
  DstMFI.setHasPatchPoint(SrcMFI.hasPatchPoint());
  DstMFI.setUseLocalStackAllocationBlock(
      SrcMFI.getUseLocalStackAllocationBlock());
  DstMFI.setOffsetAdjustment(SrcMFI.getOffsetAdjustment());

  DstMFI.ensureMaxAlignment(SrcMFI.getMaxAlign());
  assert(DstMFI.getMaxAlign() == SrcMFI.getMaxAlign() &&
         "we need to set exact alignment");

  DstMFI.setAdjustsStack(SrcMFI.adjustsStack());
  DstMFI.setHasCalls(SrcMFI.hasCalls());
  DstMFI.setHasOpaqueSPAdjustment(SrcMFI.hasOpaqueSPAdjustment());
  DstMFI.setHasCopyImplyingStackAdjustment(
      SrcMFI.hasCopyImplyingStackAdjustment());
  DstMFI.setHasVAStart(SrcMFI.hasVAStart());
  DstMFI.setHasMustTailInVarArgFunc(SrcMFI.hasMustTailInVarArgFunc());
  DstMFI.setHasTailCall(SrcMFI.hasTailCall());

  if (SrcMFI.isMaxCallFrameSizeComputed())
    DstMFI.setMaxCallFrameSize(SrcMFI.getMaxCallFrameSize());

  DstMFI.setCVBytesOfCalleeSavedRegisters(
      SrcMFI.getCVBytesOfCalleeSavedRegisters());

  if (MachineBasicBlock *SavePt = SrcMFI.getSavePoint())
    DstMFI.setSavePoint(Src2DstMBB.find(SavePt)->second);
  if (MachineBasicBlock *RestorePt = SrcMFI.getRestorePoint())
    DstMFI.setRestorePoint(Src2DstMBB.find(RestorePt)->second);

  auto CopyObjectProperties = [](MachineFrameInfo &DstMFI,
                                 const MachineFrameInfo &SrcMFI, int FI) {
    if (SrcMFI.isStatepointSpillSlotObjectIndex(FI))
      DstMFI.markAsStatepointSpillSlotObjectIndex(FI);
    DstMFI.setObjectSSPLayout(FI, SrcMFI.getObjectSSPLayout(FI));
    DstMFI.setObjectZExt(FI, SrcMFI.isObjectZExt(FI));
    DstMFI.setObjectSExt(FI, SrcMFI.isObjectSExt(FI));
  };

  for (int i = 0, e = SrcMFI.getNumObjects() - SrcMFI.getNumFixedObjects();
       i != e; ++i) {
    int NewFI;

    assert(!SrcMFI.isFixedObjectIndex(i));
    if (SrcMFI.isVariableSizedObjectIndex(i)) {
      NewFI = DstMFI.CreateVariableSizedObject(SrcMFI.getObjectAlign(i),
                                               SrcMFI.getObjectAllocation(i));
    } else {
      NewFI = DstMFI.CreateStackObject(
          SrcMFI.getObjectSize(i), SrcMFI.getObjectAlign(i),
          SrcMFI.isSpillSlotObjectIndex(i), SrcMFI.getObjectAllocation(i),
          SrcMFI.getStackID(i));
      DstMFI.setObjectOffset(NewFI, SrcMFI.getObjectOffset(i));
    }

    CopyObjectProperties(DstMFI, SrcMFI, i);

    (void)NewFI;
    assert(i == NewFI && "expected to keep stable frame index numbering");
  }

  // Copy the fixed frame objects backwards to preserve frame index numbers,
  // since CreateFixedObject uses front insertion.
  for (int i = -1; i >= (int)-SrcMFI.getNumFixedObjects(); --i) {
    assert(SrcMFI.isFixedObjectIndex(i));
    int NewFI = DstMFI.CreateFixedObject(
        SrcMFI.getObjectSize(i), SrcMFI.getObjectOffset(i),
        SrcMFI.isImmutableObjectIndex(i), SrcMFI.isAliasedObjectIndex(i));
    CopyObjectProperties(DstMFI, SrcMFI, i);

    (void)NewFI;
    assert(i == NewFI && "expected to keep stable frame index numbering");
  }

  for (unsigned I = 0, E = SrcMFI.getLocalFrameObjectCount(); I < E; ++I) {
    auto LocalObject = SrcMFI.getLocalFrameObjectMap(I);
    DstMFI.mapLocalFrameObject(LocalObject.first, LocalObject.second);
  }

  DstMFI.setCalleeSavedInfo(SrcMFI.getCalleeSavedInfo());

  if (SrcMFI.hasStackProtectorIndex()) {
    DstMFI.setStackProtectorIndex(SrcMFI.getStackProtectorIndex());
  }

  // FIXME: Needs test, missing MIR serialization.
  if (SrcMFI.hasFunctionContextIndex()) {
    DstMFI.setFunctionContextIndex(SrcMFI.getFunctionContextIndex());
  }
}

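/// Recreate the jump tables of \p SrcJTI in \p DstMF, remapping each
/// destination block through \p Src2DstMBB.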
static void cloneJumpTableInfo(
    MachineFunction &DstMF, const MachineJumpTableInfo &SrcJTI,
    const DenseMap<MachineBasicBlock *, MachineBasicBlock *> &Src2DstMBB) {

  auto *DstJTI = DstMF.getOrCreateJumpTableInfo(SrcJTI.getEntryKind());

  std::vector<MachineBasicBlock *> DstBBs;

  for (const MachineJumpTableEntry &Entry : SrcJTI.getJumpTables()) {
    for (MachineBasicBlock *X : Entry.MBBs)
      DstBBs.push_back(Src2DstMBB.find(X)->second);

    DstJTI->createJumpTableIndex(DstBBs);
    DstBBs.clear();
  }
}

static void cloneMemOperands(MachineInstr &DstMI, MachineInstr &SrcMI,
                             MachineFunction &SrcMF, MachineFunction &DstMF) {
  // The new MachineMemOperands should be owned by the new function's
  // allocator.
  PseudoSourceValueManager &PSVMgr = DstMF.getPSVManager();

  // We also need to remap the PseudoSourceValues from the new function's
  // PseudoSourceValueManager.
  SmallVector<MachineMemOperand *, 2> NewMMOs;
  for (MachineMemOperand *OldMMO : SrcMI.memoperands()) {
    MachinePointerInfo NewPtrInfo(OldMMO->getPointerInfo());
    if (const PseudoSourceValue *PSV =
            dyn_cast_if_present<const PseudoSourceValue *>(NewPtrInfo.V)) {
      switch (PSV->kind()) {
      case PseudoSourceValue::Stack:
        NewPtrInfo.V = PSVMgr.getStack();
        break;
      case PseudoSourceValue::GOT:
        NewPtrInfo.V = PSVMgr.getGOT();
        break;
      case PseudoSourceValue::JumpTable:
        NewPtrInfo.V = PSVMgr.getJumpTable();
        break;
      case PseudoSourceValue::ConstantPool:
        NewPtrInfo.V = PSVMgr.getConstantPool();
        break;
      case PseudoSourceValue::FixedStack:
        NewPtrInfo.V = PSVMgr.getFixedStack(
            cast<FixedStackPseudoSourceValue>(PSV)->getFrameIndex());
        break;
      case PseudoSourceValue::GlobalValueCallEntry:
        NewPtrInfo.V = PSVMgr.getGlobalValueCallEntry(
            cast<GlobalValuePseudoSourceValue>(PSV)->getValue());
        break;
      case PseudoSourceValue::ExternalSymbolCallEntry:
        NewPtrInfo.V = PSVMgr.getExternalSymbolCallEntry(
            cast<ExternalSymbolPseudoSourceValue>(PSV)->getSymbol());
        break;
      case PseudoSourceValue::TargetCustom:
      default:
        // FIXME: We have no generic interface for allocating custom PSVs.
        report_fatal_error("Cloning TargetCustom PSV not handled");
      }
    }

    MachineMemOperand *NewMMO = DstMF.getMachineMemOperand(
        NewPtrInfo, OldMMO->getFlags(), OldMMO->getMemoryType(),
        OldMMO->getBaseAlign(), OldMMO->getAAInfo(), OldMMO->getRanges(),
        OldMMO->getSyncScopeID(), OldMMO->getSuccessOrdering(),
        OldMMO->getFailureOrdering());
    NewMMOs.push_back(NewMMO);
  }

  DstMI.setMemRefs(DstMF, NewMMOs);
}

static std::unique_ptr<MachineFunction> cloneMF(MachineFunction *SrcMF,
                                                MachineModuleInfo &DestMMI) {
  auto DstMF = std::make_unique<MachineFunction>(
      SrcMF->getFunction(), SrcMF->getTarget(), SrcMF->getSubtarget(),
      SrcMF->getContext(), SrcMF->getFunctionNumber());
  DenseMap<MachineBasicBlock *, MachineBasicBlock *> Src2DstMBB;

  auto *SrcMRI = &SrcMF->getRegInfo();
  auto *DstMRI = &DstMF->getRegInfo();

  // Clone blocks.
  for (MachineBasicBlock &SrcMBB : *SrcMF) {
    MachineBasicBlock *DstMBB =
        DstMF->CreateMachineBasicBlock(SrcMBB.getBasicBlock());
    Src2DstMBB[&SrcMBB] = DstMBB;

    DstMBB->setCallFrameSize(SrcMBB.getCallFrameSize());

    if (SrcMBB.isIRBlockAddressTaken())
      DstMBB->setAddressTakenIRBlock(SrcMBB.getAddressTakenIRBlock());
    if (SrcMBB.isMachineBlockAddressTaken())
      DstMBB->setMachineBlockAddressTaken();

    // FIXME: This is not serialized
    if (SrcMBB.hasLabelMustBeEmitted())
      DstMBB->setLabelMustBeEmitted();

    DstMBB->setAlignment(SrcMBB.getAlignment());

    // FIXME: This is not serialized
    DstMBB->setMaxBytesForAlignment(SrcMBB.getMaxBytesForAlignment());

    DstMBB->setIsEHPad(SrcMBB.isEHPad());
    DstMBB->setIsEHScopeEntry(SrcMBB.isEHScopeEntry());
    DstMBB->setIsEHCatchretTarget(SrcMBB.isEHCatchretTarget());
    DstMBB->setIsEHFuncletEntry(SrcMBB.isEHFuncletEntry());

    // FIXME: These are not serialized
    DstMBB->setIsCleanupFuncletEntry(SrcMBB.isCleanupFuncletEntry());
    DstMBB->setIsBeginSection(SrcMBB.isBeginSection());
    DstMBB->setIsEndSection(SrcMBB.isEndSection());

    DstMBB->setSectionID(SrcMBB.getSectionID());
    DstMBB->setIsInlineAsmBrIndirectTarget(
        SrcMBB.isInlineAsmBrIndirectTarget());

    // FIXME: This is not serialized
    if (std::optional<uint64_t> Weight = SrcMBB.getIrrLoopHeaderWeight())
      DstMBB->setIrrLoopHeaderWeight(*Weight);
  }

  const MachineFrameInfo &SrcMFI = SrcMF->getFrameInfo();
  MachineFrameInfo &DstMFI = DstMF->getFrameInfo();

  // Copy stack objects and other info
  cloneFrameInfo(DstMFI, SrcMFI, Src2DstMBB);

  if (MachineJumpTableInfo *SrcJTI = SrcMF->getJumpTableInfo()) {
    cloneJumpTableInfo(*DstMF, *SrcJTI, Src2DstMBB);
  }

  // Remap the debug info frame index references.
  DstMF->VariableDbgInfos = SrcMF->VariableDbgInfos;

  // Clone virtual registers
  for (unsigned I = 0, E = SrcMRI->getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    Register NewReg = DstMRI->createIncompleteVirtualRegister(
        SrcMRI->getVRegName(Reg));
    assert(NewReg == Reg && "expected to preserve virtreg number");

    DstMRI->setRegClassOrRegBank(NewReg, SrcMRI->getRegClassOrRegBank(Reg));

    LLT RegTy = SrcMRI->getType(Reg);
    if (RegTy.isValid())
      DstMRI->setType(NewReg, RegTy);

    // Copy register allocation hints.
    const auto *Hints = SrcMRI->getRegAllocationHints(Reg);
    if (Hints)
      for (Register PrefReg : Hints->second)
        DstMRI->addRegAllocationHint(NewReg, PrefReg);
  }

  const TargetSubtargetInfo &STI = DstMF->getSubtarget();
  const TargetInstrInfo *TII = STI.getInstrInfo();
  const TargetRegisterInfo *TRI = STI.getRegisterInfo();

  // Link blocks.
  for (auto &SrcMBB : *SrcMF) {
    auto *DstMBB = Src2DstMBB[&SrcMBB];
    DstMF->push_back(DstMBB);

    for (auto It = SrcMBB.succ_begin(), IterEnd = SrcMBB.succ_end();
         It != IterEnd; ++It) {
      auto *SrcSuccMBB = *It;
      auto *DstSuccMBB = Src2DstMBB[SrcSuccMBB];
      DstMBB->addSuccessor(DstSuccMBB, SrcMBB.getSuccProbability(It));
    }

    for (auto &LI : SrcMBB.liveins_dbg())
      DstMBB->addLiveIn(LI);

    // Make sure MRI knows about registers clobbered by unwinder.
    if (DstMBB->isEHPad()) {
      if (auto *RegMask = TRI->getCustomEHPadPreservedMask(*DstMF))
        DstMRI->addPhysRegsUsedFromRegMask(RegMask);
    }
  }

  DenseSet<const uint32_t *> ConstRegisterMasks;

  // Track predefined/named regmasks which we ignore.
  for (const uint32_t *Mask : TRI->getRegMasks())
    ConstRegisterMasks.insert(Mask);

  // Clone instructions.
  for (auto &SrcMBB : *SrcMF) {
    auto *DstMBB = Src2DstMBB[&SrcMBB];
    for (auto &SrcMI : SrcMBB) {
      const auto &MCID = TII->get(SrcMI.getOpcode());
      auto *DstMI = DstMF->CreateMachineInstr(MCID, SrcMI.getDebugLoc(),
                                              /*NoImplicit=*/true);
      DstMI->setFlags(SrcMI.getFlags());
      DstMI->setAsmPrinterFlag(SrcMI.getAsmPrinterFlags());

      DstMBB->push_back(DstMI);
      for (auto &SrcMO : SrcMI.operands()) {
        MachineOperand DstMO(SrcMO);
        DstMO.clearParent();

        // Update MBB.
        if (DstMO.isMBB())
          DstMO.setMBB(Src2DstMBB[DstMO.getMBB()]);
        else if (DstMO.isRegMask()) {
          DstMRI->addPhysRegsUsedFromRegMask(DstMO.getRegMask());

          if (!ConstRegisterMasks.count(DstMO.getRegMask())) {
            uint32_t *DstMask = DstMF->allocateRegMask();
            std::memcpy(DstMask, SrcMO.getRegMask(),
                        sizeof(*DstMask) *
                            MachineOperand::getRegMaskSize(TRI->getNumRegs()));
            DstMO.setRegMask(DstMask);
          }
        }

        DstMI->addOperand(DstMO);
      }

      cloneMemOperands(*DstMI, SrcMI, *SrcMF, *DstMF);
    }
  }

  DstMF->setAlignment(SrcMF->getAlignment());
  DstMF->setExposesReturnsTwice(SrcMF->exposesReturnsTwice());
  DstMF->setHasInlineAsm(SrcMF->hasInlineAsm());
  DstMF->setHasWinCFI(SrcMF->hasWinCFI());

  DstMF->getProperties().reset().set(SrcMF->getProperties());

  if (!SrcMF->getFrameInstructions().empty() ||
      !SrcMF->getLongjmpTargets().empty() ||
      !SrcMF->getCatchretTargets().empty())
    report_fatal_error("cloning not implemented for machine function property");

  DstMF->setCallsEHReturn(SrcMF->callsEHReturn());
  DstMF->setCallsUnwindInit(SrcMF->callsUnwindInit());
  DstMF->setHasEHCatchret(SrcMF->hasEHCatchret());
  DstMF->setHasEHScopes(SrcMF->hasEHScopes());
  DstMF->setHasEHFunclets(SrcMF->hasEHFunclets());
  DstMF->setHasFakeUses(SrcMF->hasFakeUses());
  DstMF->setIsOutlined(SrcMF->isOutlined());

  if (!SrcMF->getLandingPads().empty() ||
      !SrcMF->getCodeViewAnnotations().empty() ||
      !SrcMF->getTypeInfos().empty() ||
      !SrcMF->getFilterIds().empty() ||
      SrcMF->hasAnyWasmLandingPadIndex() ||
      SrcMF->hasAnyCallSiteLandingPad() ||
      SrcMF->hasAnyCallSiteLabel() ||
      !SrcMF->getCallSitesInfo().empty())
    report_fatal_error("cloning not implemented for machine function property");

  DstMF->setDebugInstrNumberingCount(SrcMF->DebugInstrNumberingCount);

  if (!DstMF->cloneInfoFrom(*SrcMF, Src2DstMBB))
    report_fatal_error("target does not implement MachineFunctionInfo cloning");

  DstMRI->freezeReservedRegs();

  DstMF->verify(nullptr, "", &errs(), /*AbortOnError=*/true);

  return DstMF;
}

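/// Register all configured targets along with their MC layers, assembly
/// printers, and assembly parsers.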
static void initializeTargetInfo() {
  InitializeAllTargets();
  InitializeAllTargetMCs();
  InitializeAllAsmPrinters();
  InitializeAllAsmParsers();
}

void ReducerWorkItem::print(raw_ostream &ROS, void *p) const {
  if (MMI) {
    printMIR(ROS, *M);
    for (Function &F : *M) {
      if (auto *MF = MMI->getMachineFunction(F))
        printMIR(ROS, *MMI, *MF);
    }
  } else {
    M->print(ROS, /*AssemblyAnnotationWriter=*/nullptr,
             /*ShouldPreserveUseListOrder=*/true);
  }
}

bool ReducerWorkItem::verify(raw_fd_ostream *OS) const {
  if (verifyModule(*M, OS))
    return true;

  if (!MMI)
    return false;

  for (const Function &F : getModule()) {
    if (const MachineFunction *MF = MMI->getMachineFunction(F)) {
      // With the current state of quality, most reduction attempts fail the
      // machine verifier. Avoid spamming large function dumps on nearly every
      // attempt until the situation is better.
      if (!MF->verify(nullptr, "",
                      /*OS=*/PrintInvalidMachineReductions ? &errs() : nullptr,
                      /*AbortOnError=*/false)) {
        if (!PrintInvalidMachineReductions) {
          WithColor::warning(errs())
              << "reduction attempt on function '" << MF->getName()
              << "' failed machine verifier (debug with "
                 "-print-invalid-reduction-machine-verifier-errors)\n";
        }
        return true;
      }
    }
  }

  return false;
}

bool ReducerWorkItem::isReduced(const TestRunner &Test) const {
  const bool UseBitcode = Test.inputIsBitcode() || TmpFilesAsBitcode;

  SmallString<128> CurrentFilepath;

  // Write ReducerWorkItem to tmp file
  int FD;
  std::error_code EC = sys::fs::createTemporaryFile(
      "llvm-reduce", isMIR() ? "mir" : (UseBitcode ? "bc" : "ll"), FD,
      CurrentFilepath,
      UseBitcode && !isMIR() ? sys::fs::OF_None : sys::fs::OF_Text);
  if (EC) {
    WithColor::error(errs(), Test.getToolName())
        << "error making unique filename: " << EC.message() << '\n';
    exit(1);
  }

  ToolOutputFile Out(CurrentFilepath, FD);

  writeOutput(Out.os(), UseBitcode);

  Out.os().close();
  if (Out.os().has_error()) {
    WithColor::error(errs(), Test.getToolName())
        << "error emitting bitcode to file '" << CurrentFilepath
        << "': " << Out.os().error().message() << '\n';
    exit(1);
  }

  // Current Chunks aren't interesting
  return Test.run(CurrentFilepath);
}

std::unique_ptr<ReducerWorkItem>
ReducerWorkItem::clone(const TargetMachine *TM) const {
  auto CloneMMM = std::make_unique<ReducerWorkItem>();
  if (TM) {
    // We're assuming the Module IR contents are always unchanged by MIR
    // reductions, and can share it as a constant.
    CloneMMM->M = M;

    // MachineModuleInfo contains a lot of other state used during codegen
    // which we won't be using here, but we should be able to ignore it
    // (although this is pretty ugly).
    CloneMMM->MMI = std::make_unique<MachineModuleInfo>(TM);

    for (const Function &F : getModule()) {
      if (auto *MF = MMI->getMachineFunction(F))
        CloneMMM->MMI->insertFunction(F, cloneMF(MF, *CloneMMM->MMI));
    }
  } else {
    CloneMMM->M = CloneModule(*M);
  }
  return CloneMMM;
}

/// Try to produce some number that indicates a function is getting smaller /
/// simpler.
static uint64_t computeMIRComplexityScoreImpl(const MachineFunction &MF) {
  uint64_t Score = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Add for stack objects
  Score += MFI.getNumObjects();

  // Add in the block count.
  Score += 2 * MF.size();

  const MachineRegisterInfo &MRI = MF.getRegInfo();
  for (unsigned I = 0, E = MRI.getNumVirtRegs(); I != E; ++I) {
    Register Reg = Register::index2VirtReg(I);
    if (const auto *Hints = MRI.getRegAllocationHints(Reg))
      Score += Hints->second.size();
  }

  for (const MachineBasicBlock &MBB : MF) {
    for (const MachineInstr &MI : MBB) {
      const unsigned Opc = MI.getOpcode();

      // Reductions may want or need to introduce implicit_defs, so don't count
      // them.
      // TODO: These probably should count in some way.
      if (Opc == TargetOpcode::IMPLICIT_DEF ||
          Opc == TargetOpcode::G_IMPLICIT_DEF)
        continue;

      // Each instruction adds to the score
      Score += 4;

      if (Opc == TargetOpcode::PHI || Opc == TargetOpcode::G_PHI ||
          Opc == TargetOpcode::INLINEASM || Opc == TargetOpcode::INLINEASM_BR)
        ++Score;

      if (MI.getFlags() != 0)
        ++Score;

      // Increase weight for more operands.
      for (const MachineOperand &MO : MI.operands()) {
        ++Score;

        // Treat registers as more complex.
        if (MO.isReg()) {
          ++Score;

          // And subregisters as even more complex.
          if (MO.getSubReg()) {
            ++Score;
            if (MO.isDef())
              ++Score;
          }
        } else if (MO.isRegMask())
          ++Score;
      }
    }
  }

  return Score;
}

uint64_t ReducerWorkItem::computeMIRComplexityScore() const {
  uint64_t Score = 0;

  for (const Function &F : getModule()) {
    if (auto *MF = MMI->getMachineFunction(F))
      Score += computeMIRComplexityScoreImpl(*MF);
  }

  return Score;
}

// FIXME: ReduceOperandsSkip has a similar function, except it uses larger
// numbers for the more reductive replacements.
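/// Assign a small weight to \p V for the IR complexity score: simple constants
/// count least, globals and arguments more, and instructions most.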
static unsigned classifyReductivePower(const Value *V) {
  if (auto *C = dyn_cast<ConstantData>(V)) {
    if (C->isNullValue())
      return 0;
    if (isa<UndefValue>(V))
      return 1;
    return 2;
  }

  if (isa<GlobalValue>(V))
    return 4;

  // TODO: Account for expression size
  if (isa<ConstantExpr>(V))
    return 4;

  if (isa<Constant>(V))
    return 3;

  if (isa<Argument>(V))
    return 5;

  if (isa<Instruction>(V))
    return 6;

  return 0;
}

// TODO: Additional flags and attributes may be complexity reducing. If we
// start adding flags and attributes, they could have negative cost.
static uint64_t computeIRComplexityScoreImpl(const Function &F) {
  uint64_t Score = 1; // Count the function itself
  SmallVector<std::pair<unsigned, MDNode *>> MDs;

  AttributeList Attrs = F.getAttributes();
  for (AttributeSet AttrSet : Attrs)
    Score += AttrSet.getNumAttributes();

  for (const BasicBlock &BB : F) {
    ++Score;

    for (const Instruction &I : BB) {
      ++Score;

      if (const auto *OverflowOp = dyn_cast<OverflowingBinaryOperator>(&I)) {
        if (OverflowOp->hasNoUnsignedWrap())
          ++Score;
        if (OverflowOp->hasNoSignedWrap())
          ++Score;
      } else if (const auto *Trunc = dyn_cast<TruncInst>(&I)) {
        if (Trunc->hasNoSignedWrap())
          ++Score;
        if (Trunc->hasNoUnsignedWrap())
          ++Score;
      } else if (const auto *ExactOp = dyn_cast<PossiblyExactOperator>(&I)) {
        if (ExactOp->isExact())
          ++Score;
      } else if (const auto *NNI = dyn_cast<PossiblyNonNegInst>(&I)) {
        if (NNI->hasNonNeg())
          ++Score;
      } else if (const auto *PDI = dyn_cast<PossiblyDisjointInst>(&I)) {
        if (PDI->isDisjoint())
          ++Score;
      } else if (const auto *GEP = dyn_cast<GEPOperator>(&I)) {
        if (GEP->isInBounds())
          ++Score;
        if (GEP->hasNoUnsignedSignedWrap())
          ++Score;
        if (GEP->hasNoUnsignedWrap())
          ++Score;
      } else if (const auto *FPOp = dyn_cast<FPMathOperator>(&I)) {
        FastMathFlags FMF = FPOp->getFastMathFlags();
        if (FMF.allowReassoc())
          ++Score;
        if (FMF.noNaNs())
          ++Score;
        if (FMF.noInfs())
          ++Score;
        if (FMF.noSignedZeros())
          ++Score;
        if (FMF.allowReciprocal())
          ++Score;
        if (FMF.allowContract())
          ++Score;
        if (FMF.approxFunc())
          ++Score;
      }

      for (const Value *Operand : I.operands()) {
        ++Score;
        Score += classifyReductivePower(Operand);
      }

      // Add the cost of metadata attached to the instruction.
      I.getAllMetadata(MDs);
      Score += MDs.size();
      MDs.clear();
    }
  }

  return Score;
}

uint64_t ReducerWorkItem::computeIRComplexityScore() const {
  uint64_t Score = 0;

  const Module &M = getModule();
  Score += M.named_metadata_size();

  SmallVector<std::pair<unsigned, MDNode *>, 32> GlobalMetadata;
  for (const GlobalVariable &GV : M.globals()) {
    ++Score;

    if (GV.hasInitializer())
      Score += classifyReductivePower(GV.getInitializer());

    // TODO: Account for linkage?

    GV.getAllMetadata(GlobalMetadata);
    Score += GlobalMetadata.size();
    GlobalMetadata.clear();
  }

  for (const GlobalAlias &GA : M.aliases())
    Score += classifyReductivePower(GA.getAliasee());

  for (const GlobalIFunc &GI : M.ifuncs())
    Score += classifyReductivePower(GI.getResolver());

  for (const Function &F : M)
    Score += computeIRComplexityScoreImpl(F);

  return Score;
}

void ReducerWorkItem::writeOutput(raw_ostream &OS, bool EmitBitcode) const {
  // Requesting bitcode emission with mir is nonsense, so just ignore it.
  if (EmitBitcode && !isMIR())
    writeBitcode(OS);
  else
    print(OS, /*AnnotationWriter=*/nullptr);
}

void ReducerWorkItem::readBitcode(MemoryBufferRef Data, LLVMContext &Ctx,
                                  StringRef ToolName) {
  Expected<BitcodeFileContents> IF = llvm::getBitcodeFileContents(Data);
  if (!IF) {
    WithColor::error(errs(), ToolName) << IF.takeError();
    exit(1);
  }

  BitcodeModule BM = IF->Mods[0];
  Expected<BitcodeLTOInfo> LI = BM.getLTOInfo();
  Expected<std::unique_ptr<Module>> MOrErr = BM.parseModule(Ctx);
  if (!LI || !MOrErr) {
    WithColor::error(errs(), ToolName) << IF.takeError();
    exit(1);
  }

  LTOInfo = std::make_unique<BitcodeLTOInfo>(*LI);
  M = std::move(MOrErr.get());
}

void ReducerWorkItem::writeBitcode(raw_ostream &OutStream) const {
  if (LTOInfo && LTOInfo->IsThinLTO && LTOInfo->EnableSplitLTOUnit) {
    PassBuilder PB;
    LoopAnalysisManager LAM;
    FunctionAnalysisManager FAM;
    CGSCCAnalysisManager CGAM;
    ModuleAnalysisManager MAM;
    PB.registerModuleAnalyses(MAM);
    PB.registerCGSCCAnalyses(CGAM);
    PB.registerFunctionAnalyses(FAM);
    PB.registerLoopAnalyses(LAM);
    PB.crossRegisterProxies(LAM, FAM, CGAM, MAM);
    ModulePassManager MPM;
    MPM.addPass(ThinLTOBitcodeWriterPass(OutStream, nullptr));
    MPM.run(*M, MAM);
  } else {
    std::unique_ptr<ModuleSummaryIndex> Index;
    if (LTOInfo && LTOInfo->HasSummary) {
      ProfileSummaryInfo PSI(*M);
      Index = std::make_unique<ModuleSummaryIndex>(
          buildModuleSummaryIndex(*M, nullptr, &PSI));
    }

    WriteBitcodeToFile(getModule(), OutStream,
                       /*ShouldPreserveUseListOrder=*/true, Index.get());
  }
}

std::pair<std::unique_ptr<ReducerWorkItem>, bool>
llvm::parseReducerWorkItem(StringRef ToolName, StringRef Filename,
                           LLVMContext &Ctxt,
                           std::unique_ptr<TargetMachine> &TM, bool IsMIR) {
  bool IsBitcode = false;
  Triple TheTriple;

  auto MMM = std::make_unique<ReducerWorkItem>();

  if (IsMIR) {
    initializeTargetInfo();

    auto FileOrErr = MemoryBuffer::getFileOrSTDIN(Filename, /*IsText=*/true);
    if (std::error_code EC = FileOrErr.getError()) {
      WithColor::error(errs(), ToolName) << EC.message() << '\n';
      return {nullptr, false};
    }

    std::unique_ptr<MIRParser> MParser =
        createMIRParser(std::move(FileOrErr.get()), Ctxt);

    auto SetDataLayout = [&](StringRef DataLayoutTargetTriple,
                             StringRef OldDLStr) -> std::optional<std::string> {
      // NB: We always call createTargetMachineForTriple() even if an explicit
      // DataLayout is already set in the module since we want to use this
      // callback to setup the TargetMachine rather than doing it later.
      std::string IRTargetTriple = DataLayoutTargetTriple.str();
      if (!TargetTriple.empty())
        IRTargetTriple = Triple::normalize(TargetTriple);
      TheTriple = Triple(IRTargetTriple);
      if (TheTriple.getTriple().empty())
        TheTriple.setTriple(sys::getDefaultTargetTriple());
      ExitOnError ExitOnErr(std::string(ToolName) + ": error: ");
      TM = ExitOnErr(codegen::createTargetMachineForTriple(TheTriple.str()));

      return TM->createDataLayout().getStringRepresentation();
    };

    std::unique_ptr<Module> M = MParser->parseIRModule(SetDataLayout);

    MMM->MMI = std::make_unique<MachineModuleInfo>(TM.get());
    MParser->parseMachineFunctions(*M, *MMM->MMI);
    MMM->M = std::move(M);
  } else {
    SMDiagnostic Err;
    ErrorOr<std::unique_ptr<MemoryBuffer>> MB =
        MemoryBuffer::getFileOrSTDIN(Filename);
    if (std::error_code EC = MB.getError()) {
      WithColor::error(errs(), ToolName)
          << Filename << ": " << EC.message() << "\n";
      return {nullptr, false};
    }

    if (!isBitcode((const unsigned char *)(*MB)->getBufferStart(),
                   (const unsigned char *)(*MB)->getBufferEnd())) {
      std::unique_ptr<Module> Result = parseIR(**MB, Err, Ctxt);
      if (!Result) {
        Err.print(ToolName.data(), errs());
        return {nullptr, false};
      }
      MMM->M = std::move(Result);
    } else {
      IsBitcode = true;
      MMM->readBitcode(MemoryBufferRef(**MB), Ctxt, ToolName);

      if (MMM->LTOInfo->IsThinLTO && MMM->LTOInfo->EnableSplitLTOUnit)
        initializeTargetInfo();
    }
  }

  if (MMM->verify(&errs())) {
    WithColor::error(errs(), ToolName)
        << Filename << " - input module is broken!\n";
    return {nullptr, false};
  }

  return {std::move(MMM), IsBitcode};
}