//===- R600ControlFlowFinalizer.cpp - Finalize Control Flow Inst ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass turns all control flow pseudo instructions into native ones,
/// computing their addresses on the fly; it also sets the STACK_SIZE info.
//
//===----------------------------------------------------------------------===//
#include "MCTargetDesc/R600MCTargetDesc.h"
#include "R600MachineFunctionInfo.h"
#include "R600Subtarget.h"
#include "llvm/CodeGen/MachineFunctionPass.h"

#define DEBUG_TYPE "r600cf"
using namespace llvm;

namespace {

struct CFStack {
  enum StackItem {
    ENTRY = 0,
    SUB_ENTRY = 1,
    FIRST_NON_WQM_PUSH = 2,
    FIRST_NON_WQM_PUSH_W_FULL_ENTRY = 3
  };

  const R600Subtarget *ST;
  std::vector<StackItem> BranchStack;
  std::vector<StackItem> LoopStack;
  unsigned MaxStackSize;
  unsigned CurrentEntries = 0;
  unsigned CurrentSubEntries = 0;

  CFStack(const R600Subtarget *st, CallingConv::ID cc) : ST(st),
      // We need to reserve a stack entry for CALL_FS in vertex shaders.
      MaxStackSize(cc == CallingConv::AMDGPU_VS ? 1 : 0) {}

  unsigned getLoopDepth();
  bool branchStackContains(CFStack::StackItem);
  bool requiresWorkAroundForInst(unsigned Opcode);
  unsigned getSubEntrySize(CFStack::StackItem Item);
  void updateMaxStackSize();
  void pushBranch(unsigned Opcode, bool isWQM = false);
  void pushLoop();
  void popBranch();
  void popLoop();
};

unsigned CFStack::getLoopDepth() {
  return LoopStack.size();
}

bool CFStack::branchStackContains(CFStack::StackItem Item) {
  return llvm::is_contained(BranchStack, Item);
}
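
// Returns true if emitting Opcode with the current stack contents requires
// the hardware stack-size work-around.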
bool CFStack::requiresWorkAroundForInst(unsigned Opcode) {
  if (Opcode == R600::CF_ALU_PUSH_BEFORE && ST->hasCaymanISA() &&
      getLoopDepth() > 1)
    return true;

  if (!ST->hasCFAluBug())
    return false;

  switch (Opcode) {
  default: return false;
  case R600::CF_ALU_PUSH_BEFORE:
  case R600::CF_ALU_ELSE_AFTER:
  case R600::CF_ALU_BREAK:
  case R600::CF_ALU_CONTINUE:
    if (CurrentSubEntries == 0)
      return false;

    if (ST->getWavefrontSize() == 64) {
      // We are being conservative here.  We only require this work-around if
      // CurrentSubEntries > 3 &&
      // (CurrentSubEntries % 4 == 3 || CurrentSubEntries % 4 == 0)
      //
      // We have to be conservative, because we don't know for certain that
      // our stack allocation algorithm for Evergreen/NI is correct.  Applying
      // this work-around when CurrentSubEntries > 3 allows us to over-allocate
      // stack resources without any problems.
      return CurrentSubEntries > 3;
    }
    assert(ST->getWavefrontSize() == 32);
    // We are being conservative here.  We only require the work-around if
    // CurrentSubEntries > 7 &&
    // (CurrentSubEntries % 8 == 7 || CurrentSubEntries % 8 == 0)
    // See the comment on the wavefront size == 64 case for why we are
    // being conservative.
    return CurrentSubEntries > 7;
  }
}
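
// Returns how many stack sub-entries the given item kind occupies.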
unsigned CFStack::getSubEntrySize(CFStack::StackItem Item) {
  switch (Item) {
  default:
    return 0;
  case CFStack::FIRST_NON_WQM_PUSH:
    assert(!ST->hasCaymanISA());
    if (ST->getGeneration() <= AMDGPUSubtarget::R700) {
      // +1 For the push operation.
      // +2 Extra space required.
      return 3;
    } else {
      // Some documentation says that this is not necessary on Evergreen,
      // but experimentation has shown that we need to allocate 1 extra
      // sub-entry for the first non-WQM push.
      // +1 For the push operation.
      // +1 Extra space required.
      return 2;
    }
  case CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY:
    assert(ST->getGeneration() >= AMDGPUSubtarget::EVERGREEN);
    // +1 For the push operation.
    // +1 Extra space required.
    return 2;
  case CFStack::SUB_ENTRY:
    return 1;
  }
}
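
// Records the current stack depth (full entries plus packed sub-entries) if
// it is the largest seen so far.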
void CFStack::updateMaxStackSize() {
  unsigned CurrentStackSize = CurrentEntries + divideCeil(CurrentSubEntries, 4);
  MaxStackSize = std::max(CurrentStackSize, MaxStackSize);
}
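
// Pushes a branch item of the appropriate kind for Opcode and updates the
// entry/sub-entry accounting.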
void CFStack::pushBranch(unsigned Opcode, bool isWQM) {
  CFStack::StackItem Item = CFStack::ENTRY;
  switch (Opcode) {
  case R600::CF_PUSH_EG:
  case R600::CF_ALU_PUSH_BEFORE:
    if (!isWQM) {
      if (!ST->hasCaymanISA() &&
          !branchStackContains(CFStack::FIRST_NON_WQM_PUSH))
        Item = CFStack::FIRST_NON_WQM_PUSH; // May not be required on
                                            // Evergreen/NI.  See comment in
                                            // CFStack::getSubEntrySize()
      else if (CurrentEntries > 0 &&
               ST->getGeneration() > AMDGPUSubtarget::EVERGREEN &&
               !ST->hasCaymanISA() &&
               !branchStackContains(CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY))
        Item = CFStack::FIRST_NON_WQM_PUSH_W_FULL_ENTRY;
      else
        Item = CFStack::SUB_ENTRY;
    } else
      Item = CFStack::ENTRY;
    break;
  }
  BranchStack.push_back(Item);
  if (Item == CFStack::ENTRY)
    CurrentEntries++;
  else
    CurrentSubEntries += getSubEntrySize(Item);
  updateMaxStackSize();
}
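
// Loops always occupy a full stack entry; the pop functions undo the
// accounting performed by the matching push.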
void CFStack::pushLoop() {
  LoopStack.push_back(CFStack::ENTRY);
  CurrentEntries++;
  updateMaxStackSize();
}

void CFStack::popBranch() {
  CFStack::StackItem Top = BranchStack.back();
  if (Top == CFStack::ENTRY)
    CurrentEntries--;
  else
    CurrentSubEntries -= getSubEntrySize(Top);
  BranchStack.pop_back();
}

void CFStack::popLoop() {
  CurrentEntries--;
  LoopStack.pop_back();
}

class R600ControlFlowFinalizer : public MachineFunctionPass {
private:
  using ClauseFile = std::pair<MachineInstr *, std::vector<MachineInstr *>>;

  enum ControlFlowInstruction {
    CF_TC,
    CF_VC,
    CF_CALL_FS,
    CF_WHILE_LOOP,
    CF_END_LOOP,
    CF_LOOP_BREAK,
    CF_LOOP_CONTINUE,
    CF_JUMP,
    CF_ELSE,
    CF_POP,
    CF_END
  };

  const R600InstrInfo *TII = nullptr;
  const R600RegisterInfo *TRI = nullptr;
  unsigned MaxFetchInst;
  const R600Subtarget *ST = nullptr;

  bool IsTrivialInst(MachineInstr &MI) const {
    switch (MI.getOpcode()) {
    case R600::KILL:
    case R600::RETURN:
      return true;
    default:
      return false;
    }
  }
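
  // Maps a generic control-flow instruction to the generation-specific
  // hardware opcode (R600 vs. Evergreen/Cayman).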
  const MCInstrDesc &getHWInstrDesc(ControlFlowInstruction CFI) const {
    unsigned Opcode = 0;
    bool isEg = (ST->getGeneration() >= AMDGPUSubtarget::EVERGREEN);
    switch (CFI) {
    case CF_TC:
      Opcode = isEg ? R600::CF_TC_EG : R600::CF_TC_R600;
      break;
    case CF_VC:
      Opcode = isEg ? R600::CF_VC_EG : R600::CF_VC_R600;
      break;
    case CF_CALL_FS:
      Opcode = isEg ? R600::CF_CALL_FS_EG : R600::CF_CALL_FS_R600;
      break;
    case CF_WHILE_LOOP:
      Opcode = isEg ? R600::WHILE_LOOP_EG : R600::WHILE_LOOP_R600;
      break;
    case CF_END_LOOP:
      Opcode = isEg ? R600::END_LOOP_EG : R600::END_LOOP_R600;
      break;
    case CF_LOOP_BREAK:
      Opcode = isEg ? R600::LOOP_BREAK_EG : R600::LOOP_BREAK_R600;
      break;
    case CF_LOOP_CONTINUE:
      Opcode = isEg ? R600::CF_CONTINUE_EG : R600::CF_CONTINUE_R600;
      break;
    case CF_JUMP:
      Opcode = isEg ? R600::CF_JUMP_EG : R600::CF_JUMP_R600;
      break;
    case CF_ELSE:
      Opcode = isEg ? R600::CF_ELSE_EG : R600::CF_ELSE_R600;
      break;
    case CF_POP:
      Opcode = isEg ? R600::POP_EG : R600::POP_R600;
      break;
    case CF_END:
      if (ST->hasCaymanISA()) {
        Opcode = R600::CF_END_CM;
        break;
      }
      Opcode = isEg ? R600::CF_END_EG : R600::CF_END_R600;
      break;
    }
    assert(Opcode && "No opcode selected");
    return TII->get(Opcode);
  }
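
  // An instruction may only join the current fetch clause if it does not read
  // a register written by an earlier instruction of the same clause; DstRegs
  // accumulates the written registers.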
  bool isCompatibleWithClause(const MachineInstr &MI,
                              std::set<unsigned> &DstRegs) const {
    unsigned DstMI, SrcMI;
    for (MachineInstr::const_mop_iterator I = MI.operands_begin(),
                                          E = MI.operands_end();
         I != E; ++I) {
      const MachineOperand &MO = *I;
      if (!MO.isReg())
        continue;
      if (MO.isDef()) {
        Register Reg = MO.getReg();
        if (R600::R600_Reg128RegClass.contains(Reg))
          DstMI = Reg;
        else
          DstMI = TRI->getMatchingSuperReg(Reg,
              R600RegisterInfo::getSubRegFromChannel(TRI->getHWRegChan(Reg)),
              &R600::R600_Reg128RegClass);
      }
      if (MO.isUse()) {
        Register Reg = MO.getReg();
        if (R600::R600_Reg128RegClass.contains(Reg))
          SrcMI = Reg;
        else
          SrcMI = TRI->getMatchingSuperReg(Reg,
              R600RegisterInfo::getSubRegFromChannel(TRI->getHWRegChan(Reg)),
              &R600::R600_Reg128RegClass);
      }
    }
    if ((DstRegs.find(SrcMI) == DstRegs.end())) {
      DstRegs.insert(DstMI);
      return true;
    }
    return false;
  }
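
  // Greedily collects consecutive texture or vertex fetch instructions
  // starting at I into a single clause headed by a CF_TC/CF_VC instruction
  // that carries the instruction count.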
  ClauseFile
  MakeFetchClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
      const {
    MachineBasicBlock::iterator ClauseHead = I;
    std::vector<MachineInstr *> ClauseContent;
    unsigned AluInstCount = 0;
    bool IsTex = TII->usesTextureCache(*ClauseHead);
    std::set<unsigned> DstRegs;
    for (MachineBasicBlock::iterator E = MBB.end(); I != E; ++I) {
      if (IsTrivialInst(*I))
        continue;
      if (AluInstCount >= MaxFetchInst)
        break;
      if ((IsTex && !TII->usesTextureCache(*I)) ||
          (!IsTex && !TII->usesVertexCache(*I)))
        break;
      if (!isCompatibleWithClause(*I, DstRegs))
        break;
      AluInstCount++;
      ClauseContent.push_back(&*I);
    }
    MachineInstr *MIb = BuildMI(MBB, ClauseHead, MBB.findDebugLoc(ClauseHead),
                                getHWInstrDesc(IsTex ? CF_TC : CF_VC))
                            .addImm(0)                 // ADDR
                            .addImm(AluInstCount - 1); // COUNT
    return ClauseFile(MIb, std::move(ClauseContent));
  }
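
  // Rewrites the ALU_LITERAL_X sources of MI to one of the four literal
  // channels, reusing a channel when the same immediate already appears in
  // Lits.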
  void getLiteral(MachineInstr &MI, std::vector<MachineOperand *> &Lits) const {
    static const unsigned LiteralRegs[] = {
      R600::ALU_LITERAL_X,
      R600::ALU_LITERAL_Y,
      R600::ALU_LITERAL_Z,
      R600::ALU_LITERAL_W
    };
    const SmallVector<std::pair<MachineOperand *, int64_t>, 3> Srcs =
        TII->getSrcs(MI);
    for (const auto &Src : Srcs) {
      if (Src.first->getReg() != R600::ALU_LITERAL_X)
        continue;
      int64_t Imm = Src.second;
      std::vector<MachineOperand *>::iterator It =
          llvm::find_if(Lits, [&](MachineOperand *val) {
            return val->isImm() && (val->getImm() == Imm);
          });

      // Get corresponding Operand
      MachineOperand &Operand = MI.getOperand(
          TII->getOperandIdx(MI.getOpcode(), R600::OpName::literal));

      if (It != Lits.end()) {
        // Reuse existing literal reg
        unsigned Index = It - Lits.begin();
        Src.first->setReg(LiteralRegs[Index]);
      } else {
        // Allocate new literal reg
        assert(Lits.size() < 4 && "Too many literals in Instruction Group");
        Src.first->setReg(LiteralRegs[Lits.size()]);
        Lits.push_back(&Operand);
      }
    }
  }
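
  // Emits LITERALS instructions, packing two literal values per instruction,
  // and returns the updated insertion point.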
  MachineBasicBlock::iterator insertLiterals(
      MachineBasicBlock::iterator InsertPos,
      const std::vector<unsigned> &Literals) const {
    MachineBasicBlock *MBB = InsertPos->getParent();
    for (unsigned i = 0, e = Literals.size(); i < e; i += 2) {
      unsigned LiteralPair0 = Literals[i];
      unsigned LiteralPair1 = (i + 1 < e) ? Literals[i + 1] : 0;
      InsertPos = BuildMI(MBB, InsertPos->getDebugLoc(),
                          TII->get(R600::LITERALS))
                      .addImm(LiteralPair0)
                      .addImm(LiteralPair1);
    }
    return InsertPos;
  }
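
  // Collects the ALU instructions following the clause head at I (unbundling
  // instruction groups and materializing their literals) and records the
  // final instruction count in the clause head.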
  ClauseFile
  MakeALUClause(MachineBasicBlock &MBB, MachineBasicBlock::iterator &I)
      const {
    MachineInstr &ClauseHead = *I;
    std::vector<MachineInstr *> ClauseContent;
    I++;
    for (MachineBasicBlock::instr_iterator E = MBB.instr_end(); I != E;) {
      if (IsTrivialInst(*I)) {
        ++I;
        continue;
      }
      if (!I->isBundle() && !TII->isALUInstr(I->getOpcode()))
        break;
      std::vector<MachineOperand *> Literals;
      if (I->isBundle()) {
        MachineInstr &DeleteMI = *I;
        MachineBasicBlock::instr_iterator BI = I.getInstrIterator();
        while (++BI != E && BI->isBundledWithPred()) {
          BI->unbundleFromPred();
          for (MachineOperand &MO : BI->operands()) {
            if (MO.isReg() && MO.isInternalRead())
              MO.setIsInternalRead(false);
          }
          getLiteral(*BI, Literals);
          ClauseContent.push_back(&*BI);
        }
        I = BI;
        DeleteMI.eraseFromParent();
      } else {
        getLiteral(*I, Literals);
        ClauseContent.push_back(&*I);
        I++;
      }
      for (unsigned i = 0, e = Literals.size(); i < e; i += 2) {
        MachineInstrBuilder MILit = BuildMI(MBB, I, I->getDebugLoc(),
                                            TII->get(R600::LITERALS));
        if (Literals[i]->isImm()) {
          MILit.addImm(Literals[i]->getImm());
        } else {
          MILit.addGlobalAddress(Literals[i]->getGlobal(),
                                 Literals[i]->getOffset());
        }
        if (i + 1 < e) {
          if (Literals[i + 1]->isImm()) {
            MILit.addImm(Literals[i + 1]->getImm());
          } else {
            MILit.addGlobalAddress(Literals[i + 1]->getGlobal(),
                                   Literals[i + 1]->getOffset());
          }
        } else
          MILit.addImm(0);
        ClauseContent.push_back(MILit);
      }
    }
    assert(ClauseContent.size() < 128 && "ALU clause is too big");
    ClauseHead.getOperand(7).setImm(ClauseContent.size() - 1);
    return ClauseFile(&ClauseHead, std::move(ClauseContent));
  }
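
  // Splices a previously built fetch clause to InsertPos, patches the clause
  // head address, and advances CfCount (each fetch instruction occupies two
  // control-flow words).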
  void EmitFetchClause(MachineBasicBlock::iterator InsertPos,
                       const DebugLoc &DL, ClauseFile &Clause,
                       unsigned &CfCount) {
    CounterPropagateAddr(*Clause.first, CfCount);
    MachineBasicBlock *BB = Clause.first->getParent();
    BuildMI(BB, DL, TII->get(R600::FETCH_CLAUSE)).addImm(CfCount);
    for (MachineInstr *MI : Clause.second)
      BB->splice(InsertPos, BB, MI);
    CfCount += 2 * Clause.second.size();
  }
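
  // Same as EmitFetchClause, but for ALU clauses, where each instruction
  // occupies a single control-flow word.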
  void EmitALUClause(MachineBasicBlock::iterator InsertPos, const DebugLoc &DL,
                     ClauseFile &Clause, unsigned &CfCount) {
    Clause.first->getOperand(0).setImm(0);
    CounterPropagateAddr(*Clause.first, CfCount);
    MachineBasicBlock *BB = Clause.first->getParent();
    BuildMI(BB, DL, TII->get(R600::ALU_CLAUSE)).addImm(CfCount);
    for (MachineInstr *MI : Clause.second)
      BB->splice(InsertPos, BB, MI);
    CfCount += Clause.second.size();
  }
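
  // Adds the now-known clause address to the address operand of the given
  // control-flow instruction(s).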
  void CounterPropagateAddr(MachineInstr &MI, unsigned Addr) const {
    MI.getOperand(0).setImm(Addr + MI.getOperand(0).getImm());
  }

  void CounterPropagateAddr(const std::set<MachineInstr *> &MIs,
                            unsigned Addr) const {
    for (MachineInstr *MI : MIs) {
      CounterPropagateAddr(*MI, Addr);
    }
  }

public:
  static char ID;

  R600ControlFlowFinalizer() : MachineFunctionPass(ID) {}
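
  // Walks each basic block, replacing control-flow pseudo instructions with
  // native ones, forming fetch and ALU clauses, and recording the maximum
  // control-flow stack depth in the function info.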
  bool runOnMachineFunction(MachineFunction &MF) override {
    ST = &MF.getSubtarget<R600Subtarget>();
    MaxFetchInst = ST->getTexVTXClauseSize();
    TII = ST->getInstrInfo();
    TRI = ST->getRegisterInfo();

    R600MachineFunctionInfo *MFI = MF.getInfo<R600MachineFunctionInfo>();

    CFStack CFStack(ST, MF.getFunction().getCallingConv());
    for (MachineFunction::iterator MB = MF.begin(), ME = MF.end(); MB != ME;
         ++MB) {
      MachineBasicBlock &MBB = *MB;
      unsigned CfCount = 0;
      std::vector<std::pair<unsigned, std::set<MachineInstr *>>> LoopStack;
      std::vector<MachineInstr *> IfThenElseStack;
      if (MF.getFunction().getCallingConv() == CallingConv::AMDGPU_VS) {
        BuildMI(MBB, MBB.begin(), MBB.findDebugLoc(MBB.begin()),
                getHWInstrDesc(CF_CALL_FS));
        CfCount++;
      }
      std::vector<ClauseFile> FetchClauses, AluClauses;
      std::vector<MachineInstr *> LastAlu(1);
      std::vector<MachineInstr *> ToPopAfter;

      for (MachineBasicBlock::iterator I = MBB.begin(), E = MBB.end();
           I != E;) {
        if (TII->usesTextureCache(*I) || TII->usesVertexCache(*I)) {
          LLVM_DEBUG(dbgs() << CfCount << ":"; I->dump(););
          FetchClauses.push_back(MakeFetchClause(MBB, I));
          CfCount++;
          LastAlu.back() = nullptr;
          continue;
        }

        MachineBasicBlock::iterator MI = I;
        if (MI->getOpcode() != R600::ENDIF)
          LastAlu.back() = nullptr;
        if (MI->getOpcode() == R600::CF_ALU)
          LastAlu.back() = &*MI;
        I++;
        bool RequiresWorkAround =
            CFStack.requiresWorkAroundForInst(MI->getOpcode());
        switch (MI->getOpcode()) {
        case R600::CF_ALU_PUSH_BEFORE:
          if (RequiresWorkAround) {
            LLVM_DEBUG(dbgs()
                       << "Applying bug work-around for ALU_PUSH_BEFORE\n");
            BuildMI(MBB, MI, MBB.findDebugLoc(MI), TII->get(R600::CF_PUSH_EG))
                .addImm(CfCount + 1)
                .addImm(1);
            MI->setDesc(TII->get(R600::CF_ALU));
            CfCount++;
            CFStack.pushBranch(R600::CF_PUSH_EG);
          } else
            CFStack.pushBranch(R600::CF_ALU_PUSH_BEFORE);
          [[fallthrough]];
        case R600::CF_ALU:
          I = MI;
          AluClauses.push_back(MakeALUClause(MBB, I));
          LLVM_DEBUG(dbgs() << CfCount << ":"; MI->dump(););
          CfCount++;
          break;
        case R600::WHILELOOP: {
          CFStack.pushLoop();
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
                                      getHWInstrDesc(CF_WHILE_LOOP))
                                  .addImm(1);
          std::pair<unsigned, std::set<MachineInstr *>> Pair(CfCount,
              std::set<MachineInstr *>());
          Pair.second.insert(MIb);
          LoopStack.push_back(std::move(Pair));
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case R600::ENDLOOP: {
          CFStack.popLoop();
          std::pair<unsigned, std::set<MachineInstr *>> Pair =
              std::move(LoopStack.back());
          LoopStack.pop_back();
          CounterPropagateAddr(Pair.second, CfCount);
          BuildMI(MBB, MI, MBB.findDebugLoc(MI), getHWInstrDesc(CF_END_LOOP))
              .addImm(Pair.first + 1);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case R600::IF_PREDICATE_SET: {
          LastAlu.push_back(nullptr);
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
                                      getHWInstrDesc(CF_JUMP))
                                  .addImm(0)
                                  .addImm(0);
          IfThenElseStack.push_back(MIb);
          LLVM_DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case R600::ELSE: {
          MachineInstr *JumpInst = IfThenElseStack.back();
          IfThenElseStack.pop_back();
          CounterPropagateAddr(*JumpInst, CfCount);
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
                                      getHWInstrDesc(CF_ELSE))
                                  .addImm(0)
                                  .addImm(0);
          LLVM_DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
          IfThenElseStack.push_back(MIb);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case R600::ENDIF: {
          CFStack.popBranch();
          if (LastAlu.back()) {
            ToPopAfter.push_back(LastAlu.back());
          } else {
            MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
                                        getHWInstrDesc(CF_POP))
                                    .addImm(CfCount + 1)
                                    .addImm(1);
            LLVM_DEBUG(dbgs() << CfCount << ":"; MIb->dump(););
            CfCount++;
          }

          MachineInstr *IfOrElseInst = IfThenElseStack.back();
          IfThenElseStack.pop_back();
          CounterPropagateAddr(*IfOrElseInst, CfCount);
          IfOrElseInst->getOperand(1).setImm(1);
          LastAlu.pop_back();
          MI->eraseFromParent();
          break;
        }
        case R600::BREAK: {
          CfCount++;
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
                                      getHWInstrDesc(CF_LOOP_BREAK))
                                  .addImm(0);
          LoopStack.back().second.insert(MIb);
          MI->eraseFromParent();
          break;
        }
        case R600::CONTINUE: {
          MachineInstr *MIb = BuildMI(MBB, MI, MBB.findDebugLoc(MI),
                                      getHWInstrDesc(CF_LOOP_CONTINUE))
                                  .addImm(0);
          LoopStack.back().second.insert(MIb);
          MI->eraseFromParent();
          CfCount++;
          break;
        }
        case R600::RETURN: {
          DebugLoc DL = MBB.findDebugLoc(MI);
          BuildMI(MBB, MI, DL, getHWInstrDesc(CF_END));
          CfCount++;
          if (CfCount % 2) {
            BuildMI(MBB, I, DL, TII->get(R600::PAD));
            CfCount++;
          }
          MI->eraseFromParent();
          for (ClauseFile &CF : FetchClauses)
            EmitFetchClause(I, DL, CF, CfCount);
          for (ClauseFile &CF : AluClauses)
            EmitALUClause(I, DL, CF, CfCount);
          break;
        }
        default:
          if (TII->isExport(MI->getOpcode())) {
            LLVM_DEBUG(dbgs() << CfCount << ":"; MI->dump(););
            CfCount++;
          }
          break;
        }
      }

      for (MachineInstr *Alu : ToPopAfter) {
        BuildMI(MBB, Alu, MBB.findDebugLoc((MachineBasicBlock::iterator)Alu),
                TII->get(R600::CF_ALU_POP_AFTER))
            .addImm(Alu->getOperand(0).getImm())
            .addImm(Alu->getOperand(1).getImm())
            .addImm(Alu->getOperand(2).getImm())
            .addImm(Alu->getOperand(3).getImm())
            .addImm(Alu->getOperand(4).getImm())
            .addImm(Alu->getOperand(5).getImm())
            .addImm(Alu->getOperand(6).getImm())
            .addImm(Alu->getOperand(7).getImm())
            .addImm(Alu->getOperand(8).getImm());
        Alu->eraseFromParent();
      }
      MFI->CFStackSize = CFStack.MaxStackSize;
    }

    return false;
  }

  StringRef getPassName() const override {
    return "R600 Control Flow Finalizer Pass";
  }
};

} // end anonymous namespace

INITIALIZE_PASS_BEGIN(R600ControlFlowFinalizer, DEBUG_TYPE,
                      "R600 Control Flow Finalizer", false, false)
INITIALIZE_PASS_END(R600ControlFlowFinalizer, DEBUG_TYPE,
                    "R600 Control Flow Finalizer", false, false)

char R600ControlFlowFinalizer::ID = 0;

char &llvm::R600ControlFlowFinalizerID = R600ControlFlowFinalizer::ID;

FunctionPass *llvm::createR600ControlFlowFinalizer() {
  return new R600ControlFlowFinalizer();
}