//===--- X86DomainReassignment.cpp - Selectively switch register classes---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass attempts to find instruction chains (closures) in one domain,
// and convert them to equivalent instructions in a different domain,
// if profitable.
//
//===----------------------------------------------------------------------===//
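// Illustrative sketch (not taken from this file or its tests): a GPR chain
// such as
//   %1:gr16 = MOV16rm <mem>
//   %2:gr16 = AND16rr %1, %0
//   MOV16mr <mem>, %2
// can, when profitable, be rewritten into the mask domain as
//   %1:vk16 = KMOVWkm <mem>
//   %2:vk16 = KANDWrr %1, %0
//   KMOVWmk <mem>, %2
// removing the cross-domain copies that would otherwise surround the chain.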
16 #include "X86InstrInfo.h"
17 #include "X86Subtarget.h"
18 #include "llvm/ADT/DenseMap.h"
19 #include "llvm/ADT/DenseMapInfo.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/CodeGen/MachineFunctionPass.h"
24 #include "llvm/CodeGen/MachineInstrBuilder.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/TargetRegisterInfo.h"
27 #include "llvm/Support/Debug.h"
28 #include "llvm/Support/Printable.h"
33 #define DEBUG_TYPE "x86-domain-reassignment"
STATISTIC(NumClosuresConverted, "Number of closures converted by the pass");

static cl::opt<bool> DisableX86DomainReassignment(
    "disable-x86-domain-reassignment", cl::Hidden,
    cl::desc("X86: Disable Virtual Register Reassignment."), cl::init(false));
namespace {
enum RegDomain { NoDomain = -1, GPRDomain, MaskDomain, OtherDomain, NumDomains };
static bool isGPR(const TargetRegisterClass *RC) {
  return X86::GR64RegClass.hasSubClassEq(RC) ||
         X86::GR32RegClass.hasSubClassEq(RC) ||
         X86::GR16RegClass.hasSubClassEq(RC) ||
         X86::GR8RegClass.hasSubClassEq(RC);
}
static bool isMask(const TargetRegisterClass *RC,
                   const TargetRegisterInfo *TRI) {
  return X86::VK16RegClass.hasSubClassEq(RC);
}
static RegDomain getDomain(const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI) {
  if (isGPR(RC))
    return GPRDomain;
  if (isMask(RC, TRI))
    return MaskDomain;
  return OtherDomain;
}
/// Return a register class equivalent to \p SrcRC, in \p Domain.
static const TargetRegisterClass *getDstRC(const TargetRegisterClass *SrcRC,
                                           RegDomain Domain) {
  assert(Domain == MaskDomain && "add domain");
  if (X86::GR8RegClass.hasSubClassEq(SrcRC))
    return &X86::VK8RegClass;
  if (X86::GR16RegClass.hasSubClassEq(SrcRC))
    return &X86::VK16RegClass;
  if (X86::GR32RegClass.hasSubClassEq(SrcRC))
    return &X86::VK32RegClass;
  if (X86::GR64RegClass.hasSubClassEq(SrcRC))
    return &X86::VK64RegClass;
  llvm_unreachable("add register class");
}
/// Abstract Instruction Converter class.
class InstrConverterBase {
protected:
  unsigned SrcOpcode;

public:
  InstrConverterBase(unsigned SrcOpcode) : SrcOpcode(SrcOpcode) {}

  virtual ~InstrConverterBase() {}

  /// \returns true if \p MI is legal to convert.
  virtual bool isLegal(const MachineInstr *MI,
                       const TargetInstrInfo *TII) const {
    assert(MI->getOpcode() == SrcOpcode &&
           "Wrong instruction passed to converter");
    return true;
  }

  /// Applies conversion to \p MI.
  ///
  /// \returns true if \p MI is no longer needed, and can be deleted.
  virtual bool convertInstr(MachineInstr *MI, const TargetInstrInfo *TII,
                            MachineRegisterInfo *MRI) const = 0;

  /// \returns the cost increment incurred by converting \p MI.
  virtual double getExtraCost(const MachineInstr *MI,
                              MachineRegisterInfo *MRI) const = 0;
};
/// An Instruction Converter which ignores the given instruction.
/// For example, PHI instructions can be safely ignored since only the
/// registers need to change.
class InstrIgnore : public InstrConverterBase {
public:
  InstrIgnore(unsigned SrcOpcode) : InstrConverterBase(SrcOpcode) {}

  bool convertInstr(MachineInstr *MI, const TargetInstrInfo *TII,
                    MachineRegisterInfo *MRI) const override {
    assert(isLegal(MI, TII) && "Cannot convert instruction");
    return false;
  }

  double getExtraCost(const MachineInstr *MI,
                      MachineRegisterInfo *MRI) const override {
    return 0;
  }
};
/// An Instruction Converter which replaces an instruction with another.
class InstrReplacer : public InstrConverterBase {
public:
  /// Opcode of the destination instruction.
  unsigned DstOpcode;

  InstrReplacer(unsigned SrcOpcode, unsigned DstOpcode)
      : InstrConverterBase(SrcOpcode), DstOpcode(DstOpcode) {}

  bool isLegal(const MachineInstr *MI,
               const TargetInstrInfo *TII) const override {
    if (!InstrConverterBase::isLegal(MI, TII))
      return false;
    // It's illegal to replace an instruction that implicitly defines a
    // register with an instruction that doesn't, unless that register is dead.
    for (const auto &MO : MI->implicit_operands())
      if (MO.isReg() && MO.isDef() && !MO.isDead() &&
          !TII->get(DstOpcode).hasImplicitDefOfPhysReg(MO.getReg()))
        return false;
    return true;
  }

  bool convertInstr(MachineInstr *MI, const TargetInstrInfo *TII,
                    MachineRegisterInfo *MRI) const override {
    assert(isLegal(MI, TII) && "Cannot convert instruction");
    MachineInstrBuilder Bld =
        BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(DstOpcode));
    // Transfer explicit operands from original instruction. Implicit operands
    // are handled by BuildMI.
    for (auto &Op : MI->explicit_operands())
      Bld.add(Op);
    return true;
  }

  double getExtraCost(const MachineInstr *MI,
                      MachineRegisterInfo *MRI) const override {
    // Assuming instructions have the same cost.
    return 0;
  }
};
/// An Instruction Converter which replaces an instruction with another, and
/// adds a COPY from the new instruction's destination to the old one's.
class InstrReplacerDstCOPY : public InstrConverterBase {
public:
  unsigned DstOpcode;

  InstrReplacerDstCOPY(unsigned SrcOpcode, unsigned DstOpcode)
      : InstrConverterBase(SrcOpcode), DstOpcode(DstOpcode) {}

  bool convertInstr(MachineInstr *MI, const TargetInstrInfo *TII,
                    MachineRegisterInfo *MRI) const override {
    assert(isLegal(MI, TII) && "Cannot convert instruction");
    MachineBasicBlock *MBB = MI->getParent();
    const DebugLoc &DL = MI->getDebugLoc();

    Register Reg = MRI->createVirtualRegister(
        TII->getRegClass(TII->get(DstOpcode), 0, MRI->getTargetRegisterInfo(),
                         *MBB->getParent()));
    MachineInstrBuilder Bld = BuildMI(*MBB, MI, DL, TII->get(DstOpcode), Reg);
    for (unsigned Idx = 1, End = MI->getNumOperands(); Idx < End; ++Idx)
      Bld.add(MI->getOperand(Idx));

    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::COPY))
        .add(MI->getOperand(0))
        .addReg(Reg);

    return true;
  }

  double getExtraCost(const MachineInstr *MI,
                      MachineRegisterInfo *MRI) const override {
    // Assuming instructions have the same cost, and that COPY is in the same
    // domain so it will be eliminated.
    return 0;
  }
};
/// An Instruction Converter for replacing COPY instructions.
class InstrCOPYReplacer : public InstrReplacer {
public:
  RegDomain DstDomain;

  InstrCOPYReplacer(unsigned SrcOpcode, RegDomain DstDomain, unsigned DstOpcode)
      : InstrReplacer(SrcOpcode, DstOpcode), DstDomain(DstDomain) {}

  bool isLegal(const MachineInstr *MI,
               const TargetInstrInfo *TII) const override {
    if (!InstrConverterBase::isLegal(MI, TII))
      return false;

    // Don't allow copies to/from GR8/GR16 physical registers.
    // FIXME: Is there some better way to support this?
    Register DstReg = MI->getOperand(0).getReg();
    if (DstReg.isPhysical() && (X86::GR8RegClass.contains(DstReg) ||
                                X86::GR16RegClass.contains(DstReg)))
      return false;
    Register SrcReg = MI->getOperand(1).getReg();
    if (SrcReg.isPhysical() && (X86::GR8RegClass.contains(SrcReg) ||
                                X86::GR16RegClass.contains(SrcReg)))
      return false;

    return true;
  }

  double getExtraCost(const MachineInstr *MI,
                      MachineRegisterInfo *MRI) const override {
    assert(MI->getOpcode() == TargetOpcode::COPY && "Expected a COPY");

    for (const auto &MO : MI->operands()) {
      // Physical registers will not be converted. Assume that converting the
      // COPY to the destination domain will eventually result in an actual
      // instruction.
      if (Register::isPhysicalRegister(MO.getReg()))
        return 1;

      RegDomain OpDomain = getDomain(MRI->getRegClass(MO.getReg()),
                                     MRI->getTargetRegisterInfo());
      // Converting a cross-domain COPY to a same-domain COPY should eliminate
      // an instruction.
      if (OpDomain == DstDomain)
        return -1;
    }
    return 0;
  }
};
/// An Instruction Converter which replaces an instruction with a COPY.
class InstrReplaceWithCopy : public InstrConverterBase {
public:
  // Source instruction operand Index, to be used as the COPY source.
  unsigned SrcOpIdx;

  InstrReplaceWithCopy(unsigned SrcOpcode, unsigned SrcOpIdx)
      : InstrConverterBase(SrcOpcode), SrcOpIdx(SrcOpIdx) {}

  bool convertInstr(MachineInstr *MI, const TargetInstrInfo *TII,
                    MachineRegisterInfo *MRI) const override {
    assert(isLegal(MI, TII) && "Cannot convert instruction");
    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
            TII->get(TargetOpcode::COPY))
        .add({MI->getOperand(0), MI->getOperand(SrcOpIdx)});
    return true;
  }

  double getExtraCost(const MachineInstr *MI,
                      MachineRegisterInfo *MRI) const override {
    return 0;
  }
};
// Key type to be used by the Instruction Converters map.
// A converter is identified by <destination domain, source opcode>
typedef std::pair<int, unsigned> InstrConverterBaseKeyTy;

typedef DenseMap<InstrConverterBaseKeyTy, std::unique_ptr<InstrConverterBase>>
    InstrConverterBaseMap;
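
// Illustrative note (mirrors initConverters() and encloseInstr() below): a
// converter is registered and looked up by its <domain, opcode> key, e.g.
//   Converters[{MaskDomain, X86::MOV16rr}] =
//       std::make_unique<InstrReplacer>(X86::MOV16rr, X86::KMOVWkk);
//   auto It = Converters.find({MaskDomain, MI->getOpcode()});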
/// A closure is a set of virtual registers representing all of the edges in
/// the closure, as well as all of the instructions connected by those edges.
///
/// A closure may encompass virtual registers in the same register bank that
/// have different widths. For example, it may contain 32-bit GPRs as well as
/// 64-bit GPRs.
///
/// A closure that computes an address (i.e. defines a virtual register that is
/// used in a memory operand) excludes the instructions that contain memory
/// operands using the address. Such an instruction will be included in a
/// different closure that manipulates the loaded or stored value.
class Closure {
private:
  /// Virtual registers in the closure.
  DenseSet<Register> Edges;

  /// Instructions in the closure.
  SmallVector<MachineInstr *, 8> Instrs;

  /// Domains which this closure can legally be reassigned to.
  std::bitset<NumDomains> LegalDstDomains;

  /// An ID to uniquely identify this closure, even when it gets
  /// copied around.
  unsigned ID;

public:
  Closure(unsigned ID, std::initializer_list<RegDomain> LegalDstDomainList)
      : ID(ID) {
    for (RegDomain D : LegalDstDomainList)
      LegalDstDomains.set(D);
  }
  /// Mark this closure as illegal for reassignment to all domains.
  void setAllIllegal() { LegalDstDomains.reset(); }

  /// \returns true if this closure has domains which are legal to reassign to.
  bool hasLegalDstDomain() const { return LegalDstDomains.any(); }

  /// \returns true if it is legal to reassign this closure to domain \p RD.
  bool isLegal(RegDomain RD) const { return LegalDstDomains[RD]; }

  /// Mark this closure as illegal for reassignment to domain \p RD.
  void setIllegal(RegDomain RD) { LegalDstDomains[RD] = false; }

  bool empty() const { return Edges.empty(); }

  bool insertEdge(Register Reg) { return Edges.insert(Reg).second; }

  using const_edge_iterator = DenseSet<Register>::const_iterator;
  iterator_range<const_edge_iterator> edges() const {
    return iterator_range<const_edge_iterator>(Edges.begin(), Edges.end());
  }

  void addInstruction(MachineInstr *I) {
    Instrs.push_back(I);
  }

  ArrayRef<MachineInstr *> instructions() const {
    return Instrs;
  }
  LLVM_DUMP_METHOD void dump(const MachineRegisterInfo *MRI) const {
    dbgs() << "Registers: ";
    bool First = true;
    for (Register Reg : Edges) {
      if (!First)
        dbgs() << ", ";
      First = false;
      dbgs() << printReg(Reg, MRI->getTargetRegisterInfo(), 0, MRI);
    }
    dbgs() << "\n" << "Instructions:";
    for (MachineInstr *MI : Instrs) {
      dbgs() << "\n  ";
      MI->print(dbgs());
    }
    dbgs() << "\n";
  }

  unsigned getID() const {
    return ID;
  }
};
class X86DomainReassignment : public MachineFunctionPass {
  const X86Subtarget *STI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  const X86InstrInfo *TII = nullptr;

  /// All edges that are included in some closure.
  DenseSet<unsigned> EnclosedEdges;

  /// All instructions that are included in some closure.
  DenseMap<MachineInstr *, unsigned> EnclosedInstrs;

public:
  static char ID;

  X86DomainReassignment() : MachineFunctionPass(ID) {}
  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  StringRef getPassName() const override {
    return "X86 Domain Reassignment Pass";
  }

private:
  /// A map of available Instruction Converters.
  InstrConverterBaseMap Converters;

  /// Initialize Converters map.
  void initConverters();

  /// Starting from \p Reg, expand the closure as much as possible.
  void buildClosure(Closure &, Register Reg);

  /// Enqueue \p Reg to be considered for addition to the closure.
  void visitRegister(Closure &, Register Reg, RegDomain &Domain,
                     SmallVectorImpl<unsigned> &Worklist);

  /// Reassign the closure to \p Domain.
  void reassign(const Closure &C, RegDomain Domain) const;

  /// Add \p MI to the closure.
  void encloseInstr(Closure &C, MachineInstr *MI);

  /// \returns true if it is profitable to reassign the closure to \p Domain.
  bool isReassignmentProfitable(const Closure &C, RegDomain Domain) const;

  /// Calculate the total cost of reassigning the closure to \p Domain.
  double calculateCost(const Closure &C, RegDomain Domain) const;
};

char X86DomainReassignment::ID = 0;
} // End anonymous namespace.

void X86DomainReassignment::visitRegister(Closure &C, Register Reg,
                                          RegDomain &Domain,
                                          SmallVectorImpl<unsigned> &Worklist) {
  if (EnclosedEdges.count(Reg))
    return;

  if (!Reg.isVirtual())
    return;

  if (!MRI->hasOneDef(Reg))
    return;

  RegDomain RD = getDomain(MRI->getRegClass(Reg), MRI->getTargetRegisterInfo());
  // First edge in closure sets the domain.
  if (Domain == NoDomain)
    Domain = RD;
  // All registers in the closure must belong to the same domain.
  if (Domain != RD)
    return;

  Worklist.push_back(Reg);
}
void X86DomainReassignment::encloseInstr(Closure &C, MachineInstr *MI) {
  auto I = EnclosedInstrs.find(MI);
  if (I != EnclosedInstrs.end()) {
    if (I->second != C.getID())
      // Instruction already belongs to another closure, avoid conflicts
      // between both closures and mark this closure as illegal.
      C.setAllIllegal();
    return;
  }

  EnclosedInstrs[MI] = C.getID();
  C.addInstruction(MI);

  // Mark closure as illegal for reassignment to domains, if there is no
  // converter for the instruction or if the converter cannot convert the
  // instruction.
  for (int i = 0; i != NumDomains; ++i) {
    if (C.isLegal((RegDomain)i)) {
      auto I = Converters.find({i, MI->getOpcode()});
      if (I == Converters.end() || !I->second->isLegal(MI, TII))
        C.setIllegal((RegDomain)i);
    }
  }
}
double X86DomainReassignment::calculateCost(const Closure &C,
                                            RegDomain DstDomain) const {
  assert(C.isLegal(DstDomain) && "Cannot calculate cost for illegal closure");

  double Cost = 0.0;
  for (auto *MI : C.instructions())
    Cost += Converters.find({DstDomain, MI->getOpcode()})
                ->second->getExtraCost(MI, MRI);
  return Cost;
}

bool X86DomainReassignment::isReassignmentProfitable(const Closure &C,
                                                     RegDomain Domain) const {
  return calculateCost(C, Domain) < 0.0;
}
void X86DomainReassignment::reassign(const Closure &C, RegDomain Domain) const {
  assert(C.isLegal(Domain) && "Cannot convert illegal closure");

  // Iterate all instructions in the closure, convert each one using the
  // appropriate converter.
  SmallVector<MachineInstr *, 8> ToErase;
  for (auto *MI : C.instructions())
    if (Converters.find({Domain, MI->getOpcode()})
            ->second->convertInstr(MI, TII, MRI))
      ToErase.push_back(MI);

  // Iterate all registers in the closure, replace them with registers in the
  // destination domain.
  for (Register Reg : C.edges()) {
    MRI->setRegClass(Reg, getDstRC(MRI->getRegClass(Reg), Domain));
    for (auto &MO : MRI->use_operands(Reg)) {
      if (MO.isReg())
        // Remove all subregister references as they are not valid in the
        // destination domain.
        MO.setSubReg(0);
    }
  }

  for (auto *MI : ToErase)
    MI->eraseFromParent();
}
/// \returns true when \p Reg is used as part of an address calculation in \p
/// MI.
static bool usedAsAddr(const MachineInstr &MI, Register Reg,
                       const TargetInstrInfo *TII) {
  if (!MI.mayLoadOrStore())
    return false;

  const MCInstrDesc &Desc = TII->get(MI.getOpcode());
  int MemOpStart = X86II::getMemoryOperandNo(Desc.TSFlags);
  if (MemOpStart == -1)
    return false;

  MemOpStart += X86II::getOperandBias(Desc);
  for (unsigned MemOpIdx = MemOpStart,
                MemOpEnd = MemOpStart + X86::AddrNumOperands;
       MemOpIdx < MemOpEnd; ++MemOpIdx) {
    const MachineOperand &Op = MI.getOperand(MemOpIdx);
    if (Op.isReg() && Op.getReg() == Reg)
      return true;
  }
  return false;
}
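
// Illustrative example (not from the original comments): for a store such as
//   MOV16mr %base, 1, %idx, 0, $noreg, %val
// usedAsAddr() returns true for %base and %idx, which appear among the
// X86::AddrNumOperands address operands, and false for the stored value %val.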
void X86DomainReassignment::buildClosure(Closure &C, Register Reg) {
  SmallVector<unsigned, 4> Worklist;
  RegDomain Domain = NoDomain;
  visitRegister(C, Reg, Domain, Worklist);
  while (!Worklist.empty()) {
    unsigned CurReg = Worklist.pop_back_val();

    // Register already in this closure.
    if (!C.insertEdge(CurReg))
      continue;
    EnclosedEdges.insert(Reg);

    MachineInstr *DefMI = MRI->getVRegDef(CurReg);
    encloseInstr(C, DefMI);

    // Add registers used by the defining MI to the worklist.
    // Do not add registers which are used in address calculation, they will be
    // added to a different closure.
    int OpEnd = DefMI->getNumOperands();
    const MCInstrDesc &Desc = DefMI->getDesc();
    int MemOp = X86II::getMemoryOperandNo(Desc.TSFlags);
    if (MemOp != -1)
      MemOp += X86II::getOperandBias(Desc);
    for (int OpIdx = 0; OpIdx < OpEnd; ++OpIdx) {
      if (OpIdx == MemOp) {
        // Skip address calculation.
        OpIdx += (X86::AddrNumOperands - 1);
        continue;
      }
      auto &Op = DefMI->getOperand(OpIdx);
      if (!Op.isReg() || !Op.isUse())
        continue;
      visitRegister(C, Op.getReg(), Domain, Worklist);
    }

    // Expand closure through register uses.
    for (auto &UseMI : MRI->use_nodbg_instructions(CurReg)) {
      // We would like to avoid converting closures which calculate addresses,
      // as this should remain in GPRs.
      if (usedAsAddr(UseMI, CurReg, TII)) {
        C.setAllIllegal();
        continue;
      }
      encloseInstr(C, &UseMI);

      for (auto &DefOp : UseMI.defs()) {
        if (!DefOp.isReg())
          continue;

        Register DefReg = DefOp.getReg();
        if (!DefReg.isVirtual()) {
          C.setAllIllegal();
          continue;
        }
        visitRegister(C, DefReg, Domain, Worklist);
      }
    }
  }
}
void X86DomainReassignment::initConverters() {
  Converters[{MaskDomain, TargetOpcode::PHI}] =
      std::make_unique<InstrIgnore>(TargetOpcode::PHI);

  Converters[{MaskDomain, TargetOpcode::IMPLICIT_DEF}] =
      std::make_unique<InstrIgnore>(TargetOpcode::IMPLICIT_DEF);

  Converters[{MaskDomain, TargetOpcode::INSERT_SUBREG}] =
      std::make_unique<InstrReplaceWithCopy>(TargetOpcode::INSERT_SUBREG, 2);

  Converters[{MaskDomain, TargetOpcode::COPY}] =
      std::make_unique<InstrCOPYReplacer>(TargetOpcode::COPY, MaskDomain,
                                          TargetOpcode::COPY);

  auto createReplacerDstCOPY = [&](unsigned From, unsigned To) {
    Converters[{MaskDomain, From}] =
        std::make_unique<InstrReplacerDstCOPY>(From, To);
  };
  createReplacerDstCOPY(X86::MOVZX32rm16, X86::KMOVWkm);
  createReplacerDstCOPY(X86::MOVZX64rm16, X86::KMOVWkm);

  createReplacerDstCOPY(X86::MOVZX32rr16, X86::KMOVWkk);
  createReplacerDstCOPY(X86::MOVZX64rr16, X86::KMOVWkk);

  if (STI->hasDQI()) {
    createReplacerDstCOPY(X86::MOVZX16rm8, X86::KMOVBkm);
    createReplacerDstCOPY(X86::MOVZX32rm8, X86::KMOVBkm);
    createReplacerDstCOPY(X86::MOVZX64rm8, X86::KMOVBkm);

    createReplacerDstCOPY(X86::MOVZX16rr8, X86::KMOVBkk);
    createReplacerDstCOPY(X86::MOVZX32rr8, X86::KMOVBkk);
    createReplacerDstCOPY(X86::MOVZX64rr8, X86::KMOVBkk);
  }
  auto createReplacer = [&](unsigned From, unsigned To) {
    Converters[{MaskDomain, From}] = std::make_unique<InstrReplacer>(From, To);
  };

  createReplacer(X86::MOV16rm, X86::KMOVWkm);
  createReplacer(X86::MOV16mr, X86::KMOVWmk);
  createReplacer(X86::MOV16rr, X86::KMOVWkk);
  createReplacer(X86::SHR16ri, X86::KSHIFTRWri);
  createReplacer(X86::SHL16ri, X86::KSHIFTLWri);
  createReplacer(X86::NOT16r, X86::KNOTWrr);
  createReplacer(X86::OR16rr, X86::KORWrr);
  createReplacer(X86::AND16rr, X86::KANDWrr);
  createReplacer(X86::XOR16rr, X86::KXORWrr);
  if (STI->hasBWI()) {
    createReplacer(X86::MOV32rm, X86::KMOVDkm);
    createReplacer(X86::MOV64rm, X86::KMOVQkm);

    createReplacer(X86::MOV32mr, X86::KMOVDmk);
    createReplacer(X86::MOV64mr, X86::KMOVQmk);

    createReplacer(X86::MOV32rr, X86::KMOVDkk);
    createReplacer(X86::MOV64rr, X86::KMOVQkk);

    createReplacer(X86::SHR32ri, X86::KSHIFTRDri);
    createReplacer(X86::SHR64ri, X86::KSHIFTRQri);

    createReplacer(X86::SHL32ri, X86::KSHIFTLDri);
    createReplacer(X86::SHL64ri, X86::KSHIFTLQri);

    createReplacer(X86::ADD32rr, X86::KADDDrr);
    createReplacer(X86::ADD64rr, X86::KADDQrr);

    createReplacer(X86::NOT32r, X86::KNOTDrr);
    createReplacer(X86::NOT64r, X86::KNOTQrr);

    createReplacer(X86::OR32rr, X86::KORDrr);
    createReplacer(X86::OR64rr, X86::KORQrr);

    createReplacer(X86::AND32rr, X86::KANDDrr);
    createReplacer(X86::AND64rr, X86::KANDQrr);

    createReplacer(X86::ANDN32rr, X86::KANDNDrr);
    createReplacer(X86::ANDN64rr, X86::KANDNQrr);

    createReplacer(X86::XOR32rr, X86::KXORDrr);
    createReplacer(X86::XOR64rr, X86::KXORQrr);

    // TODO: KTEST is not a replacement for TEST due to flag differences. Need
    // to prove only Z flag is used.
    //createReplacer(X86::TEST32rr, X86::KTESTDrr);
    //createReplacer(X86::TEST64rr, X86::KTESTQrr);
  }
  if (STI->hasDQI()) {
    createReplacer(X86::ADD8rr, X86::KADDBrr);
    createReplacer(X86::ADD16rr, X86::KADDWrr);

    createReplacer(X86::AND8rr, X86::KANDBrr);

    createReplacer(X86::MOV8rm, X86::KMOVBkm);
    createReplacer(X86::MOV8mr, X86::KMOVBmk);
    createReplacer(X86::MOV8rr, X86::KMOVBkk);

    createReplacer(X86::NOT8r, X86::KNOTBrr);

    createReplacer(X86::OR8rr, X86::KORBrr);

    createReplacer(X86::SHR8ri, X86::KSHIFTRBri);
    createReplacer(X86::SHL8ri, X86::KSHIFTLBri);

    // TODO: KTEST is not a replacement for TEST due to flag differences. Need
    // to prove only Z flag is used.
    //createReplacer(X86::TEST8rr, X86::KTESTBrr);
    //createReplacer(X86::TEST16rr, X86::KTESTWrr);

    createReplacer(X86::XOR8rr, X86::KXORBrr);
  }
}
bool X86DomainReassignment::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;
  if (DisableX86DomainReassignment)
    return false;

  LLVM_DEBUG(
      dbgs() << "***** Machine Function before Domain Reassignment *****\n");
  LLVM_DEBUG(MF.print(dbgs()));

  STI = &MF.getSubtarget<X86Subtarget>();
  // GPR->K is the only transformation currently supported, bail out early if
  // no AVX512.
  // TODO: We're also bailing if AVX512BW isn't supported since we use VK32 and
  // VK64 for GR32/GR64, but those aren't legal classes on KNL. If the register
  // coalescer doesn't clean it up and we generate a spill we will crash.
  if (!STI->hasAVX512() || !STI->hasBWI())
    return false;

  MRI = &MF.getRegInfo();
  assert(MRI->isSSA() && "Expected MIR to be in SSA form");

  TII = STI->getInstrInfo();
  initConverters();
  bool Changed = false;

  EnclosedEdges.clear();
  EnclosedInstrs.clear();

  std::vector<Closure> Closures;

  // Go over all virtual registers and calculate a closure.
  unsigned ClosureID = 0;
  for (unsigned Idx = 0; Idx < MRI->getNumVirtRegs(); ++Idx) {
    Register Reg = Register::index2VirtReg(Idx);

    // GPR is currently the only supported source domain.
    if (!isGPR(MRI->getRegClass(Reg)))
      continue;

    // Register already in closure.
    if (EnclosedEdges.count(Reg))
      continue;

    // Calculate closure starting with Reg.
    Closure C(ClosureID++, {MaskDomain});
    buildClosure(C, Reg);

    // Collect all closures that can potentially be converted.
    if (!C.empty() && C.isLegal(MaskDomain))
      Closures.push_back(std::move(C));
  }

  for (Closure &C : Closures) {
    LLVM_DEBUG(C.dump(MRI));
    if (isReassignmentProfitable(C, MaskDomain)) {
      reassign(C, MaskDomain);
      ++NumClosuresConverted;
      Changed = true;
    }
  }

  LLVM_DEBUG(
      dbgs() << "***** Machine Function after Domain Reassignment *****\n");
  LLVM_DEBUG(MF.print(dbgs()));

  return Changed;
}
INITIALIZE_PASS(X86DomainReassignment, "x86-domain-reassignment",
                "X86 Domain Reassignment Pass", false, false)

/// Returns an instance of the Domain Reassignment pass.
FunctionPass *llvm::createX86DomainReassignmentPass() {
  return new X86DomainReassignment();
}