//===--- X86DomainReassignment.cpp - Selectively switch register classes---===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass attempts to find instruction chains (closures) in one domain,
// and convert them to equivalent instructions in a different domain,
// if profitable.
//
//===----------------------------------------------------------------------===//
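
// Illustrative example (simplified, not actual MIR syntax): a GPR chain such
// as
//
//   %a:gr16 = MOV16rm ...
//   %b:gr16 = AND16rr %a, %c
//   %k:vk16 = COPY %b
//
// can be rewritten, using the converters registered below, as
//
//   %a:vk16 = KMOVWkm ...
//   %b:vk16 = KANDWrr %a, %c
//   %k:vk16 = COPY %b            (now a same-domain COPY, easily coalesced)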
15 #include "X86.h"
16 #include "X86InstrInfo.h"
17 #include "X86Subtarget.h"
18 #include "llvm/ADT/BitVector.h"
19 #include "llvm/ADT/DenseMap.h"
20 #include "llvm/ADT/DenseMapInfo.h"
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/CodeGen/MachineFunctionPass.h"
25 #include "llvm/CodeGen/MachineInstrBuilder.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/TargetRegisterInfo.h"
28 #include "llvm/Support/Debug.h"
29 #include "llvm/Support/Printable.h"
30 #include <bitset>
32 using namespace llvm;

#define DEBUG_TYPE "x86-domain-reassignment"

STATISTIC(NumClosuresConverted, "Number of closures converted by the pass");

static cl::opt<bool> DisableX86DomainReassignment(
    "disable-x86-domain-reassignment", cl::Hidden,
    cl::desc("X86: Disable Virtual Register Reassignment."), cl::init(false));

namespace {
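// The register domains this pass distinguishes: GPRDomain covers the general
// purpose registers, MaskDomain the AVX-512 k-mask registers, and OtherDomain
// everything else. Only GPR -> Mask reassignment is currently implemented.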
enum RegDomain { NoDomain = -1, GPRDomain, MaskDomain, OtherDomain, NumDomains };

static bool isGPR(const TargetRegisterClass *RC) {
  return X86::GR64RegClass.hasSubClassEq(RC) ||
         X86::GR32RegClass.hasSubClassEq(RC) ||
         X86::GR16RegClass.hasSubClassEq(RC) ||
         X86::GR8RegClass.hasSubClassEq(RC);
}

static bool isMask(const TargetRegisterClass *RC,
                   const TargetRegisterInfo *TRI) {
  return X86::VK16RegClass.hasSubClassEq(RC);
}

static RegDomain getDomain(const TargetRegisterClass *RC,
                           const TargetRegisterInfo *TRI) {
  if (isGPR(RC))
    return GPRDomain;
  if (isMask(RC, TRI))
    return MaskDomain;
  return OtherDomain;
}

/// Return a register class equivalent to \p SrcRC, in \p Domain.
static const TargetRegisterClass *getDstRC(const TargetRegisterClass *SrcRC,
                                           RegDomain Domain) {
  assert(Domain == MaskDomain && "add domain");
  if (X86::GR8RegClass.hasSubClassEq(SrcRC))
    return &X86::VK8RegClass;
  if (X86::GR16RegClass.hasSubClassEq(SrcRC))
    return &X86::VK16RegClass;
  if (X86::GR32RegClass.hasSubClassEq(SrcRC))
    return &X86::VK32RegClass;
  if (X86::GR64RegClass.hasSubClassEq(SrcRC))
    return &X86::VK64RegClass;
  llvm_unreachable("add register class");
  return nullptr;
}

/// Abstract Instruction Converter class.
class InstrConverterBase {
protected:
  unsigned SrcOpcode;

public:
  InstrConverterBase(unsigned SrcOpcode) : SrcOpcode(SrcOpcode) {}

  virtual ~InstrConverterBase() = default;

  /// \returns true if \p MI is legal to convert.
  virtual bool isLegal(const MachineInstr *MI,
                       const TargetInstrInfo *TII) const {
    assert(MI->getOpcode() == SrcOpcode &&
           "Wrong instruction passed to converter");
    return true;
  }

  /// Applies conversion to \p MI.
  ///
  /// \returns true if \p MI is no longer needed, and can be deleted.
  virtual bool convertInstr(MachineInstr *MI, const TargetInstrInfo *TII,
                            MachineRegisterInfo *MRI) const = 0;

  /// \returns the cost increment incurred by converting \p MI.
  virtual double getExtraCost(const MachineInstr *MI,
                              MachineRegisterInfo *MRI) const = 0;
};

/// An Instruction Converter which ignores the given instruction.
/// For example, PHI instructions can be safely ignored since only the registers
/// need to change.
class InstrIgnore : public InstrConverterBase {
public:
  InstrIgnore(unsigned SrcOpcode) : InstrConverterBase(SrcOpcode) {}

  bool convertInstr(MachineInstr *MI, const TargetInstrInfo *TII,
                    MachineRegisterInfo *MRI) const override {
    assert(isLegal(MI, TII) && "Cannot convert instruction");
    return false;
  }

  double getExtraCost(const MachineInstr *MI,
                      MachineRegisterInfo *MRI) const override {
    return 0;
  }
};

/// An Instruction Converter which replaces an instruction with another.
class InstrReplacer : public InstrConverterBase {
public:
  /// Opcode of the destination instruction.
  unsigned DstOpcode;

  InstrReplacer(unsigned SrcOpcode, unsigned DstOpcode)
      : InstrConverterBase(SrcOpcode), DstOpcode(DstOpcode) {}

  bool isLegal(const MachineInstr *MI,
               const TargetInstrInfo *TII) const override {
    if (!InstrConverterBase::isLegal(MI, TII))
      return false;
    // It's illegal to replace an instruction that implicitly defines a register
    // with an instruction that doesn't, unless that register is dead.
    for (const auto &MO : MI->implicit_operands())
      if (MO.isReg() && MO.isDef() && !MO.isDead() &&
          !TII->get(DstOpcode).hasImplicitDefOfPhysReg(MO.getReg()))
        return false;
    return true;
  }

  bool convertInstr(MachineInstr *MI, const TargetInstrInfo *TII,
                    MachineRegisterInfo *MRI) const override {
    assert(isLegal(MI, TII) && "Cannot convert instruction");
    MachineInstrBuilder Bld =
        BuildMI(*MI->getParent(), MI, MI->getDebugLoc(), TII->get(DstOpcode));
    // Transfer explicit operands from original instruction. Implicit operands
    // are handled by BuildMI.
    for (auto &Op : MI->explicit_operands())
      Bld.add(Op);
    return true;
  }

  double getExtraCost(const MachineInstr *MI,
                      MachineRegisterInfo *MRI) const override {
    // Assuming instructions have the same cost.
    return 0;
  }
};

/// An Instruction Converter which replaces an instruction with another, and
/// adds a COPY from the new instruction's destination to the old one's.
class InstrReplacerDstCOPY : public InstrConverterBase {
public:
  unsigned DstOpcode;

  InstrReplacerDstCOPY(unsigned SrcOpcode, unsigned DstOpcode)
      : InstrConverterBase(SrcOpcode), DstOpcode(DstOpcode) {}

  bool convertInstr(MachineInstr *MI, const TargetInstrInfo *TII,
                    MachineRegisterInfo *MRI) const override {
    assert(isLegal(MI, TII) && "Cannot convert instruction");
    MachineBasicBlock *MBB = MI->getParent();
    const DebugLoc &DL = MI->getDebugLoc();

    Register Reg = MRI->createVirtualRegister(
        TII->getRegClass(TII->get(DstOpcode), 0, MRI->getTargetRegisterInfo(),
                         *MBB->getParent()));
    MachineInstrBuilder Bld = BuildMI(*MBB, MI, DL, TII->get(DstOpcode), Reg);
    for (const MachineOperand &MO : llvm::drop_begin(MI->operands()))
      Bld.add(MO);

    BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::COPY))
        .add(MI->getOperand(0))
        .addReg(Reg);

    return true;
  }

  double getExtraCost(const MachineInstr *MI,
                      MachineRegisterInfo *MRI) const override {
    // Assuming instructions have the same cost, and that COPY is in the same
    // domain so it will be eliminated.
    return 0;
  }
};

/// An Instruction Converter for replacing COPY instructions.
class InstrCOPYReplacer : public InstrReplacer {
public:
  RegDomain DstDomain;

  InstrCOPYReplacer(unsigned SrcOpcode, RegDomain DstDomain, unsigned DstOpcode)
      : InstrReplacer(SrcOpcode, DstOpcode), DstDomain(DstDomain) {}

  bool isLegal(const MachineInstr *MI,
               const TargetInstrInfo *TII) const override {
    if (!InstrConverterBase::isLegal(MI, TII))
      return false;

    // Don't allow copies to/from GR8/GR16 physical registers.
    // FIXME: Is there some better way to support this?
    Register DstReg = MI->getOperand(0).getReg();
    if (DstReg.isPhysical() && (X86::GR8RegClass.contains(DstReg) ||
                                X86::GR16RegClass.contains(DstReg)))
      return false;
    Register SrcReg = MI->getOperand(1).getReg();
    if (SrcReg.isPhysical() && (X86::GR8RegClass.contains(SrcReg) ||
                                X86::GR16RegClass.contains(SrcReg)))
      return false;

    return true;
  }

  double getExtraCost(const MachineInstr *MI,
                      MachineRegisterInfo *MRI) const override {
    assert(MI->getOpcode() == TargetOpcode::COPY && "Expected a COPY");

    for (const auto &MO : MI->operands()) {
      // Physical registers will not be converted. Assume that converting the
      // COPY to the destination domain will eventually result in an actual
      // instruction.
      if (MO.getReg().isPhysical())
        return 1;

      RegDomain OpDomain = getDomain(MRI->getRegClass(MO.getReg()),
                                     MRI->getTargetRegisterInfo());
      // Converting a cross-domain COPY to a same-domain COPY should eliminate
      // an instruction.
      if (OpDomain == DstDomain)
        return -1;
    }
    return 0;
  }
};

/// An Instruction Converter which replaces an instruction with a COPY.
class InstrReplaceWithCopy : public InstrConverterBase {
public:
  // Source instruction operand Index, to be used as the COPY source.
  unsigned SrcOpIdx;

  InstrReplaceWithCopy(unsigned SrcOpcode, unsigned SrcOpIdx)
      : InstrConverterBase(SrcOpcode), SrcOpIdx(SrcOpIdx) {}

  bool convertInstr(MachineInstr *MI, const TargetInstrInfo *TII,
                    MachineRegisterInfo *MRI) const override {
    assert(isLegal(MI, TII) && "Cannot convert instruction");
    BuildMI(*MI->getParent(), MI, MI->getDebugLoc(),
            TII->get(TargetOpcode::COPY))
        .add({MI->getOperand(0), MI->getOperand(SrcOpIdx)});
    return true;
  }

  double getExtraCost(const MachineInstr *MI,
                      MachineRegisterInfo *MRI) const override {
    return 0;
  }
};

// Key type to be used by the Instruction Converters map.
// A converter is identified by <destination domain, source opcode>
typedef std::pair<int, unsigned> InstrConverterBaseKeyTy;

typedef DenseMap<InstrConverterBaseKeyTy, std::unique_ptr<InstrConverterBase>>
    InstrConverterBaseMap;
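
// For example, the converter that rewrites a 16-bit GPR AND into the mask
// domain is registered and looked up under the key {MaskDomain, X86::AND16rr}
// (see initConverters() and encloseInstr() below).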

/// A closure is a set of virtual registers representing all of the edges in
/// the closure, as well as all of the instructions connected by those edges.
///
/// A closure may encompass virtual registers in the same register bank that
/// have different widths. For example, it may contain 32-bit GPRs as well as
/// 64-bit GPRs.
///
/// A closure that computes an address (i.e. defines a virtual register that is
/// used in a memory operand) excludes the instructions that contain memory
/// operands using the address. Such an instruction will be included in a
/// different closure that manipulates the loaded or stored value.
class Closure {
private:
  /// Virtual registers in the closure.
  DenseSet<Register> Edges;

  /// Instructions in the closure.
  SmallVector<MachineInstr *, 8> Instrs;

  /// Domains which this closure can legally be reassigned to.
  std::bitset<NumDomains> LegalDstDomains;

  /// An ID to uniquely identify this closure, even when it gets
  /// moved around.
  unsigned ID;

public:
  Closure(unsigned ID, std::initializer_list<RegDomain> LegalDstDomainList)
      : ID(ID) {
    for (RegDomain D : LegalDstDomainList)
      LegalDstDomains.set(D);
  }

  /// Mark this closure as illegal for reassignment to all domains.
  void setAllIllegal() { LegalDstDomains.reset(); }

  /// \returns true if this closure has domains which are legal to reassign to.
  bool hasLegalDstDomain() const { return LegalDstDomains.any(); }

  /// \returns true if it is legal to reassign this closure to domain \p RD.
  bool isLegal(RegDomain RD) const { return LegalDstDomains[RD]; }

  /// Mark this closure as illegal for reassignment to domain \p RD.
  void setIllegal(RegDomain RD) { LegalDstDomains[RD] = false; }

  bool empty() const { return Edges.empty(); }

  bool insertEdge(Register Reg) { return Edges.insert(Reg).second; }

  using const_edge_iterator = DenseSet<Register>::const_iterator;
  iterator_range<const_edge_iterator> edges() const {
    return iterator_range<const_edge_iterator>(Edges.begin(), Edges.end());
  }

  void addInstruction(MachineInstr *I) {
    Instrs.push_back(I);
  }

  ArrayRef<MachineInstr *> instructions() const {
    return Instrs;
  }

  LLVM_DUMP_METHOD void dump(const MachineRegisterInfo *MRI) const {
    dbgs() << "Registers: ";
    bool First = true;
    for (Register Reg : Edges) {
      if (!First)
        dbgs() << ", ";
      First = false;
      dbgs() << printReg(Reg, MRI->getTargetRegisterInfo(), 0, MRI);
    }
    dbgs() << "\n" << "Instructions:";
    for (MachineInstr *MI : Instrs) {
      dbgs() << "\n  ";
      MI->print(dbgs());
    }
    dbgs() << "\n";
  }

  unsigned getID() const {
    return ID;
  }
};
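
/// The pass itself: partitions a function's virtual registers into closures
/// and reassigns each closure to the mask domain when that is both legal and
/// profitable.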
class X86DomainReassignment : public MachineFunctionPass {
  const X86Subtarget *STI = nullptr;
  MachineRegisterInfo *MRI = nullptr;
  const X86InstrInfo *TII = nullptr;

  /// All edges that are included in some closure.
  BitVector EnclosedEdges{8, false};

  /// All instructions that are included in some closure.
  DenseMap<MachineInstr *, unsigned> EnclosedInstrs;

public:
  static char ID;

  X86DomainReassignment() : MachineFunctionPass(ID) { }

  bool runOnMachineFunction(MachineFunction &MF) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    MachineFunctionPass::getAnalysisUsage(AU);
  }

  StringRef getPassName() const override {
    return "X86 Domain Reassignment Pass";
  }

private:
  /// A map of available Instruction Converters.
  InstrConverterBaseMap Converters;

  /// Initialize Converters map.
  void initConverters();

  /// Starting from \p Reg, expand the closure as much as possible.
  void buildClosure(Closure &, Register Reg);

  /// Enqueue \p Reg to be considered for addition to the closure.
  void visitRegister(Closure &, Register Reg, RegDomain &Domain,
                     SmallVectorImpl<unsigned> &Worklist);

  /// Reassign the closure to \p Domain.
  void reassign(const Closure &C, RegDomain Domain) const;

  /// Add \p MI to the closure.
  void encloseInstr(Closure &C, MachineInstr *MI);

  /// \returns true if it is profitable to reassign the closure to \p Domain.
  bool isReassignmentProfitable(const Closure &C, RegDomain Domain) const;

  /// Calculate the total cost of reassigning the closure to \p Domain.
  double calculateCost(const Closure &C, RegDomain Domain) const;
};

char X86DomainReassignment::ID = 0;

} // End anonymous namespace.

void X86DomainReassignment::visitRegister(Closure &C, Register Reg,
                                          RegDomain &Domain,
                                          SmallVectorImpl<unsigned> &Worklist) {
  if (!Reg.isVirtual())
    return;

  if (EnclosedEdges.test(Register::virtReg2Index(Reg)))
    return;

  if (!MRI->hasOneDef(Reg))
    return;

  RegDomain RD = getDomain(MRI->getRegClass(Reg), MRI->getTargetRegisterInfo());
  // First edge in closure sets the domain.
  if (Domain == NoDomain)
    Domain = RD;

  if (Domain != RD)
    return;

  Worklist.push_back(Reg);
}

void X86DomainReassignment::encloseInstr(Closure &C, MachineInstr *MI) {
  auto I = EnclosedInstrs.find(MI);
  if (I != EnclosedInstrs.end()) {
    if (I->second != C.getID())
      // Instruction already belongs to another closure, avoid conflicts between
      // closures and mark this closure as illegal.
      C.setAllIllegal();
    return;
  }

  EnclosedInstrs[MI] = C.getID();
  C.addInstruction(MI);

  // Mark closure as illegal for reassignment to domains, if there is no
  // converter for the instruction or if the converter cannot convert the
  // instruction.
  for (int i = 0; i != NumDomains; ++i) {
    if (C.isLegal((RegDomain)i)) {
      auto I = Converters.find({i, MI->getOpcode()});
      if (I == Converters.end() || !I->second->isLegal(MI, TII))
        C.setIllegal((RegDomain)i);
    }
  }
}

double X86DomainReassignment::calculateCost(const Closure &C,
                                            RegDomain DstDomain) const {
  assert(C.isLegal(DstDomain) && "Cannot calculate cost for illegal closure");

  double Cost = 0.0;
  for (auto *MI : C.instructions())
    Cost += Converters.find({DstDomain, MI->getOpcode()})
                ->second->getExtraCost(MI, MRI);
  return Cost;
}
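
// A negative total cost means the conversion removes more instructions than it
// adds: each cross-domain COPY that becomes a same-domain COPY contributes -1,
// while a COPY involving a physical register contributes +1 (see
// InstrCOPYReplacer::getExtraCost above).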
bool X86DomainReassignment::isReassignmentProfitable(const Closure &C,
                                                     RegDomain Domain) const {
  return calculateCost(C, Domain) < 0.0;
}

void X86DomainReassignment::reassign(const Closure &C, RegDomain Domain) const {
  assert(C.isLegal(Domain) && "Cannot convert illegal closure");

  // Iterate all instructions in the closure, convert each one using the
  // appropriate converter.
  SmallVector<MachineInstr *, 8> ToErase;
  for (auto *MI : C.instructions())
    if (Converters.find({Domain, MI->getOpcode()})
            ->second->convertInstr(MI, TII, MRI))
      ToErase.push_back(MI);

  // Iterate all registers in the closure, replace them with registers in the
  // destination domain.
  for (Register Reg : C.edges()) {
    MRI->setRegClass(Reg, getDstRC(MRI->getRegClass(Reg), Domain));
    for (auto &MO : MRI->use_operands(Reg)) {
      if (MO.isReg())
        // Remove all subregister references as they are not valid in the
        // destination domain.
        MO.setSubReg(0);
    }
  }

  for (auto *MI : ToErase)
    MI->eraseFromParent();
}

/// \returns true when \p Reg is used as part of an address calculation in \p
/// MI.
static bool usedAsAddr(const MachineInstr &MI, Register Reg,
                       const TargetInstrInfo *TII) {
  if (!MI.mayLoadOrStore())
    return false;

  const MCInstrDesc &Desc = TII->get(MI.getOpcode());
  int MemOpStart = X86II::getMemoryOperandNo(Desc.TSFlags);
  if (MemOpStart == -1)
    return false;

  MemOpStart += X86II::getOperandBias(Desc);
  for (unsigned MemOpIdx = MemOpStart,
                MemOpEnd = MemOpStart + X86::AddrNumOperands;
       MemOpIdx < MemOpEnd; ++MemOpIdx) {
    const MachineOperand &Op = MI.getOperand(MemOpIdx);
    if (Op.isReg() && Op.getReg() == Reg)
      return true;
  }
  return false;
}
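
// buildClosure flood-fills from a seed register: it repeatedly pops a register
// off the worklist, encloses its defining instruction and all of its non-debug
// users, and enqueues any same-domain virtual registers those instructions
// touch, until the worklist is empty.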

void X86DomainReassignment::buildClosure(Closure &C, Register Reg) {
  SmallVector<unsigned, 4> Worklist;
  RegDomain Domain = NoDomain;
  visitRegister(C, Reg, Domain, Worklist);
  while (!Worklist.empty()) {
    unsigned CurReg = Worklist.pop_back_val();

    // Register already in this closure.
    if (!C.insertEdge(CurReg))
      continue;
    EnclosedEdges.set(Register::virtReg2Index(CurReg));

    MachineInstr *DefMI = MRI->getVRegDef(CurReg);
    encloseInstr(C, DefMI);

    // Add registers used by the defining MI to the worklist.
    // Do not add registers which are used in address calculation, they will be
    // added to a different closure.
    int OpEnd = DefMI->getNumOperands();
    const MCInstrDesc &Desc = DefMI->getDesc();
    int MemOp = X86II::getMemoryOperandNo(Desc.TSFlags);
    if (MemOp != -1)
      MemOp += X86II::getOperandBias(Desc);
    for (int OpIdx = 0; OpIdx < OpEnd; ++OpIdx) {
      if (OpIdx == MemOp) {
        // Skip address calculation.
        OpIdx += (X86::AddrNumOperands - 1);
        continue;
      }
      auto &Op = DefMI->getOperand(OpIdx);
      if (!Op.isReg() || !Op.isUse())
        continue;
      visitRegister(C, Op.getReg(), Domain, Worklist);
    }

    // Expand closure through register uses.
    for (auto &UseMI : MRI->use_nodbg_instructions(CurReg)) {
      // We would like to avoid converting closures which calculate addresses,
      // as these should remain in GPRs.
      if (usedAsAddr(UseMI, CurReg, TII)) {
        C.setAllIllegal();
        continue;
      }
      encloseInstr(C, &UseMI);

      for (auto &DefOp : UseMI.defs()) {
        if (!DefOp.isReg())
          continue;

        Register DefReg = DefOp.getReg();
        if (!DefReg.isVirtual()) {
          C.setAllIllegal();
          continue;
        }
        visitRegister(C, DefReg, Domain, Worklist);
      }
    }
  }
}

void X86DomainReassignment::initConverters() {
  Converters[{MaskDomain, TargetOpcode::PHI}] =
      std::make_unique<InstrIgnore>(TargetOpcode::PHI);

  Converters[{MaskDomain, TargetOpcode::IMPLICIT_DEF}] =
      std::make_unique<InstrIgnore>(TargetOpcode::IMPLICIT_DEF);

  Converters[{MaskDomain, TargetOpcode::INSERT_SUBREG}] =
      std::make_unique<InstrReplaceWithCopy>(TargetOpcode::INSERT_SUBREG, 2);

  Converters[{MaskDomain, TargetOpcode::COPY}] =
      std::make_unique<InstrCOPYReplacer>(TargetOpcode::COPY, MaskDomain,
                                          TargetOpcode::COPY);
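
  // The replacements below are grouped by the subtarget feature that the
  // destination mask instructions require: unguarded entries need only
  // baseline AVX-512, while the blocks behind hasBWI() and hasDQI() need
  // those respective features.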

  auto createReplacerDstCOPY = [&](unsigned From, unsigned To) {
    Converters[{MaskDomain, From}] =
        std::make_unique<InstrReplacerDstCOPY>(From, To);
  };

  createReplacerDstCOPY(X86::MOVZX32rm16, X86::KMOVWkm);
  createReplacerDstCOPY(X86::MOVZX64rm16, X86::KMOVWkm);

  createReplacerDstCOPY(X86::MOVZX32rr16, X86::KMOVWkk);
  createReplacerDstCOPY(X86::MOVZX64rr16, X86::KMOVWkk);

  if (STI->hasDQI()) {
    createReplacerDstCOPY(X86::MOVZX16rm8, X86::KMOVBkm);
    createReplacerDstCOPY(X86::MOVZX32rm8, X86::KMOVBkm);
    createReplacerDstCOPY(X86::MOVZX64rm8, X86::KMOVBkm);

    createReplacerDstCOPY(X86::MOVZX16rr8, X86::KMOVBkk);
    createReplacerDstCOPY(X86::MOVZX32rr8, X86::KMOVBkk);
    createReplacerDstCOPY(X86::MOVZX64rr8, X86::KMOVBkk);
  }

  auto createReplacer = [&](unsigned From, unsigned To) {
    Converters[{MaskDomain, From}] = std::make_unique<InstrReplacer>(From, To);
  };

  createReplacer(X86::MOV16rm, X86::KMOVWkm);
  createReplacer(X86::MOV16mr, X86::KMOVWmk);
  createReplacer(X86::MOV16rr, X86::KMOVWkk);
  createReplacer(X86::SHR16ri, X86::KSHIFTRWri);
  createReplacer(X86::SHL16ri, X86::KSHIFTLWri);
  createReplacer(X86::NOT16r, X86::KNOTWrr);
  createReplacer(X86::OR16rr, X86::KORWrr);
  createReplacer(X86::AND16rr, X86::KANDWrr);
  createReplacer(X86::XOR16rr, X86::KXORWrr);

  if (STI->hasBWI()) {
    createReplacer(X86::MOV32rm, X86::KMOVDkm);
    createReplacer(X86::MOV64rm, X86::KMOVQkm);

    createReplacer(X86::MOV32mr, X86::KMOVDmk);
    createReplacer(X86::MOV64mr, X86::KMOVQmk);

    createReplacer(X86::MOV32rr, X86::KMOVDkk);
    createReplacer(X86::MOV64rr, X86::KMOVQkk);

    createReplacer(X86::SHR32ri, X86::KSHIFTRDri);
    createReplacer(X86::SHR64ri, X86::KSHIFTRQri);

    createReplacer(X86::SHL32ri, X86::KSHIFTLDri);
    createReplacer(X86::SHL64ri, X86::KSHIFTLQri);

    createReplacer(X86::ADD32rr, X86::KADDDrr);
    createReplacer(X86::ADD64rr, X86::KADDQrr);

    createReplacer(X86::NOT32r, X86::KNOTDrr);
    createReplacer(X86::NOT64r, X86::KNOTQrr);

    createReplacer(X86::OR32rr, X86::KORDrr);
    createReplacer(X86::OR64rr, X86::KORQrr);

    createReplacer(X86::AND32rr, X86::KANDDrr);
    createReplacer(X86::AND64rr, X86::KANDQrr);

    createReplacer(X86::ANDN32rr, X86::KANDNDrr);
    createReplacer(X86::ANDN64rr, X86::KANDNQrr);

    createReplacer(X86::XOR32rr, X86::KXORDrr);
    createReplacer(X86::XOR64rr, X86::KXORQrr);

    // TODO: KTEST is not a replacement for TEST due to flag differences. Need
    // to prove only Z flag is used.
    //createReplacer(X86::TEST32rr, X86::KTESTDrr);
    //createReplacer(X86::TEST64rr, X86::KTESTQrr);
  }

  if (STI->hasDQI()) {
    createReplacer(X86::ADD8rr, X86::KADDBrr);
    createReplacer(X86::ADD16rr, X86::KADDWrr);

    createReplacer(X86::AND8rr, X86::KANDBrr);

    createReplacer(X86::MOV8rm, X86::KMOVBkm);
    createReplacer(X86::MOV8mr, X86::KMOVBmk);
    createReplacer(X86::MOV8rr, X86::KMOVBkk);

    createReplacer(X86::NOT8r, X86::KNOTBrr);

    createReplacer(X86::OR8rr, X86::KORBrr);

    createReplacer(X86::SHR8ri, X86::KSHIFTRBri);
    createReplacer(X86::SHL8ri, X86::KSHIFTLBri);

    // TODO: KTEST is not a replacement for TEST due to flag differences. Need
    // to prove only Z flag is used.
    //createReplacer(X86::TEST8rr, X86::KTESTBrr);
    //createReplacer(X86::TEST16rr, X86::KTESTWrr);

    createReplacer(X86::XOR8rr, X86::KXORBrr);
  }
}

bool X86DomainReassignment::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;
  if (DisableX86DomainReassignment)
    return false;

  LLVM_DEBUG(
      dbgs() << "***** Machine Function before Domain Reassignment *****\n");
  LLVM_DEBUG(MF.print(dbgs()));

  STI = &MF.getSubtarget<X86Subtarget>();
  // GPR->K is the only transformation currently supported, bail out early if
  // there is no AVX512.
  // TODO: We're also bailing out if AVX512BW isn't supported since we use VK32
  // and VK64 for GR32/GR64, but those aren't legal classes on KNL. If the
  // register coalescer doesn't clean it up and we generate a spill we will
  // crash.
  if (!STI->hasAVX512() || !STI->hasBWI())
    return false;

  MRI = &MF.getRegInfo();
  assert(MRI->isSSA() && "Expected MIR to be in SSA form");

  TII = STI->getInstrInfo();
  initConverters();
  bool Changed = false;

  EnclosedEdges.clear();
  EnclosedEdges.resize(MRI->getNumVirtRegs());
  EnclosedInstrs.clear();

  std::vector<Closure> Closures;

  // Go over all virtual registers and calculate a closure.
  unsigned ClosureID = 0;
  for (unsigned Idx = 0; Idx < MRI->getNumVirtRegs(); ++Idx) {
    Register Reg = Register::index2VirtReg(Idx);

    // GPR is the only source domain currently supported.
    if (!isGPR(MRI->getRegClass(Reg)))
      continue;

    // Register already in closure.
    if (EnclosedEdges.test(Idx))
      continue;

    // Calculate closure starting with Reg.
    Closure C(ClosureID++, {MaskDomain});
    buildClosure(C, Reg);

    // Collect all closures that can potentially be converted.
    if (!C.empty() && C.isLegal(MaskDomain))
      Closures.push_back(std::move(C));
  }

  for (Closure &C : Closures) {
    LLVM_DEBUG(C.dump(MRI));
    if (isReassignmentProfitable(C, MaskDomain)) {
      reassign(C, MaskDomain);
      ++NumClosuresConverted;
      Changed = true;
    }
  }

  LLVM_DEBUG(
      dbgs() << "***** Machine Function after Domain Reassignment *****\n");
  LLVM_DEBUG(MF.print(dbgs()));

  return Changed;
}

INITIALIZE_PASS(X86DomainReassignment, "x86-domain-reassignment",
                "X86 Domain Reassignment Pass", false, false)

/// Returns an instance of the Domain Reassignment pass.
FunctionPass *llvm::createX86DomainReassignmentPass() {
  return new X86DomainReassignment();
}