//===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the SystemZ implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "SystemZInstrInfo.h"
#include "MCTargetDesc/SystemZMCTargetDesc.h"
#include "SystemZInstrBuilder.h"
#include "SystemZSubtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRMAP_INFO
#include "SystemZGenInstrInfo.inc"

#define DEBUG_TYPE "systemz-II"
STATISTIC(LOCRMuxJumps, "Number of LOCRMux jump-sequences (lower is better)");

// Return a mask with Count low bits set.
static uint64_t allOnes(unsigned int Count) {
  return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
}
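// Note: allOnes builds the mask with two shifts rather than a single shift by
// Count so that Count == 64 never shifts a 64-bit value by 64, which would be
// undefined behavior. For example, allOnes(3) == 0x7 and allOnes(64) == ~0ULL.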
// Reg should be a 32-bit GPR.  Return true if it is a high register rather
// than a low register.
static bool isHighReg(unsigned int Reg) {
  if (SystemZ::GRH32BitRegClass.contains(Reg))
    return true;
  assert(SystemZ::GR32BitRegClass.contains(Reg) && "Invalid GRX32");
  return false;
}

// Pin the vtable to this file.
void SystemZInstrInfo::anchor() {}
SystemZInstrInfo::SystemZInstrInfo(SystemZSubtarget &sti)
    : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
      RI(), STI(sti) {
}
// MI is a 128-bit load or store.  Split it into two 64-bit loads or stores,
// each having the opcode given by NewOpcode.
void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
                                 unsigned NewOpcode) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();

  // Get two load or store instructions.  Use the original instruction for one
  // of them (arbitrarily the second here) and create a clone for the other.
  MachineInstr *EarlierMI = MF.CloneMachineInstr(&*MI);
  MBB->insert(MI, EarlierMI);

  // Set up the two 64-bit registers and remember super reg and its flags.
  MachineOperand &HighRegOp = EarlierMI->getOperand(0);
  MachineOperand &LowRegOp = MI->getOperand(0);
  unsigned Reg128 = LowRegOp.getReg();
  unsigned Reg128Killed = getKillRegState(LowRegOp.isKill());
  unsigned Reg128Undef = getUndefRegState(LowRegOp.isUndef());
  HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
  LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));

  if (MI->mayStore()) {
    // Add implicit uses of the super register in case one of the subregs is
    // undefined. We could track liveness and skip storing an undefined
    // subreg, but this is hopefully rare (discovered with llvm-stress).
    // If Reg128 was killed, set kill flag on MI.
    unsigned Reg128UndefImpl = (Reg128Undef | RegState::Implicit);
    MachineInstrBuilder(MF, EarlierMI).addReg(Reg128, Reg128UndefImpl);
    MachineInstrBuilder(MF, MI).addReg(Reg128, (Reg128UndefImpl | Reg128Killed));
  }

  // The address in the first (high) instruction is already correct.
  // Adjust the offset in the second (low) instruction.
  MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
  MachineOperand &LowOffsetOp = MI->getOperand(2);
  LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);

  // Clear the kill flags on the registers in the first instruction.
  if (EarlierMI->getOperand(0).isReg() && EarlierMI->getOperand(0).isUse())
    EarlierMI->getOperand(0).setIsKill(false);
  EarlierMI->getOperand(1).setIsKill(false);
  EarlierMI->getOperand(3).setIsKill(false);

  // Set the opcodes.
  unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
  unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
  assert(HighOpcode && LowOpcode && "Both offsets should be in range");

  EarlierMI->setDesc(get(HighOpcode));
  MI->setDesc(get(LowOpcode));
}
// Split ADJDYNALLOC instruction MI.
void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  MachineOperand &OffsetMO = MI->getOperand(2);

  uint64_t Offset = (MFFrame.getMaxCallFrameSize() +
                     SystemZMC::CallFrameSize +
                     OffsetMO.getImm());
  unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
  assert(NewOpcode && "No support for huge argument lists yet");
  MI->setDesc(get(NewOpcode));
  OffsetMO.setImm(Offset);
}
// MI is an RI-style pseudo instruction.  Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32.  ConvertHigh is true if LowOpcode takes a signed operand
// and HighOpcode takes an unsigned 32-bit operand.  In those cases,
// MI has the same kind of operand as LowOpcode, so needs to be converted
// if HighOpcode is used.
void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode,
                                      unsigned HighOpcode,
                                      bool ConvertHigh) const {
  unsigned Reg = MI.getOperand(0).getReg();
  bool IsHigh = isHighReg(Reg);
  MI.setDesc(get(IsHigh ? HighOpcode : LowOpcode));
  if (IsHigh && ConvertHigh)
    MI.getOperand(1).setImm(uint32_t(MI.getOperand(1).getImm()));
}
// MI is a three-operand RIE-style pseudo instruction.  Replace it with
// LowOpcodeK if the registers are both low GR32s, otherwise use a move
// followed by HighOpcode or LowOpcode, depending on whether the target
// is a high or low GR32.
void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
                                       unsigned LowOpcodeK,
                                       unsigned HighOpcode) const {
  unsigned DestReg = MI.getOperand(0).getReg();
  unsigned SrcReg = MI.getOperand(1).getReg();
  bool DestIsHigh = isHighReg(DestReg);
  bool SrcIsHigh = isHighReg(SrcReg);
  if (!DestIsHigh && !SrcIsHigh)
    MI.setDesc(get(LowOpcodeK));
  else {
    emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, SrcReg,
                  SystemZ::LR, 32, MI.getOperand(1).isKill(),
                  MI.getOperand(1).isUndef());
    MI.setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
    MI.getOperand(1).setReg(DestReg);
    MI.tieOperands(0, 1);
  }
}
// MI is an RXY-style pseudo instruction.  Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32.
void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
                                       unsigned HighOpcode) const {
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Opcode = getOpcodeForOffset(isHighReg(Reg) ? HighOpcode : LowOpcode,
                                       MI.getOperand(2).getImm());
  MI.setDesc(get(Opcode));
}
// MI is a load-on-condition pseudo instruction with a single register
// (source or destination) operand.  Replace it with LowOpcode if the
// register is a low GR32 and HighOpcode if the register is a high GR32.
void SystemZInstrInfo::expandLOCPseudo(MachineInstr &MI, unsigned LowOpcode,
                                       unsigned HighOpcode) const {
  unsigned Reg = MI.getOperand(0).getReg();
  unsigned Opcode = isHighReg(Reg) ? HighOpcode : LowOpcode;
  MI.setDesc(get(Opcode));
}
// MI is a load-register-on-condition pseudo instruction.  Replace it with
// LowOpcode if source and destination are both low GR32s and HighOpcode if
// source and destination are both high GR32s.
void SystemZInstrInfo::expandLOCRPseudo(MachineInstr &MI, unsigned LowOpcode,
                                        unsigned HighOpcode) const {
  unsigned DestReg = MI.getOperand(0).getReg();
  unsigned SrcReg = MI.getOperand(2).getReg();
  bool DestIsHigh = isHighReg(DestReg);
  bool SrcIsHigh = isHighReg(SrcReg);

  if (!DestIsHigh && !SrcIsHigh)
    MI.setDesc(get(LowOpcode));
  else if (DestIsHigh && SrcIsHigh)
    MI.setDesc(get(HighOpcode));

  // If we were unable to implement the pseudo with a single instruction, we
  // need to convert it back into a branch sequence.  This cannot be done here
  // since the caller of expandPostRAPseudo does not handle changes to the CFG
  // correctly.  This change is deferred to the SystemZExpandPseudo pass.
}
// MI is an RR-style pseudo instruction that zero-extends the low Size bits
// of one GRX32 into another.  Replace it with LowOpcode if both operands
// are low registers, otherwise use RISB[LH]G.
void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
                                        unsigned Size) const {
  MachineInstrBuilder MIB =
    emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(),
                  MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
                  LowOpcode, Size, MI.getOperand(1).isKill(),
                  MI.getOperand(1).isUndef());

  // Keep the remaining operands as-is.
  for (unsigned I = 2; I < MI.getNumOperands(); ++I)
    MIB.add(MI.getOperand(I));

  MI.eraseFromParent();
}
void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  const unsigned Reg64 = MI->getOperand(0).getReg();
  const unsigned Reg32 = RI.getSubReg(Reg64, SystemZ::subreg_l32);

  // EAR can only load the low subregister so use a shift for %a0 to produce
  // the GR containing %a0 and %a1.

  // ear <reg>, %a0
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
    .addReg(SystemZ::A0)
    .addReg(Reg64, RegState::ImplicitDefine);

  // sllg <reg>, <reg>, 32
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::SLLG), Reg64)
    .addReg(Reg64)
    .addReg(0)
    .addImm(32);

  // ear <reg>, %a1
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
    .addReg(SystemZ::A1);

  // lg <reg>, 40(<reg>)
  MI->setDesc(get(SystemZ::LG));
  MachineInstrBuilder(MF, MI).addReg(Reg64).addImm(40).addReg(0);
}
// Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
// DestReg before MBBI in MBB.  Use LowLowOpcode when both DestReg and SrcReg
// are low registers, otherwise use RISB[LH]G.  Size is the number of bits
// taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR).
// KillSrc is true if this move is the last use of SrcReg.
MachineInstrBuilder
SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                const DebugLoc &DL, unsigned DestReg,
                                unsigned SrcReg, unsigned LowLowOpcode,
                                unsigned Size, bool KillSrc,
                                bool UndefSrc) const {
  unsigned Opcode;
  bool DestIsHigh = isHighReg(DestReg);
  bool SrcIsHigh = isHighReg(SrcReg);
  if (DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBHH;
  else if (DestIsHigh && !SrcIsHigh)
    Opcode = SystemZ::RISBHL;
  else if (!DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBLH;
  else
    return BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc));

  unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0);
  return BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(DestReg, RegState::Undef)
    .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc))
    .addImm(32 - Size).addImm(128 + 31).addImm(Rotate);
}
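// Note on the RISB[LH]G form above: the rotate amount of 32 is what moves the
// selected bits between the high and low 32-bit halves of the 64-bit register,
// and adding 128 to the end-bit operand sets the "zero remaining bits" flag of
// the instruction, so everything outside the selected range is cleared.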
MachineInstr *SystemZInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                       bool NewMI,
                                                       unsigned OpIdx1,
                                                       unsigned OpIdx2) const {
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (!NewMI)
      return MI;
    return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
  };

  switch (MI.getOpcode()) {
  case SystemZ::LOCRMux:
  case SystemZ::LOCFHR:
  case SystemZ::LOCR:
  case SystemZ::LOCGR: {
    auto &WorkingMI = cloneIfNew(MI);
    // Invert condition.
    unsigned CCValid = WorkingMI.getOperand(3).getImm();
    unsigned CCMask = WorkingMI.getOperand(4).getImm();
    WorkingMI.getOperand(4).setImm(CCMask ^ CCValid);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  default:
    return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
  }
}
// If MI is a simple load or store for a frame object, return the register
// it loads or stores and set FrameIndex to the index of the frame object.
// Return 0 otherwise.
//
// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static int isSimpleMove(const MachineInstr &MI, int &FrameIndex,
                        unsigned Flag) {
  const MCInstrDesc &MCID = MI.getDesc();
  if ((MCID.TSFlags & Flag) && MI.getOperand(1).isFI() &&
      MI.getOperand(2).getImm() == 0 && MI.getOperand(3).getReg() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }
  return 0;
}
unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
}

unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                              int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}
bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr &MI,
                                       int &DestFrameIndex,
                                       int &SrcFrameIndex) const {
  // Check for MVC 0(Length,FI1),0(FI2)
  const MachineFrameInfo &MFI = MI.getParent()->getParent()->getFrameInfo();
  if (MI.getOpcode() != SystemZ::MVC || !MI.getOperand(0).isFI() ||
      MI.getOperand(1).getImm() != 0 || !MI.getOperand(3).isFI() ||
      MI.getOperand(4).getImm() != 0)
    return false;

  // Check that Length covers the full slots.
  int64_t Length = MI.getOperand(2).getImm();
  unsigned FI1 = MI.getOperand(0).getIndex();
  unsigned FI2 = MI.getOperand(3).getIndex();
  if (MFI.getObjectSize(FI1) != Length ||
      MFI.getObjectSize(FI2) != Length)
    return false;

  DestFrameIndex = FI1;
  SrcFrameIndex = FI2;
  return true;
}
bool SystemZInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // Most of the code and comments here are boilerplate.

  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;

    // Working from the bottom, when we see a non-terminator instruction, we're
    // done.
    if (!isUnpredicatedTerminator(*I))
      break;

    // A terminator that isn't a branch can't easily be handled by this
    // analysis.
    if (!I->isBranch())
      return true;

    // Can't handle indirect branches.
    SystemZII::Branch Branch(getBranchInfo(*I));
    if (!Branch.Target->isMBB())
      return true;

    // Punt on compound branches.
    if (Branch.Type != SystemZII::BranchNormal)
      return true;

    if (Branch.CCMask == SystemZ::CCMASK_ANY) {
      // Handle unconditional branches.
      if (!AllowModify) {
        TBB = Branch.Target->getMBB();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      while (std::next(I) != MBB.end())
        std::next(I)->eraseFromParent();

      Cond.clear();
      FBB = nullptr;

      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) {
        TBB = nullptr;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = Branch.Target->getMBB();
      continue;
    }

    // Working from the bottom, handle the first conditional branch.
    if (Cond.empty()) {
      // FIXME: add X86-style branch swap
      FBB = TBB;
      TBB = Branch.Target->getMBB();
      Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
      Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
      continue;
    }

    // Handle subsequent conditional branches.
    assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");

    // Only handle the case where all conditional branches branch to the same
    // destination.
    if (TBB != Branch.Target->getMBB())
      return true;

    // If the conditions are the same, we can leave them alone.
    unsigned OldCCValid = Cond[0].getImm();
    unsigned OldCCMask = Cond[1].getImm();
    if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
      continue;

    // FIXME: Try combining conditions like X86 does.  Should be easy on Z!
    return false;
  }

  return false;
}
unsigned SystemZInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  // Most of the code and comments here are boilerplate.
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;
    if (!I->isBranch())
      break;
    if (!getBranchInfo(*I).Target->isMBB())
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}
bool SystemZInstrInfo::
reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 2 && "Invalid condition");
  Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
  return false;
}
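// Note: Cond[0] holds the CCValid mask (the condition codes the compare can
// produce) and Cond[1] the CCMask of codes that take the branch. XOR-ing the
// mask with CCValid therefore selects exactly the remaining valid condition
// codes, i.e. the inverted branch condition.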
unsigned SystemZInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock *TBB,
                                        MachineBasicBlock *FBB,
                                        ArrayRef<MachineOperand> Cond,
                                        const DebugLoc &DL,
                                        int *BytesAdded) const {
  // In this function we output 32-bit branches, which should always
  // have enough range.  They can be shortened and relaxed by later code
  // in the pipeline, if desired.

  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "SystemZ branch conditions have one component!");
  assert(!BytesAdded && "code size not handled");

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
    return 1;
  }

  // Conditional branch.
  unsigned Count = 0;
  unsigned CCValid = Cond[0].getImm();
  unsigned CCMask = Cond[1].getImm();
  BuildMI(&MBB, DL, get(SystemZ::BRC))
    .addImm(CCValid).addImm(CCMask).addMBB(TBB);
  ++Count;

  if (FBB) {
    // Two-way Conditional branch.  Insert the second branch.
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
    ++Count;
  }
  return Count;
}
bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &Mask,
                                      int &Value) const {
  assert(MI.isCompare() && "Caller should have checked for a comparison");

  if (MI.getNumExplicitOperands() == 2 && MI.getOperand(0).isReg() &&
      MI.getOperand(1).isImm()) {
    SrcReg = MI.getOperand(0).getReg();
    SrcReg2 = 0;
    Value = MI.getOperand(1).getImm();
    Mask = ~0;
    return true;
  }

  return false;
}
// If Reg is a virtual register, return its definition, otherwise return null.
static MachineInstr *getDef(unsigned Reg,
                            const MachineRegisterInfo *MRI) {
  if (TargetRegisterInfo::isPhysicalRegister(Reg))
    return nullptr;
  return MRI->getUniqueVRegDef(Reg);
}

// Return true if MI is a shift of type Opcode by Imm bits.
static bool isShift(MachineInstr *MI, unsigned Opcode, int64_t Imm) {
  return (MI->getOpcode() == Opcode &&
          !MI->getOperand(2).getReg() &&
          MI->getOperand(3).getImm() == Imm);
}

// If the destination of MI has no uses, delete it as dead.
static void eraseIfDead(MachineInstr *MI, const MachineRegisterInfo *MRI) {
  if (MRI->use_nodbg_empty(MI->getOperand(0).getReg()))
    MI->eraseFromParent();
}
// Compare compares SrcReg against zero.  Check whether SrcReg contains
// the result of an IPM sequence whose input CC survives until Compare,
// and whether Compare is therefore redundant.  Delete it and return
// true if so.
static bool removeIPMBasedCompare(MachineInstr &Compare, unsigned SrcReg,
                                  const MachineRegisterInfo *MRI,
                                  const TargetRegisterInfo *TRI) {
  MachineInstr *LGFR = nullptr;
  MachineInstr *RLL = getDef(SrcReg, MRI);
  if (RLL && RLL->getOpcode() == SystemZ::LGFR) {
    LGFR = RLL;
    RLL = getDef(LGFR->getOperand(1).getReg(), MRI);
  }
  if (!RLL || !isShift(RLL, SystemZ::RLL, 31))
    return false;

  MachineInstr *SRL = getDef(RLL->getOperand(1).getReg(), MRI);
  if (!SRL || !isShift(SRL, SystemZ::SRL, SystemZ::IPM_CC))
    return false;

  MachineInstr *IPM = getDef(SRL->getOperand(1).getReg(), MRI);
  if (!IPM || IPM->getOpcode() != SystemZ::IPM)
    return false;

  // Check that there are no assignments to CC between the IPM and Compare.
  if (IPM->getParent() != Compare.getParent())
    return false;
  MachineBasicBlock::iterator MBBI = IPM, MBBE = Compare.getIterator();
  for (++MBBI; MBBI != MBBE; ++MBBI) {
    MachineInstr &MI = *MBBI;
    if (MI.modifiesRegister(SystemZ::CC, TRI))
      return false;
  }

  Compare.eraseFromParent();
  if (LGFR)
    eraseIfDead(LGFR, MRI);
  eraseIfDead(RLL, MRI);
  eraseIfDead(SRL, MRI);
  eraseIfDead(IPM, MRI);
  return true;
}
bool SystemZInstrInfo::optimizeCompareInstr(
    MachineInstr &Compare, unsigned SrcReg, unsigned SrcReg2, int Mask,
    int Value, const MachineRegisterInfo *MRI) const {
  assert(!SrcReg2 && "Only optimizing constant comparisons so far");
  bool IsLogical = (Compare.getDesc().TSFlags & SystemZII::IsLogical) != 0;
  return Value == 0 && !IsLogical &&
         removeIPMBasedCompare(Compare, SrcReg, MRI, &RI);
}
bool SystemZInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                       ArrayRef<MachineOperand> Pred,
                                       unsigned TrueReg, unsigned FalseReg,
                                       int &CondCycles, int &TrueCycles,
                                       int &FalseCycles) const {
  // Not all subtargets have LOCR instructions.
  if (!STI.hasLoadStoreOnCond())
    return false;
  if (Pred.size() != 2)
    return false;

  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
    RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // We have LOCR instructions for 32 and 64 bit general purpose registers.
  if ((STI.hasLoadStoreOnCond2() &&
       SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) ||
      SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
      SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
    CondCycles = 2;
    TrueCycles = 2;
    FalseCycles = 2;
    return true;
  }

  // Can't do anything else.
  return false;
}
void SystemZInstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const DebugLoc &DL, unsigned DstReg,
                                    ArrayRef<MachineOperand> Pred,
                                    unsigned TrueReg,
                                    unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);

  assert(Pred.size() == 2 && "Invalid condition");
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();

  unsigned Opc;
  if (SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) {
    if (STI.hasLoadStoreOnCond2())
      Opc = SystemZ::LOCRMux;
    else {
      Opc = SystemZ::LOCR;
      MRI.constrainRegClass(DstReg, &SystemZ::GR32BitRegClass);
      unsigned TReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
      unsigned FReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
      BuildMI(MBB, I, DL, get(TargetOpcode::COPY), TReg).addReg(TrueReg);
      BuildMI(MBB, I, DL, get(TargetOpcode::COPY), FReg).addReg(FalseReg);
      TrueReg = TReg;
      FalseReg = FReg;
    }
  } else if (SystemZ::GR64BitRegClass.hasSubClassEq(RC))
    Opc = SystemZ::LOCGR;
  else
    llvm_unreachable("Invalid register class");

  BuildMI(MBB, I, DL, get(Opc), DstReg)
    .addReg(FalseReg).addReg(TrueReg)
    .addImm(CCValid).addImm(CCMask);
}
bool SystemZInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                                     unsigned Reg,
                                     MachineRegisterInfo *MRI) const {
  unsigned DefOpc = DefMI.getOpcode();
  if (DefOpc != SystemZ::LHIMux && DefOpc != SystemZ::LHI &&
      DefOpc != SystemZ::LGHI)
    return false;
  if (DefMI.getOperand(0).getReg() != Reg)
    return false;
  int32_t ImmVal = (int32_t)DefMI.getOperand(1).getImm();

  unsigned UseOpc = UseMI.getOpcode();
  unsigned NewUseOpc;
  unsigned UseIdx;
  int CommuteIdx = -1;
  switch (UseOpc) {
  case SystemZ::LOCRMux:
    if (!STI.hasLoadStoreOnCond2())
      return false;
    NewUseOpc = SystemZ::LOCHIMux;
    if (UseMI.getOperand(2).getReg() == Reg)
      UseIdx = 2;
    else if (UseMI.getOperand(1).getReg() == Reg)
      UseIdx = 2, CommuteIdx = 1;
    else
      return false;
    break;
  case SystemZ::LOCGR:
    if (!STI.hasLoadStoreOnCond2())
      return false;
    NewUseOpc = SystemZ::LOCGHI;
    if (UseMI.getOperand(2).getReg() == Reg)
      UseIdx = 2;
    else if (UseMI.getOperand(1).getReg() == Reg)
      UseIdx = 2, CommuteIdx = 1;
    else
      return false;
    break;
  default:
    return false;
  }

  if (CommuteIdx != -1)
    if (!commuteInstruction(UseMI, false, CommuteIdx, UseIdx))
      return false;

  bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
  UseMI.setDesc(get(NewUseOpc));
  UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
  if (DeleteDef)
    DefMI.eraseFromParent();

  return true;
}
bool SystemZInstrInfo::isPredicable(const MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();
  if (Opcode == SystemZ::Return ||
      Opcode == SystemZ::Trap ||
      Opcode == SystemZ::CallJG ||
      Opcode == SystemZ::CallBR)
    return true;
  return false;
}
bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned ExtraPredCycles,
                    BranchProbability Probability) const {
  // Avoid using conditional returns at the end of a loop (since then
  // we'd need to emit an unconditional branch to the beginning anyway,
  // making the loop body longer).  This doesn't apply for low-probability
  // loops (e.g. compare-and-swap retry), so just decide based on branch
  // probability instead of looping structure.
  // However, since Compare and Trap instructions cost the same as a regular
  // Compare instruction, we should allow the if conversion to convert this
  // into a Conditional Compare regardless of the branch probability.
  if (MBB.getLastNonDebugInstr()->getOpcode() != SystemZ::Trap &&
      MBB.succ_empty() && Probability < BranchProbability(1, 8))
    return false;
  // For now only convert single instructions.
  return NumCycles == 1;
}
bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TMBB,
                    unsigned NumCyclesT, unsigned ExtraPredCyclesT,
                    MachineBasicBlock &FMBB,
                    unsigned NumCyclesF, unsigned ExtraPredCyclesF,
                    BranchProbability Probability) const {
  // For now avoid converting mutually-exclusive cases.
  return false;
}
bool SystemZInstrInfo::
isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                          BranchProbability Probability) const {
  // For now only duplicate single instructions.
  return NumCycles == 1;
}
bool SystemZInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  assert(Pred.size() == 2 && "Invalid condition");
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();
  assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
  unsigned Opcode = MI.getOpcode();
  if (Opcode == SystemZ::Trap) {
    MI.setDesc(get(SystemZ::CondTrap));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid).addImm(CCMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::Return) {
    MI.setDesc(get(SystemZ::CondReturn));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid).addImm(CCMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::CallJG) {
    MachineOperand FirstOp = MI.getOperand(0);
    const uint32_t *RegMask = MI.getOperand(1).getRegMask();
    MI.RemoveOperand(1);
    MI.RemoveOperand(0);
    MI.setDesc(get(SystemZ::CallBRCL));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid).addImm(CCMask)
      .add(FirstOp)
      .addRegMask(RegMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::CallBR) {
    const uint32_t *RegMask = MI.getOperand(0).getRegMask();
    MI.RemoveOperand(0);
    MI.setDesc(get(SystemZ::CallBCR));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
      .addImm(CCValid).addImm(CCMask)
      .addRegMask(RegMask)
      .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  return false;
}
void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI,
                                   const DebugLoc &DL, unsigned DestReg,
                                   unsigned SrcReg, bool KillSrc) const {
  // Split 128-bit GPR moves into two 64-bit moves. Add implicit uses of the
  // super register in case one of the subregs is undefined.
  // This handles ADDR128 too.
  if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
                RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
    MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
      .addReg(SrcReg, RegState::Implicit);
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
                RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
    MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
      .addReg(SrcReg, (getKillRegState(KillSrc) | RegState::Implicit));
    return;
  }

  if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) {
    emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc,
                  false);
    return;
  }

  // Move 128-bit floating-point values between VR128 and FP128.
  if (SystemZ::VR128BitRegClass.contains(DestReg) &&
      SystemZ::FP128BitRegClass.contains(SrcReg)) {
    unsigned SrcRegHi =
      RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_h64),
                             SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
    unsigned SrcRegLo =
      RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_l64),
                             SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);

    BuildMI(MBB, MBBI, DL, get(SystemZ::VMRHG), DestReg)
      .addReg(SrcRegHi, getKillRegState(KillSrc))
      .addReg(SrcRegLo, getKillRegState(KillSrc));
    return;
  }
  if (SystemZ::FP128BitRegClass.contains(DestReg) &&
      SystemZ::VR128BitRegClass.contains(SrcReg)) {
    unsigned DestRegHi =
      RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_h64),
                             SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
    unsigned DestRegLo =
      RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_l64),
                             SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);

    if (DestRegHi != SrcReg)
      copyPhysReg(MBB, MBBI, DL, DestRegHi, SrcReg, false);
    BuildMI(MBB, MBBI, DL, get(SystemZ::VREPG), DestRegLo)
      .addReg(SrcReg, getKillRegState(KillSrc)).addImm(1);
    return;
  }

  // Move CC value from/to a GR32.
  if (SrcReg == SystemZ::CC) {
    auto MIB = BuildMI(MBB, MBBI, DL, get(SystemZ::IPM), DestReg);
    if (KillSrc) {
      const MachineFunction *MF = MBB.getParent();
      const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
      MIB->addRegisterKilled(SrcReg, TRI);
    }
    return;
  }
  if (DestReg == SystemZ::CC) {
    BuildMI(MBB, MBBI, DL, get(SystemZ::TMLH))
      .addReg(SrcReg, getKillRegState(KillSrc))
      .addImm(3 << (SystemZ::IPM_CC - 16));
    return;
  }

  // Everything else needs only one instruction.
  unsigned Opcode;
  if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LGR;
  else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
    // For z13 we prefer LDR over LER to avoid partial register dependencies.
    Opcode = STI.hasVector() ? SystemZ::LDR32 : SystemZ::LER;
  else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LDR;
  else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LXR;
  else if (SystemZ::VR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR32;
  else if (SystemZ::VR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR64;
  else if (SystemZ::VR128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR;
  else if (SystemZ::AR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::CPYA;
  else if (SystemZ::AR32BitRegClass.contains(DestReg) &&
           SystemZ::GR32BitRegClass.contains(SrcReg))
    Opcode = SystemZ::SAR;
  else if (SystemZ::GR32BitRegClass.contains(DestReg) &&
           SystemZ::AR32BitRegClass.contains(SrcReg))
    Opcode = SystemZ::EAR;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
    .addReg(SrcReg, getKillRegState(KillSrc));
}
void SystemZInstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
    bool isKill, int FrameIdx, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
                      .addReg(SrcReg, getKillRegState(isKill)),
                    FrameIdx);
}
void SystemZInstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
    int FrameIdx, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
                    FrameIdx);
}
// Return true if MI is a simple load or store with a 12-bit displacement
// and no index.  Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  return ((MCID.TSFlags & Flag) &&
          isUInt<12>(MI->getOperand(2).getImm()) &&
          MI->getOperand(3).getReg() == 0);
}

namespace {

struct LogicOp {
  LogicOp() = default;
  LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
    : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}

  explicit operator bool() const { return RegSize; }

  unsigned RegSize = 0;
  unsigned ImmLSB = 0;
  unsigned ImmSize = 0;
};

} // end anonymous namespace
static LogicOp interpretAndImmediate(unsigned Opcode) {
  switch (Opcode) {
  case SystemZ::NILMux: return LogicOp(32,  0, 16);
  case SystemZ::NIHMux: return LogicOp(32, 16, 16);
  case SystemZ::NILL64: return LogicOp(64,  0, 16);
  case SystemZ::NILH64: return LogicOp(64, 16, 16);
  case SystemZ::NIHL64: return LogicOp(64, 32, 16);
  case SystemZ::NIHH64: return LogicOp(64, 48, 16);
  case SystemZ::NIFMux: return LogicOp(32,  0, 32);
  case SystemZ::NILF64: return LogicOp(64,  0, 32);
  case SystemZ::NIHF64: return LogicOp(64, 32, 32);
  default:              return LogicOp();
  }
}
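// For example, NILL64 ("and immediate" on bits 0-15 of a 64-bit register) is
// described as LogicOp(64, /*ImmLSB=*/0, /*ImmSize=*/16), while NIHF64, which
// covers the high 32 bits, is LogicOp(64, 32, 32).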
// If the CC def on OldMI is dead, mark the corresponding CC def on NewMI
// as dead too.
static void transferDeadCC(MachineInstr *OldMI, MachineInstr *NewMI) {
  if (OldMI->registerDefIsDead(SystemZ::CC)) {
    MachineOperand *CCDef = NewMI->findRegisterDefOperand(SystemZ::CC);
    if (CCDef != nullptr)
      CCDef->setIsDead(true);
  }
}
// Used to return from convertToThreeAddress after replacing two-address
// instruction OldMI with three-address instruction NewMI.
static MachineInstr *finishConvertToThreeAddress(MachineInstr *OldMI,
                                                 MachineInstr *NewMI,
                                                 LiveVariables *LV) {
  if (LV) {
    unsigned NumOps = OldMI->getNumOperands();
    for (unsigned I = 1; I < NumOps; ++I) {
      MachineOperand &Op = OldMI->getOperand(I);
      if (Op.isReg() && Op.isKill())
        LV->replaceKillInstruction(Op.getReg(), *OldMI, *NewMI);
    }
  }
  transferDeadCC(OldMI, NewMI);
  return NewMI;
}
MachineInstr *SystemZInstrInfo::convertToThreeAddress(
    MachineFunction::iterator &MFI, MachineInstr &MI, LiveVariables *LV) const {
  MachineBasicBlock *MBB = MI.getParent();
  MachineFunction *MF = MBB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();

  unsigned Opcode = MI.getOpcode();
  unsigned NumOps = MI.getNumOperands();

  // Try to convert something like SLL into SLLK, if supported.
  // We prefer to keep the two-operand form where possible both
  // because it tends to be shorter and because some instructions
  // have memory forms that can be used during spilling.
  if (STI.hasDistinctOps()) {
    MachineOperand &Dest = MI.getOperand(0);
    MachineOperand &Src = MI.getOperand(1);
    unsigned DestReg = Dest.getReg();
    unsigned SrcReg = Src.getReg();
    // AHIMux is only really a three-operand instruction when both operands
    // are low registers.  Try to constrain both operands to be low if
    // they are initially high.
    if (Opcode == SystemZ::AHIMux &&
        TargetRegisterInfo::isVirtualRegister(DestReg) &&
        TargetRegisterInfo::isVirtualRegister(SrcReg) &&
        MRI.getRegClass(DestReg)->contains(SystemZ::R1L) &&
        MRI.getRegClass(SrcReg)->contains(SystemZ::R1L)) {
      MRI.constrainRegClass(DestReg, &SystemZ::GR32BitRegClass);
      MRI.constrainRegClass(SrcReg, &SystemZ::GR32BitRegClass);
    }
    int ThreeOperandOpcode = SystemZ::getThreeOperandOpcode(Opcode);
    if (ThreeOperandOpcode >= 0) {
      // Create three address instruction without adding the implicit
      // operands. Those will instead be copied over from the original
      // instruction by the loop below.
      MachineInstrBuilder MIB(
          *MF, MF->CreateMachineInstr(get(ThreeOperandOpcode), MI.getDebugLoc(),
                                      /*NoImplicit=*/true));
      MIB.add(Dest);
      // Keep the kill state, but drop the tied flag.
      MIB.addReg(Src.getReg(), getKillRegState(Src.isKill()), Src.getSubReg());
      // Keep the remaining operands as-is.
      for (unsigned I = 2; I < NumOps; ++I)
        MIB.add(MI.getOperand(I));
      MBB->insert(MI, MIB);
      return finishConvertToThreeAddress(&MI, MIB, LV);
    }
  }

  // Try to convert an AND into an RISBG-type instruction.
  if (LogicOp And = interpretAndImmediate(Opcode)) {
    uint64_t Imm = MI.getOperand(2).getImm() << And.ImmLSB;
    // AND IMMEDIATE leaves the other bits of the register unchanged.
    Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
    unsigned Start, End;
    if (isRxSBGMask(Imm, And.RegSize, Start, End)) {
      unsigned NewOpcode;
      if (And.RegSize == 64) {
        NewOpcode = SystemZ::RISBG;
        // Prefer RISBGN if available, since it does not clobber CC.
        if (STI.hasMiscellaneousExtensions())
          NewOpcode = SystemZ::RISBGN;
      } else {
        NewOpcode = SystemZ::RISBMux;
        Start &= 31;
        End &= 31;
      }
      MachineOperand &Dest = MI.getOperand(0);
      MachineOperand &Src = MI.getOperand(1);
      MachineInstrBuilder MIB =
        BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpcode))
          .add(Dest)
          .addReg(0)
          .addReg(Src.getReg(), getKillRegState(Src.isKill()),
                  Src.getSubReg())
          .addImm(Start)
          .addImm(End + 128)
          .addImm(0);
      return finishConvertToThreeAddress(&MI, MIB, LV);
    }
  }
  return nullptr;
}
MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex,
    LiveIntervals *LIS) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned Size = MFI.getObjectSize(FrameIndex);
  unsigned Opcode = MI.getOpcode();

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    if (LIS != nullptr && (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
        isInt<8>(MI.getOperand(2).getImm()) && !MI.getOperand(3).getReg()) {

      // Check CC liveness, since new instruction introduces a dead
      // def of CC.
      MCRegUnitIterator CCUnit(SystemZ::CC, TRI);
      LiveRange &CCLiveRange = LIS->getRegUnit(*CCUnit);
      ++CCUnit;
      assert(!CCUnit.isValid() && "CC only has one reg unit.");
      SlotIndex MISlot =
        LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot();
      if (!CCLiveRange.liveAt(MISlot)) {
        // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
        MachineInstr *BuiltMI = BuildMI(*InsertPt->getParent(), InsertPt,
                                        MI.getDebugLoc(), get(SystemZ::AGSI))
                                    .addFrameIndex(FrameIndex)
                                    .addImm(0)
                                    .addImm(MI.getOperand(2).getImm());
        BuiltMI->findRegisterDefOperand(SystemZ::CC)->setIsDead(true);
        CCLiveRange.createDeadDef(MISlot, LIS->getVNInfoAllocator());
        return BuiltMI;
      }
    }
    return nullptr;
  }

  // All other cases require a single operand.
  if (Ops.size() != 1)
    return nullptr;

  unsigned OpNum = Ops[0];
  assert(Size * 8 ==
           TRI->getRegSizeInBits(*MF.getRegInfo()
                               .getRegClass(MI.getOperand(OpNum).getReg())) &&
         "Invalid size combination");

  if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 &&
      isInt<8>(MI.getOperand(2).getImm())) {
    // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
    Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
    MachineInstr *BuiltMI =
        BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm(MI.getOperand(2).getImm());
    transferDeadCC(&MI, BuiltMI);
    return BuiltMI;
  }

  if ((Opcode == SystemZ::ALFI && OpNum == 0 &&
       isInt<8>((int32_t)MI.getOperand(2).getImm())) ||
      (Opcode == SystemZ::ALGFI && OpNum == 0 &&
       isInt<8>((int64_t)MI.getOperand(2).getImm()))) {
    // AL(G)FI %reg, CONST -> AL(G)SI %mem, CONST
    Opcode = (Opcode == SystemZ::ALFI ? SystemZ::ALSI : SystemZ::ALGSI);
    MachineInstr *BuiltMI =
        BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm((int8_t)MI.getOperand(2).getImm());
    transferDeadCC(&MI, BuiltMI);
    return BuiltMI;
  }

  if ((Opcode == SystemZ::SLFI && OpNum == 0 &&
       isInt<8>((int32_t)-MI.getOperand(2).getImm())) ||
      (Opcode == SystemZ::SLGFI && OpNum == 0 &&
       isInt<8>((int64_t)-MI.getOperand(2).getImm()))) {
    // SL(G)FI %reg, CONST -> AL(G)SI %mem, -CONST
    Opcode = (Opcode == SystemZ::SLFI ? SystemZ::ALSI : SystemZ::ALGSI);
    MachineInstr *BuiltMI =
        BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm((int8_t)-MI.getOperand(2).getImm());
    transferDeadCC(&MI, BuiltMI);
    return BuiltMI;
  }

  if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
    bool Op0IsGPR = (Opcode == SystemZ::LGDR);
    bool Op1IsGPR = (Opcode == SystemZ::LDGR);
    // If we're spilling the destination of an LDGR or LGDR, store the
    // source register instead.
    if (OpNum == 0) {
      unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
      return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                     get(StoreOpcode))
          .add(MI.getOperand(1))
          .addFrameIndex(FrameIndex)
          .addImm(0)
          .addReg(0);
    }
    // If we're spilling the source of an LDGR or LGDR, load the
    // destination register instead.
    if (OpNum == 1) {
      unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
      return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                     get(LoadOpcode))
          .add(MI.getOperand(0))
          .addFrameIndex(FrameIndex)
          .addImm(0)
          .addReg(0);
    }
  }

  // Look for cases where the source of a simple store or the destination
  // of a simple load is being spilled.  Try to use MVC instead.
  //
  // Although MVC is in practice a fast choice in these cases, it is still
  // logically a bytewise copy.  This means that we cannot use it if the
  // load or store is volatile.  We also wouldn't be able to use MVC if
  // the two memories partially overlap, but that case cannot occur here,
  // because we know that one of the memories is a full frame index.
  //
  // For performance reasons, we also want to avoid using MVC if the addresses
  // might be equal.  We don't worry about that case here, because spill slot
  // coloring happens later, and because we have special code to remove
  // MVCs that turn out to be redundant.
  if (OpNum == 0 && MI.hasOneMemOperand()) {
    MachineMemOperand *MMO = *MI.memoperands_begin();
    if (MMO->getSize() == Size && !MMO->isVolatile()) {
      // Handle conversion of loads.
      if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXLoad)) {
        return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                       get(SystemZ::MVC))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm(Size)
            .add(MI.getOperand(1))
            .addImm(MI.getOperand(2).getImm())
            .addMemOperand(MMO);
      }
      // Handle conversion of stores.
      if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXStore)) {
        return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                       get(SystemZ::MVC))
            .add(MI.getOperand(1))
            .addImm(MI.getOperand(2).getImm())
            .addImm(Size)
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addMemOperand(MMO);
      }
    }
  }

  // If the spilled operand is the final one, try to change <INSN>R
  // into <INSN>.
  int MemOpcode = SystemZ::getMemOpcode(Opcode);
  if (MemOpcode >= 0) {
    unsigned NumOps = MI.getNumExplicitOperands();
    if (OpNum == NumOps - 1) {
      const MCInstrDesc &MemDesc = get(MemOpcode);
      uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
      assert(AccessBytes != 0 && "Size of access should be known");
      assert(AccessBytes <= Size && "Access outside the frame index");
      uint64_t Offset = Size - AccessBytes;
      MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
                                        MI.getDebugLoc(), get(MemOpcode));
      for (unsigned I = 0; I < OpNum; ++I)
        MIB.add(MI.getOperand(I));
      MIB.addFrameIndex(FrameIndex).addImm(Offset);
      if (MemDesc.TSFlags & SystemZII::HasIndex)
        MIB.addReg(0);
      transferDeadCC(&MI, MIB);
      return MIB;
    }
  }

  return nullptr;
}
*SystemZInstrInfo::foldMemoryOperandImpl(
1319 MachineFunction
&MF
, MachineInstr
&MI
, ArrayRef
<unsigned> Ops
,
1320 MachineBasicBlock::iterator InsertPt
, MachineInstr
&LoadMI
,
1321 LiveIntervals
*LIS
) const {
bool SystemZInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case SystemZ::L128:
    splitMove(MI, SystemZ::LG);
    return true;

  case SystemZ::ST128:
    splitMove(MI, SystemZ::STG);
    return true;

  case SystemZ::LX:
    splitMove(MI, SystemZ::LD);
    return true;

  case SystemZ::STX:
    splitMove(MI, SystemZ::STD);
    return true;

  case SystemZ::LBMux:
    expandRXYPseudo(MI, SystemZ::LB, SystemZ::LBH);
    return true;

  case SystemZ::LHMux:
    expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
    return true;

  case SystemZ::LLCRMux:
    expandZExtPseudo(MI, SystemZ::LLCR, 8);
    return true;

  case SystemZ::LLHRMux:
    expandZExtPseudo(MI, SystemZ::LLHR, 16);
    return true;

  case SystemZ::LLCMux:
    expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
    return true;

  case SystemZ::LLHMux:
    expandRXYPseudo(MI, SystemZ::LLH, SystemZ::LLHH);
    return true;

  case SystemZ::LMux:
    expandRXYPseudo(MI, SystemZ::L, SystemZ::LFH);
    return true;

  case SystemZ::LOCMux:
    expandLOCPseudo(MI, SystemZ::LOC, SystemZ::LOCFH);
    return true;

  case SystemZ::LOCHIMux:
    expandLOCPseudo(MI, SystemZ::LOCHI, SystemZ::LOCHHI);
    return true;

  case SystemZ::LOCRMux:
    expandLOCRPseudo(MI, SystemZ::LOCR, SystemZ::LOCFHR);
    return true;

  case SystemZ::STCMux:
    expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);
    return true;

  case SystemZ::STHMux:
    expandRXYPseudo(MI, SystemZ::STH, SystemZ::STHH);
    return true;

  case SystemZ::STMux:
    expandRXYPseudo(MI, SystemZ::ST, SystemZ::STFH);
    return true;

  case SystemZ::STOCMux:
    expandLOCPseudo(MI, SystemZ::STOC, SystemZ::STOCFH);
    return true;

  case SystemZ::LHIMux:
    expandRIPseudo(MI, SystemZ::LHI, SystemZ::IIHF, true);
    return true;

  case SystemZ::IIFMux:
    expandRIPseudo(MI, SystemZ::IILF, SystemZ::IIHF, false);
    return true;

  case SystemZ::IILMux:
    expandRIPseudo(MI, SystemZ::IILL, SystemZ::IIHL, false);
    return true;

  case SystemZ::IIHMux:
    expandRIPseudo(MI, SystemZ::IILH, SystemZ::IIHH, false);
    return true;

  case SystemZ::NIFMux:
    expandRIPseudo(MI, SystemZ::NILF, SystemZ::NIHF, false);
    return true;

  case SystemZ::NILMux:
    expandRIPseudo(MI, SystemZ::NILL, SystemZ::NIHL, false);
    return true;

  case SystemZ::NIHMux:
    expandRIPseudo(MI, SystemZ::NILH, SystemZ::NIHH, false);
    return true;

  case SystemZ::OIFMux:
    expandRIPseudo(MI, SystemZ::OILF, SystemZ::OIHF, false);
    return true;

  case SystemZ::OILMux:
    expandRIPseudo(MI, SystemZ::OILL, SystemZ::OIHL, false);
    return true;

  case SystemZ::OIHMux:
    expandRIPseudo(MI, SystemZ::OILH, SystemZ::OIHH, false);
    return true;

  case SystemZ::XIFMux:
    expandRIPseudo(MI, SystemZ::XILF, SystemZ::XIHF, false);
    return true;

  case SystemZ::TMLMux:
    expandRIPseudo(MI, SystemZ::TMLL, SystemZ::TMHL, false);
    return true;

  case SystemZ::TMHMux:
    expandRIPseudo(MI, SystemZ::TMLH, SystemZ::TMHH, false);
    return true;

  case SystemZ::AHIMux:
    expandRIPseudo(MI, SystemZ::AHI, SystemZ::AIH, false);
    return true;

  case SystemZ::AHIMuxK:
    expandRIEPseudo(MI, SystemZ::AHI, SystemZ::AHIK, SystemZ::AIH);
    return true;

  case SystemZ::AFIMux:
    expandRIPseudo(MI, SystemZ::AFI, SystemZ::AIH, false);
    return true;

  case SystemZ::CHIMux:
    expandRIPseudo(MI, SystemZ::CHI, SystemZ::CIH, false);
    return true;

  case SystemZ::CFIMux:
    expandRIPseudo(MI, SystemZ::CFI, SystemZ::CIH, false);
    return true;

  case SystemZ::CLFIMux:
    expandRIPseudo(MI, SystemZ::CLFI, SystemZ::CLIH, false);
    return true;

  case SystemZ::CMux:
    expandRXYPseudo(MI, SystemZ::C, SystemZ::CHF);
    return true;

  case SystemZ::CLMux:
    expandRXYPseudo(MI, SystemZ::CL, SystemZ::CLHF);
    return true;

  case SystemZ::RISBMux: {
    bool DestIsHigh = isHighReg(MI.getOperand(0).getReg());
    bool SrcIsHigh = isHighReg(MI.getOperand(2).getReg());
    if (SrcIsHigh == DestIsHigh)
      MI.setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
    else {
      MI.setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
      MI.getOperand(5).setImm(MI.getOperand(5).getImm() ^ 32);
    }
    return true;
  }

  case SystemZ::ADJDYNALLOC:
    splitAdjDynAlloc(MI);
    return true;

  case TargetOpcode::LOAD_STACK_GUARD:
    expandLoadStackGuard(&MI);
    return true;

  default:
    return false;
  }
}
unsigned SystemZInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
  if (MI.getOpcode() == TargetOpcode::INLINEASM) {
    const MachineFunction *MF = MI.getParent()->getParent();
    const char *AsmStr = MI.getOperand(0).getSymbolName();
    return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
  }
  return MI.getDesc().getSize();
}
SystemZII::Branch
SystemZInstrInfo::getBranchInfo(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case SystemZ::BR:
  case SystemZ::BI:
  case SystemZ::J:
  case SystemZ::JG:
    return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
                             SystemZ::CCMASK_ANY, &MI.getOperand(0));

  case SystemZ::BRC:
  case SystemZ::BRCL:
    return SystemZII::Branch(SystemZII::BranchNormal, MI.getOperand(0).getImm(),
                             MI.getOperand(1).getImm(), &MI.getOperand(2));

  case SystemZ::BRCT:
  case SystemZ::BRCTH:
    return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
                             SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));

  case SystemZ::BRCTG:
    return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
                             SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));

  case SystemZ::CIJ:
  case SystemZ::CRJ:
    return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
                             MI.getOperand(2).getImm(), &MI.getOperand(3));

  case SystemZ::CLIJ:
  case SystemZ::CLRJ:
    return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
                             MI.getOperand(2).getImm(), &MI.getOperand(3));

  case SystemZ::CGIJ:
  case SystemZ::CGRJ:
    return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
                             MI.getOperand(2).getImm(), &MI.getOperand(3));

  case SystemZ::CLGIJ:
  case SystemZ::CLGRJ:
    return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
                             MI.getOperand(2).getImm(), &MI.getOperand(3));

  default:
    llvm_unreachable("Unrecognized branch opcode");
  }
}
void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
                                           unsigned &LoadOpcode,
                                           unsigned &StoreOpcode) const {
  if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
    LoadOpcode = SystemZ::L;
    StoreOpcode = SystemZ::ST;
  } else if (RC == &SystemZ::GRH32BitRegClass) {
    LoadOpcode = SystemZ::LFH;
    StoreOpcode = SystemZ::STFH;
  } else if (RC == &SystemZ::GRX32BitRegClass) {
    LoadOpcode = SystemZ::LMux;
    StoreOpcode = SystemZ::STMux;
  } else if (RC == &SystemZ::GR64BitRegClass ||
             RC == &SystemZ::ADDR64BitRegClass) {
    LoadOpcode = SystemZ::LG;
    StoreOpcode = SystemZ::STG;
  } else if (RC == &SystemZ::GR128BitRegClass ||
             RC == &SystemZ::ADDR128BitRegClass) {
    LoadOpcode = SystemZ::L128;
    StoreOpcode = SystemZ::ST128;
  } else if (RC == &SystemZ::FP32BitRegClass) {
    LoadOpcode = SystemZ::LE;
    StoreOpcode = SystemZ::STE;
  } else if (RC == &SystemZ::FP64BitRegClass) {
    LoadOpcode = SystemZ::LD;
    StoreOpcode = SystemZ::STD;
  } else if (RC == &SystemZ::FP128BitRegClass) {
    LoadOpcode = SystemZ::LX;
    StoreOpcode = SystemZ::STX;
  } else if (RC == &SystemZ::VR32BitRegClass) {
    LoadOpcode = SystemZ::VL32;
    StoreOpcode = SystemZ::VST32;
  } else if (RC == &SystemZ::VR64BitRegClass) {
    LoadOpcode = SystemZ::VL64;
    StoreOpcode = SystemZ::VST64;
  } else if (RC == &SystemZ::VF128BitRegClass ||
             RC == &SystemZ::VR128BitRegClass) {
    LoadOpcode = SystemZ::VL;
    StoreOpcode = SystemZ::VST;
  } else
    llvm_unreachable("Unsupported regclass to load or store");
}
unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
                                              int64_t Offset) const {
  const MCInstrDesc &MCID = get(Opcode);
  int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
  if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
    // Get the instruction to use for unsigned 12-bit displacements.
    int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
    if (Disp12Opcode >= 0)
      return Disp12Opcode;

    // All address-related instructions can use unsigned 12-bit
    // displacements.
    return Opcode;
  }
  if (isInt<20>(Offset) && isInt<20>(Offset2)) {
    // Get the instruction to use for signed 20-bit displacements.
    int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
    if (Disp20Opcode >= 0)
      return Disp20Opcode;

    // Check whether Opcode allows signed 20-bit displacements.
    if (MCID.TSFlags & SystemZII::Has20BitOffset)
      return Opcode;
  }
  return 0;
}
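// Note: for opcodes flagged Is128Bit, Offset + 8 is checked as well because
// splitMove() later turns such a move into two 64-bit accesses and adds 8 to
// the displacement of the low half, so both halves must stay in range.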
unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
  switch (Opcode) {
  case SystemZ::L:        return SystemZ::LT;
  case SystemZ::LY:       return SystemZ::LT;
  case SystemZ::LG:       return SystemZ::LTG;
  case SystemZ::LGF:      return SystemZ::LTGF;
  case SystemZ::LR:       return SystemZ::LTR;
  case SystemZ::LGFR:     return SystemZ::LTGFR;
  case SystemZ::LGR:      return SystemZ::LTGR;
  case SystemZ::LER:      return SystemZ::LTEBR;
  case SystemZ::LDR:      return SystemZ::LTDBR;
  case SystemZ::LXR:      return SystemZ::LTXBR;
  case SystemZ::LCDFR:    return SystemZ::LCDBR;
  case SystemZ::LPDFR:    return SystemZ::LPDBR;
  case SystemZ::LNDFR:    return SystemZ::LNDBR;
  case SystemZ::LCDFR_32: return SystemZ::LCEBR;
  case SystemZ::LPDFR_32: return SystemZ::LPEBR;
  case SystemZ::LNDFR_32: return SystemZ::LNEBR;
  // On zEC12 we prefer to use RISBGN.  But if there is a chance to
  // actually use the condition code, we may turn it back into RISBG.
  // Note that RISBG is not really a "load-and-test" instruction,
  // but sets the same condition code values, so is OK to use here.
  case SystemZ::RISBGN:   return SystemZ::RISBG;
  default:                return 0;
  }
}
// Return true if Mask matches the regexp 0*1+0*, given that zero masks
// have already been filtered out.  Store the first set bit in LSB and
// the number of set bits in Length if so.
static bool isStringOfOnes(uint64_t Mask, unsigned &LSB, unsigned &Length) {
  unsigned First = findFirstSet(Mask);
  uint64_t Top = (Mask >> First) + 1;
  if ((Top & -Top) == Top) {
    LSB = First;
    Length = findFirstSet(Top);
    return true;
  }
  return false;
}
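// Note: shifting Mask right by First strips the trailing zeros, so a 0*1+0*
// mask becomes a contiguous string of ones ending at bit 0. Adding 1 then
// gives a power of two, which is exactly what (Top & -Top) == Top tests, and
// findFirstSet(Top) is the number of ones. E.g. for Mask = 0b0111000:
// First = 3, Top = 0b1000, LSB = 3, Length = 3.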
bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
                                   unsigned &Start, unsigned &End) const {
  // Reject trivial all-zero masks.
  Mask &= allOnes(BitSize);
  if (Mask == 0)
    return false;

  // Handle the 1+0+ or 0+1+0* cases.  Start then specifies the index of
  // the msb and End specifies the index of the lsb.
  unsigned LSB, Length;
  if (isStringOfOnes(Mask, LSB, Length)) {
    Start = 63 - (LSB + Length - 1);
    End = 63 - LSB;
    return true;
  }

  // Handle the wrap-around 1+0+1+ cases.  Start then specifies the msb
  // of the low 1s and End specifies the lsb of the high 1s.
  if (isStringOfOnes(Mask ^ allOnes(BitSize), LSB, Length)) {
    assert(LSB > 0 && "Bottom bit must be set");
    assert(LSB + Length < BitSize && "Top bit must be set");
    Start = 63 - (LSB - 1);
    End = 63 - (LSB + Length);
    return true;
  }

  return false;
}
unsigned SystemZInstrInfo::getFusedCompare(unsigned Opcode,
                                           SystemZII::FusedCompareType Type,
                                           const MachineInstr *MI) const {
  switch (Opcode) {
  case SystemZ::CHI:
  case SystemZ::CGHI:
    if (!(MI && isInt<8>(MI->getOperand(1).getImm())))
      return 0;
    break;
  case SystemZ::CLFI:
  case SystemZ::CLGFI:
    if (!(MI && isUInt<8>(MI->getOperand(1).getImm())))
      return 0;
    break;
  case SystemZ::CL:
  case SystemZ::CLG:
    if (!STI.hasMiscellaneousExtensions())
      return 0;
    if (!(MI && MI->getOperand(3).getReg() == 0))
      return 0;
    break;
  }
  switch (Type) {
  case SystemZII::CompareAndBranch:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRJ;
    case SystemZ::CGR:
      return SystemZ::CGRJ;
    case SystemZ::CHI:
      return SystemZ::CIJ;
    case SystemZ::CGHI:
      return SystemZ::CGIJ;
    case SystemZ::CLR:
      return SystemZ::CLRJ;
    case SystemZ::CLGR:
      return SystemZ::CLGRJ;
    case SystemZ::CLFI:
      return SystemZ::CLIJ;
    case SystemZ::CLGFI:
      return SystemZ::CLGIJ;
    default:
      return 0;
    }
  case SystemZII::CompareAndReturn:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRBReturn;
    case SystemZ::CGR:
      return SystemZ::CGRBReturn;
    case SystemZ::CHI:
      return SystemZ::CIBReturn;
    case SystemZ::CGHI:
      return SystemZ::CGIBReturn;
    case SystemZ::CLR:
      return SystemZ::CLRBReturn;
    case SystemZ::CLGR:
      return SystemZ::CLGRBReturn;
    case SystemZ::CLFI:
      return SystemZ::CLIBReturn;
    case SystemZ::CLGFI:
      return SystemZ::CLGIBReturn;
    default:
      return 0;
    }
  case SystemZII::CompareAndSibcall:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRBCall;
    case SystemZ::CGR:
      return SystemZ::CGRBCall;
    case SystemZ::CHI:
      return SystemZ::CIBCall;
    case SystemZ::CGHI:
      return SystemZ::CGIBCall;
    case SystemZ::CLR:
      return SystemZ::CLRBCall;
    case SystemZ::CLGR:
      return SystemZ::CLGRBCall;
    case SystemZ::CLFI:
      return SystemZ::CLIBCall;
    case SystemZ::CLGFI:
      return SystemZ::CLGIBCall;
    default:
      return 0;
    }
  case SystemZII::CompareAndTrap:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRT;
    case SystemZ::CGR:
      return SystemZ::CGRT;
    case SystemZ::CHI:
      return SystemZ::CIT;
    case SystemZ::CGHI:
      return SystemZ::CGIT;
    case SystemZ::CLR:
      return SystemZ::CLRT;
    case SystemZ::CLGR:
      return SystemZ::CLGRT;
    case SystemZ::CLFI:
      return SystemZ::CLFIT;
    case SystemZ::CLGFI:
      return SystemZ::CLGIT;
    case SystemZ::CL:
      return SystemZ::CLT;
    case SystemZ::CLG:
      return SystemZ::CLGT;
    default:
      return 0;
    }
  }
  return 0;
}
unsigned SystemZInstrInfo::getLoadAndTrap(unsigned Opcode) const {
  if (!STI.hasLoadAndTrap())
    return 0;
  switch (Opcode) {
  case SystemZ::L:
  case SystemZ::LY:
    return SystemZ::LAT;
  case SystemZ::LG:
    return SystemZ::LGAT;
  case SystemZ::LFH:
    return SystemZ::LFHAT;
  case SystemZ::LLGF:
    return SystemZ::LLGFAT;
  case SystemZ::LLGT:
    return SystemZ::LLGTAT;
  }
  return 0;
}
void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     unsigned Reg, uint64_t Value) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
  unsigned Opcode;
  if (isInt<16>(Value))
    Opcode = SystemZ::LGHI;
  else if (SystemZ::isImmLL(Value))
    Opcode = SystemZ::LLILL;
  else if (SystemZ::isImmLH(Value)) {
    Opcode = SystemZ::LLILH;
    Value >>= 16;
  } else {
    assert(isInt<32>(Value) && "Huge values not handled yet");
    Opcode = SystemZ::LGFI;
  }
  BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
}
bool SystemZInstrInfo::
areMemAccessesTriviallyDisjoint(MachineInstr &MIa, MachineInstr &MIb,
                                AliasAnalysis *AA) const {

  if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand())
    return false;

  // If mem-operands show that the same address Value is used by both
  // instructions, check for non-overlapping offsets and widths. Not
  // sure if a register based analysis would be an improvement...

  MachineMemOperand *MMOa = *MIa.memoperands_begin();
  MachineMemOperand *MMOb = *MIb.memoperands_begin();
  const Value *VALa = MMOa->getValue();
  const Value *VALb = MMOb->getValue();
  bool SameVal = (VALa && VALb && (VALa == VALb));
  if (!SameVal) {
    const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
    const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
    if (PSVa && PSVb && (PSVa == PSVb))
      SameVal = true;
  }

  if (SameVal) {
    int OffsetA = MMOa->getOffset(), OffsetB = MMOb->getOffset();
    int WidthA = MMOa->getSize(), WidthB = MMOb->getSize();
    int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
    int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
    int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
    if (LowOffset + LowWidth <= HighOffset)