//===-- SystemZInstrInfo.cpp - SystemZ instruction information -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the SystemZ implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "SystemZInstrInfo.h"
#include "MCTargetDesc/SystemZMCTargetDesc.h"
#include "SystemZ.h"
#include "SystemZInstrBuilder.h"
#include "SystemZSubtarget.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/LiveInterval.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/LiveVariables.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SlotIndexes.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>
#include <cstdint>
#include <iterator>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRMAP_INFO
#include "SystemZGenInstrInfo.inc"

#define DEBUG_TYPE "systemz-II"

// Return a mask with Count low bits set.
static uint64_t allOnes(unsigned int Count) {
  return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
}
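
// For example, allOnes(4) == 0xf and allOnes(32) == 0xffffffff; shifting in
// two steps keeps allOnes(64) == ~0ULL well defined, since a single shift of
// a 64-bit value by 64 bits would be undefined behavior.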

// Pin the vtable to this file.
void SystemZInstrInfo::anchor() {}

SystemZInstrInfo::SystemZInstrInfo(SystemZSubtarget &sti)
    : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
      RI(), STI(sti) {}

// MI is a 128-bit load or store. Split it into two 64-bit loads or stores,
// each having the opcode given by NewOpcode.
void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
                                 unsigned NewOpcode) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();

  // Get two load or store instructions. Use the original instruction for one
  // of them (arbitrarily the second here) and create a clone for the other.
  MachineInstr *EarlierMI = MF.CloneMachineInstr(&*MI);
  MBB->insert(MI, EarlierMI);

  // Set up the two 64-bit registers and remember super reg and its flags.
  MachineOperand &HighRegOp = EarlierMI->getOperand(0);
  MachineOperand &LowRegOp = MI->getOperand(0);
  Register Reg128 = LowRegOp.getReg();
  unsigned Reg128Killed = getKillRegState(LowRegOp.isKill());
  unsigned Reg128Undef = getUndefRegState(LowRegOp.isUndef());
  HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
  LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));

  if (MI->mayStore()) {
    // Add implicit uses of the super register in case one of the subregs is
    // undefined. We could track liveness and skip storing an undefined
    // subreg, but this is hopefully rare (discovered with llvm-stress).
    // If Reg128 was killed, set kill flag on MI.
    unsigned Reg128UndefImpl = (Reg128Undef | RegState::Implicit);
    MachineInstrBuilder(MF, EarlierMI).addReg(Reg128, Reg128UndefImpl);
    MachineInstrBuilder(MF, MI).addReg(Reg128,
                                       (Reg128UndefImpl | Reg128Killed));
  }

  // The address in the first (high) instruction is already correct.
  // Adjust the offset in the second (low) instruction.
  MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
  MachineOperand &LowOffsetOp = MI->getOperand(2);
  LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);

  // Clear the kill flags on the registers in the first instruction.
  if (EarlierMI->getOperand(0).isReg() && EarlierMI->getOperand(0).isUse())
    EarlierMI->getOperand(0).setIsKill(false);
  EarlierMI->getOperand(1).setIsKill(false);
  EarlierMI->getOperand(3).setIsKill(false);

  // Set the opcodes.
  unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
  unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
  assert(HighOpcode && LowOpcode && "Both offsets should be in range");

  EarlierMI->setDesc(get(HighOpcode));
  MI->setDesc(get(LowOpcode));
}
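
// For example, expandPostRAPseudo below lowers an L128 with displacement D
// into two LG instructions, one loading the high 64 bits from D and one
// loading the low 64 bits from D + 8.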

// Split ADJDYNALLOC instruction MI.
void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  MachineFrameInfo &MFFrame = MF.getFrameInfo();
  MachineOperand &OffsetMO = MI->getOperand(2);

  uint64_t Offset = (MFFrame.getMaxCallFrameSize() +
                     SystemZMC::CallFrameSize +
                     OffsetMO.getImm());
  unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
  assert(NewOpcode && "No support for huge argument lists yet");
  MI->setDesc(get(NewOpcode));
  OffsetMO.setImm(Offset);
}

// MI is an RI-style pseudo instruction. Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32. ConvertHigh is true if LowOpcode takes a signed operand
// and HighOpcode takes an unsigned 32-bit operand. In those cases,
// MI has the same kind of operand as LowOpcode, so needs to be converted
// if HighOpcode is used.
void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode,
                                      unsigned HighOpcode,
                                      bool ConvertHigh) const {
  Register Reg = MI.getOperand(0).getReg();
  bool IsHigh = SystemZ::isHighReg(Reg);
  MI.setDesc(get(IsHigh ? HighOpcode : LowOpcode));
  if (IsHigh && ConvertHigh)
    MI.getOperand(1).setImm(uint32_t(MI.getOperand(1).getImm()));
}
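
// For example, expandPostRAPseudo below expands LHIMux with LowOpcode = LHI,
// HighOpcode = IIHF and ConvertHigh = true: LHI takes a signed 16-bit
// immediate while IIHF takes an unsigned 32-bit one, so the immediate is
// reinterpreted when the high-register variant is chosen.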

// MI is a three-operand RIE-style pseudo instruction. Replace it with
// LowOpcodeK if the registers are both low GR32s, otherwise use a move
// followed by HighOpcode or LowOpcode, depending on whether the target
// is a high or low GR32.
void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
                                       unsigned LowOpcodeK,
                                       unsigned HighOpcode) const {
  Register DestReg = MI.getOperand(0).getReg();
  Register SrcReg = MI.getOperand(1).getReg();
  bool DestIsHigh = SystemZ::isHighReg(DestReg);
  bool SrcIsHigh = SystemZ::isHighReg(SrcReg);
  if (!DestIsHigh && !SrcIsHigh)
    MI.setDesc(get(LowOpcodeK));
  else {
    if (DestReg != SrcReg) {
      emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, SrcReg,
                    SystemZ::LR, 32, MI.getOperand(1).isKill(),
                    MI.getOperand(1).isUndef());
      MI.getOperand(1).setReg(DestReg);
    }
    MI.setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
    MI.tieOperands(0, 1);
  }
}

// MI is an RXY-style pseudo instruction. Replace it with LowOpcode
// if the first operand is a low GR32 and HighOpcode if the first operand
// is a high GR32.
void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
                                       unsigned HighOpcode) const {
  Register Reg = MI.getOperand(0).getReg();
  unsigned Opcode = getOpcodeForOffset(
      SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode,
      MI.getOperand(2).getImm());
  MI.setDesc(get(Opcode));
}

// MI is a load-on-condition pseudo instruction with a single register
// (source or destination) operand. Replace it with LowOpcode if the
// register is a low GR32 and HighOpcode if the register is a high GR32.
void SystemZInstrInfo::expandLOCPseudo(MachineInstr &MI, unsigned LowOpcode,
                                       unsigned HighOpcode) const {
  Register Reg = MI.getOperand(0).getReg();
  unsigned Opcode = SystemZ::isHighReg(Reg) ? HighOpcode : LowOpcode;
  MI.setDesc(get(Opcode));
}

// MI is an RR-style pseudo instruction that zero-extends the low Size bits
// of one GRX32 into another. Replace it with LowOpcode if both operands
// are low registers, otherwise use RISB[LH]G.
void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
                                        unsigned Size) const {
  MachineInstrBuilder MIB =
      emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(),
                    MI.getOperand(0).getReg(), MI.getOperand(1).getReg(),
                    LowOpcode, Size, MI.getOperand(1).isKill(),
                    MI.getOperand(1).isUndef());

  // Keep the remaining operands as-is.
  for (unsigned I = 2; I < MI.getNumOperands(); ++I)
    MIB.add(MI.getOperand(I));

  MI.eraseFromParent();
}

void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const {
  MachineBasicBlock *MBB = MI->getParent();
  MachineFunction &MF = *MBB->getParent();
  const Register Reg64 = MI->getOperand(0).getReg();
  const Register Reg32 = RI.getSubReg(Reg64, SystemZ::subreg_l32);

  // EAR can only load the low subregister so use a shift for %a0 to produce
  // the GR containing %a0 and %a1.

  // ear <reg>, %a0
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
      .addReg(SystemZ::A0)
      .addReg(Reg64, RegState::ImplicitDefine);

  // sllg <reg>, <reg>, 32
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::SLLG), Reg64)
      .addReg(Reg64)
      .addReg(0)
      .addImm(32);

  // ear <reg>, %a1
  BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
      .addReg(SystemZ::A1);

  // lg <reg>, 40(<reg>)
  MI->setDesc(get(SystemZ::LG));
  MachineInstrBuilder(MF, MI).addReg(Reg64).addImm(40).addReg(0);
}

// Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
// DestReg before MBBI in MBB. Use LowLowOpcode when both DestReg and SrcReg
// are low registers, otherwise use RISB[LH]G. Size is the number of bits
// taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR).
// KillSrc is true if this move is the last use of SrcReg.
MachineInstrBuilder
SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MBBI,
                                const DebugLoc &DL, unsigned DestReg,
                                unsigned SrcReg, unsigned LowLowOpcode,
                                unsigned Size, bool KillSrc,
                                bool UndefSrc) const {
  unsigned Opcode;
  bool DestIsHigh = SystemZ::isHighReg(DestReg);
  bool SrcIsHigh = SystemZ::isHighReg(SrcReg);
  if (DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBHH;
  else if (DestIsHigh && !SrcIsHigh)
    Opcode = SystemZ::RISBHL;
  else if (!DestIsHigh && SrcIsHigh)
    Opcode = SystemZ::RISBLH;
  else {
    return BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg)
        .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc));
  }
  unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0);
  return BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
      .addReg(DestReg, RegState::Undef)
      .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc))
      .addImm(32 - Size).addImm(128 + 31).addImm(Rotate);
}
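
// In the RISB[LH]G forms emitted above, the three immediates are the first
// and last bit positions to keep within the selected 32-bit half (32 - Size
// and 31, where the added 128 on the end position requests that the remaining
// destination bits be zeroed) and a rotate amount of 32 when the data has to
// move between the low and high halves of the 64-bit register.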

MachineInstr *SystemZInstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                       bool NewMI,
                                                       unsigned OpIdx1,
                                                       unsigned OpIdx2) const {
  auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
    if (NewMI)
      return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
    return MI;
  };

  switch (MI.getOpcode()) {
  case SystemZ::SELRMux:
  case SystemZ::SELFHR:
  case SystemZ::SELR:
  case SystemZ::SELGR:
  case SystemZ::LOCRMux:
  case SystemZ::LOCFHR:
  case SystemZ::LOCR:
  case SystemZ::LOCGR: {
    auto &WorkingMI = cloneIfNew(MI);
    // Invert condition.
    unsigned CCValid = WorkingMI.getOperand(3).getImm();
    unsigned CCMask = WorkingMI.getOperand(4).getImm();
    WorkingMI.getOperand(4).setImm(CCMask ^ CCValid);
    return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
                                                   OpIdx1, OpIdx2);
  }
  default:
    return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
  }
}

// If MI is a simple load or store for a frame object, return the register
// it loads or stores and set FrameIndex to the index of the frame object.
// Return 0 otherwise.
//
// Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static int isSimpleMove(const MachineInstr &MI, int &FrameIndex,
                        unsigned Flag) {
  const MCInstrDesc &MCID = MI.getDesc();
  if ((MCID.TSFlags & Flag) && MI.getOperand(1).isFI() &&
      MI.getOperand(2).getImm() == 0 && MI.getOperand(3).getReg() == 0) {
    FrameIndex = MI.getOperand(1).getIndex();
    return MI.getOperand(0).getReg();
  }
  return 0;
}

unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
                                               int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
}

unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
                                              int &FrameIndex) const {
  return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
}

bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr &MI,
                                       int &DestFrameIndex,
                                       int &SrcFrameIndex) const {
  // Check for MVC 0(Length,FI1),0(FI2)
  const MachineFrameInfo &MFI = MI.getParent()->getParent()->getFrameInfo();
  if (MI.getOpcode() != SystemZ::MVC || !MI.getOperand(0).isFI() ||
      MI.getOperand(1).getImm() != 0 || !MI.getOperand(3).isFI() ||
      MI.getOperand(4).getImm() != 0)
    return false;

  // Check that Length covers the full slots.
  int64_t Length = MI.getOperand(2).getImm();
  unsigned FI1 = MI.getOperand(0).getIndex();
  unsigned FI2 = MI.getOperand(3).getIndex();
  if (MFI.getObjectSize(FI1) != Length ||
      MFI.getObjectSize(FI2) != Length)
    return false;

  DestFrameIndex = FI1;
  SrcFrameIndex = FI2;
  return true;
}

bool SystemZInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *&TBB,
                                     MachineBasicBlock *&FBB,
                                     SmallVectorImpl<MachineOperand> &Cond,
                                     bool AllowModify) const {
  // Most of the code and comments here are boilerplate.

  // Start from the bottom of the block and work up, examining the
  // terminator instructions.
  MachineBasicBlock::iterator I = MBB.end();
  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;

    // Working from the bottom, when we see a non-terminator instruction, we're
    // done.
    if (!isUnpredicatedTerminator(*I))
      break;

    // A terminator that isn't a branch can't easily be handled by this
    // analysis.
    if (!I->isBranch())
      return true;

    // Can't handle indirect branches.
    SystemZII::Branch Branch(getBranchInfo(*I));
    if (!Branch.hasMBBTarget())
      return true;

    // Punt on compound branches.
    if (Branch.Type != SystemZII::BranchNormal)
      return true;

    if (Branch.CCMask == SystemZ::CCMASK_ANY) {
      // Handle unconditional branches.
      if (!AllowModify) {
        TBB = Branch.getMBBTarget();
        continue;
      }

      // If the block has any instructions after a JMP, delete them.
      while (std::next(I) != MBB.end())
        std::next(I)->eraseFromParent();

      Cond.clear();
      FBB = nullptr;

      // Delete the JMP if it's equivalent to a fall-through.
      if (MBB.isLayoutSuccessor(Branch.getMBBTarget())) {
        TBB = nullptr;
        I->eraseFromParent();
        I = MBB.end();
        continue;
      }

      // TBB is used to indicate the unconditional destination.
      TBB = Branch.getMBBTarget();
      continue;
    }

    // Working from the bottom, handle the first conditional branch.
    if (Cond.empty()) {
      // FIXME: add X86-style branch swap
      FBB = TBB;
      TBB = Branch.getMBBTarget();
      Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
      Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
      continue;
    }

    // Handle subsequent conditional branches.
    assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");

    // Only handle the case where all conditional branches branch to the same
    // destination.
    if (TBB != Branch.getMBBTarget())
      return true;

    // If the conditions are the same, we can leave them alone.
    unsigned OldCCValid = Cond[0].getImm();
    unsigned OldCCMask = Cond[1].getImm();
    if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
      continue;

    // FIXME: Try combining conditions like X86 does. Should be easy on Z!
    return false;
  }

  return false;
}

unsigned SystemZInstrInfo::removeBranch(MachineBasicBlock &MBB,
                                        int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  // Most of the code and comments here are boilerplate.
  MachineBasicBlock::iterator I = MBB.end();
  unsigned Count = 0;

  while (I != MBB.begin()) {
    --I;
    if (I->isDebugInstr())
      continue;
    if (!I->isBranch())
      break;
    if (!getBranchInfo(*I).hasMBBTarget())
      break;
    // Remove the branch.
    I->eraseFromParent();
    I = MBB.end();
    ++Count;
  }

  return Count;
}

bool SystemZInstrInfo::
reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  assert(Cond.size() == 2 && "Invalid condition");
  Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
  return false;
}
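
// XOR-ing the mask with the valid bits (as done above) inverts the condition:
// for an integer comparison (CCValid == SystemZ::CCMASK_ICMP == 14) a mask of
// 8 ("equal") becomes 6 ("not equal"), and vice versa.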

unsigned SystemZInstrInfo::insertBranch(MachineBasicBlock &MBB,
                                        MachineBasicBlock *TBB,
                                        MachineBasicBlock *FBB,
                                        ArrayRef<MachineOperand> Cond,
                                        const DebugLoc &DL,
                                        int *BytesAdded) const {
  // In this function we output 32-bit branches, which should always
  // have enough range. They can be shortened and relaxed by later code
  // in the pipeline, if desired.

  // Shouldn't be a fall through.
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert((Cond.size() == 2 || Cond.size() == 0) &&
         "SystemZ branch conditions have one component!");
  assert(!BytesAdded && "code size not handled");

  if (Cond.empty()) {
    // Unconditional branch?
    assert(!FBB && "Unconditional branch with multiple successors!");
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
    return 1;
  }

  // Conditional branch.
  unsigned Count = 0;
  unsigned CCValid = Cond[0].getImm();
  unsigned CCMask = Cond[1].getImm();
  BuildMI(&MBB, DL, get(SystemZ::BRC))
      .addImm(CCValid).addImm(CCMask).addMBB(TBB);
  ++Count;

  if (FBB) {
    // Two-way Conditional branch. Insert the second branch.
    BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
    ++Count;
  }
  return Count;
}

bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
                                      unsigned &SrcReg2, int &Mask,
                                      int &Value) const {
  assert(MI.isCompare() && "Caller should have checked for a comparison");

  if (MI.getNumExplicitOperands() == 2 && MI.getOperand(0).isReg() &&
      MI.getOperand(1).isImm()) {
    SrcReg = MI.getOperand(0).getReg();
    SrcReg2 = 0;
    Value = MI.getOperand(1).getImm();
    Mask = ~0;
    return true;
  }

  return false;
}

bool SystemZInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
                                       ArrayRef<MachineOperand> Pred,
                                       unsigned TrueReg, unsigned FalseReg,
                                       int &CondCycles, int &TrueCycles,
                                       int &FalseCycles) const {
  // Not all subtargets have LOCR instructions.
  if (!STI.hasLoadStoreOnCond())
    return false;
  if (Pred.size() != 2)
    return false;

  // Check register classes.
  const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC =
      RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
  if (!RC)
    return false;

  // We have LOCR instructions for 32 and 64 bit general purpose registers.
  if ((STI.hasLoadStoreOnCond2() &&
       SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) ||
      SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
      SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
    CondCycles = 2;
    TrueCycles = 2;
    FalseCycles = 2;
    return true;
  }

  // Can't do anything else.
  return false;
}

void SystemZInstrInfo::insertSelect(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator I,
                                    const DebugLoc &DL, unsigned DstReg,
                                    ArrayRef<MachineOperand> Pred,
                                    unsigned TrueReg,
                                    unsigned FalseReg) const {
  MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
  const TargetRegisterClass *RC = MRI.getRegClass(DstReg);

  assert(Pred.size() == 2 && "Invalid condition");
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();

  unsigned Opc;
  if (SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) {
    if (STI.hasMiscellaneousExtensions3())
      Opc = SystemZ::SELRMux;
    else if (STI.hasLoadStoreOnCond2())
      Opc = SystemZ::LOCRMux;
    else {
      Opc = SystemZ::LOCR;
      MRI.constrainRegClass(DstReg, &SystemZ::GR32BitRegClass);
      Register TReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
      Register FReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
      BuildMI(MBB, I, DL, get(TargetOpcode::COPY), TReg).addReg(TrueReg);
      BuildMI(MBB, I, DL, get(TargetOpcode::COPY), FReg).addReg(FalseReg);
      TrueReg = TReg;
      FalseReg = FReg;
    }
  } else if (SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
    if (STI.hasMiscellaneousExtensions3())
      Opc = SystemZ::SELGR;
    else
      Opc = SystemZ::LOCGR;
  } else
    llvm_unreachable("Invalid register class");

  BuildMI(MBB, I, DL, get(Opc), DstReg)
      .addReg(FalseReg).addReg(TrueReg)
      .addImm(CCValid).addImm(CCMask);
}

bool SystemZInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
                                     unsigned Reg,
                                     MachineRegisterInfo *MRI) const {
  unsigned DefOpc = DefMI.getOpcode();
  if (DefOpc != SystemZ::LHIMux && DefOpc != SystemZ::LHI &&
      DefOpc != SystemZ::LGHI)
    return false;
  if (DefMI.getOperand(0).getReg() != Reg)
    return false;
  int32_t ImmVal = (int32_t)DefMI.getOperand(1).getImm();

  unsigned UseOpc = UseMI.getOpcode();
  unsigned NewUseOpc;
  unsigned UseIdx;
  int CommuteIdx = -1;
  bool TieOps = false;
  switch (UseOpc) {
  case SystemZ::SELRMux:
    TieOps = true;
    LLVM_FALLTHROUGH;
  case SystemZ::LOCRMux:
    if (!STI.hasLoadStoreOnCond2())
      return false;
    NewUseOpc = SystemZ::LOCHIMux;
    if (UseMI.getOperand(2).getReg() == Reg)
      UseIdx = 2;
    else if (UseMI.getOperand(1).getReg() == Reg)
      UseIdx = 2, CommuteIdx = 1;
    else
      return false;
    break;
  case SystemZ::SELGR:
    TieOps = true;
    LLVM_FALLTHROUGH;
  case SystemZ::LOCGR:
    if (!STI.hasLoadStoreOnCond2())
      return false;
    NewUseOpc = SystemZ::LOCGHI;
    if (UseMI.getOperand(2).getReg() == Reg)
      UseIdx = 2;
    else if (UseMI.getOperand(1).getReg() == Reg)
      UseIdx = 2, CommuteIdx = 1;
    else
      return false;
    break;
  default:
    return false;
  }

  if (CommuteIdx != -1)
    if (!commuteInstruction(UseMI, false, CommuteIdx, UseIdx))
      return false;

  bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
  UseMI.setDesc(get(NewUseOpc));
  if (TieOps)
    UseMI.tieOperands(0, 1);
  UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
  if (DeleteDef)
    DefMI.eraseFromParent();

  return true;
}

bool SystemZInstrInfo::isPredicable(const MachineInstr &MI) const {
  unsigned Opcode = MI.getOpcode();
  if (Opcode == SystemZ::Return ||
      Opcode == SystemZ::Trap ||
      Opcode == SystemZ::CallJG ||
      Opcode == SystemZ::CallBR)
    return true;
  return false;
}

bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &MBB,
                    unsigned NumCycles, unsigned ExtraPredCycles,
                    BranchProbability Probability) const {
  // Avoid using conditional returns at the end of a loop (since then
  // we'd need to emit an unconditional branch to the beginning anyway,
  // making the loop body longer). This doesn't apply for low-probability
  // loops (eg. compare-and-swap retry), so just decide based on branch
  // probability instead of looping structure.
  // However, since Compare and Trap instructions cost the same as a regular
  // Compare instruction, we should allow the if conversion to convert this
  // into a Conditional Compare regardless of the branch probability.
  if (MBB.getLastNonDebugInstr()->getOpcode() != SystemZ::Trap &&
      MBB.succ_empty() && Probability < BranchProbability(1, 8))
    return false;
  // For now only convert single instructions.
  return NumCycles == 1;
}

bool SystemZInstrInfo::
isProfitableToIfCvt(MachineBasicBlock &TMBB,
                    unsigned NumCyclesT, unsigned ExtraPredCyclesT,
                    MachineBasicBlock &FMBB,
                    unsigned NumCyclesF, unsigned ExtraPredCyclesF,
                    BranchProbability Probability) const {
  // For now avoid converting mutually-exclusive cases.
  return false;
}

bool SystemZInstrInfo::
isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
                          BranchProbability Probability) const {
  // For now only duplicate single instructions.
  return NumCycles == 1;
}

bool SystemZInstrInfo::PredicateInstruction(
    MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
  assert(Pred.size() == 2 && "Invalid condition");
  unsigned CCValid = Pred[0].getImm();
  unsigned CCMask = Pred[1].getImm();
  assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
  unsigned Opcode = MI.getOpcode();
  if (Opcode == SystemZ::Trap) {
    MI.setDesc(get(SystemZ::CondTrap));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(CCValid).addImm(CCMask)
        .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::Return) {
    MI.setDesc(get(SystemZ::CondReturn));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(CCValid).addImm(CCMask)
        .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::CallJG) {
    MachineOperand FirstOp = MI.getOperand(0);
    const uint32_t *RegMask = MI.getOperand(1).getRegMask();
    MI.RemoveOperand(1);
    MI.RemoveOperand(0);
    MI.setDesc(get(SystemZ::CallBRCL));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(CCValid)
        .addImm(CCMask)
        .add(FirstOp)
        .addRegMask(RegMask)
        .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  if (Opcode == SystemZ::CallBR) {
    const uint32_t *RegMask = MI.getOperand(0).getRegMask();
    MI.RemoveOperand(0);
    MI.setDesc(get(SystemZ::CallBCR));
    MachineInstrBuilder(*MI.getParent()->getParent(), MI)
        .addImm(CCValid).addImm(CCMask)
        .addRegMask(RegMask)
        .addReg(SystemZ::CC, RegState::Implicit);
    return true;
  }
  return false;
}

void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator MBBI,
                                   const DebugLoc &DL, unsigned DestReg,
                                   unsigned SrcReg, bool KillSrc) const {
  // Split 128-bit GPR moves into two 64-bit moves. Add implicit uses of the
  // super register in case one of the subregs is undefined.
  // This handles ADDR128 too.
  if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
                RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
    MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
        .addReg(SrcReg, RegState::Implicit);
    copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
                RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
    MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
        .addReg(SrcReg, (getKillRegState(KillSrc) | RegState::Implicit));
    return;
  }

  if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) {
    emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc,
                  false);
    return;
  }

  // Move 128-bit floating-point values between VR128 and FP128.
  if (SystemZ::VR128BitRegClass.contains(DestReg) &&
      SystemZ::FP128BitRegClass.contains(SrcReg)) {
    unsigned SrcRegHi =
        RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_h64),
                               SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
    unsigned SrcRegLo =
        RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_l64),
                               SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);

    BuildMI(MBB, MBBI, DL, get(SystemZ::VMRHG), DestReg)
        .addReg(SrcRegHi, getKillRegState(KillSrc))
        .addReg(SrcRegLo, getKillRegState(KillSrc));
    return;
  }
  if (SystemZ::FP128BitRegClass.contains(DestReg) &&
      SystemZ::VR128BitRegClass.contains(SrcReg)) {
    unsigned DestRegHi =
        RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_h64),
                               SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
    unsigned DestRegLo =
        RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_l64),
                               SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);

    if (DestRegHi != SrcReg)
      copyPhysReg(MBB, MBBI, DL, DestRegHi, SrcReg, false);
    BuildMI(MBB, MBBI, DL, get(SystemZ::VREPG), DestRegLo)
        .addReg(SrcReg, getKillRegState(KillSrc)).addImm(1);
    return;
  }

  // Move CC value from/to a GR32.
  if (SrcReg == SystemZ::CC) {
    auto MIB = BuildMI(MBB, MBBI, DL, get(SystemZ::IPM), DestReg);
    if (KillSrc) {
      const MachineFunction *MF = MBB.getParent();
      const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
      MIB->addRegisterKilled(SrcReg, TRI);
    }
    return;
  }
  if (DestReg == SystemZ::CC) {
    BuildMI(MBB, MBBI, DL, get(SystemZ::TMLH))
        .addReg(SrcReg, getKillRegState(KillSrc))
        .addImm(3 << (SystemZ::IPM_CC - 16));
    return;
  }

  // Everything else needs only one instruction.
  unsigned Opcode;
  if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LGR;
  else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
    // For z13 we prefer LDR over LER to avoid partial register dependencies.
    Opcode = STI.hasVector() ? SystemZ::LDR32 : SystemZ::LER;
  else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LDR;
  else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::LXR;
  else if (SystemZ::VR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR32;
  else if (SystemZ::VR64BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR64;
  else if (SystemZ::VR128BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::VLR;
  else if (SystemZ::AR32BitRegClass.contains(DestReg, SrcReg))
    Opcode = SystemZ::CPYA;
  else if (SystemZ::AR32BitRegClass.contains(DestReg) &&
           SystemZ::GR32BitRegClass.contains(SrcReg))
    Opcode = SystemZ::SAR;
  else if (SystemZ::GR32BitRegClass.contains(DestReg) &&
           SystemZ::AR32BitRegClass.contains(SrcReg))
    Opcode = SystemZ::EAR;
  else
    llvm_unreachable("Impossible reg-to-reg copy");

  BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc));
}

void SystemZInstrInfo::storeRegToStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
    bool isKill, int FrameIdx, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
                        .addReg(SrcReg, getKillRegState(isKill)),
                    FrameIdx);
}

void SystemZInstrInfo::loadRegFromStackSlot(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
    int FrameIdx, const TargetRegisterClass *RC,
    const TargetRegisterInfo *TRI) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();

  // Callers may expect a single instruction, so keep 128-bit moves
  // together for now and lower them after register allocation.
  unsigned LoadOpcode, StoreOpcode;
  getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
  addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
                    FrameIdx);
}

// Return true if MI is a simple load or store with a 12-bit displacement
// and no index. Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
  const MCInstrDesc &MCID = MI->getDesc();
  return ((MCID.TSFlags & Flag) &&
          isUInt<12>(MI->getOperand(2).getImm()) &&
          MI->getOperand(3).getReg() == 0);
}

namespace {

struct LogicOp {
  LogicOp() = default;
  LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
      : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}

  explicit operator bool() const { return RegSize; }

  unsigned RegSize = 0;
  unsigned ImmLSB = 0;
  unsigned ImmSize = 0;
};

} // end anonymous namespace

static LogicOp interpretAndImmediate(unsigned Opcode) {
  switch (Opcode) {
  case SystemZ::NILMux: return LogicOp(32,  0, 16);
  case SystemZ::NIHMux: return LogicOp(32, 16, 16);
  case SystemZ::NILL64: return LogicOp(64,  0, 16);
  case SystemZ::NILH64: return LogicOp(64, 16, 16);
  case SystemZ::NIHL64: return LogicOp(64, 32, 16);
  case SystemZ::NIHH64: return LogicOp(64, 48, 16);
  case SystemZ::NIFMux: return LogicOp(32,  0, 32);
  case SystemZ::NILF64: return LogicOp(64,  0, 32);
  case SystemZ::NIHF64: return LogicOp(64, 32, 32);
  default:              return LogicOp();
  }
}

static void transferDeadCC(MachineInstr *OldMI, MachineInstr *NewMI) {
  if (OldMI->registerDefIsDead(SystemZ::CC)) {
    MachineOperand *CCDef = NewMI->findRegisterDefOperand(SystemZ::CC);
    if (CCDef != nullptr)
      CCDef->setIsDead(true);
  }
}
*SystemZInstrInfo::convertToThreeAddress(
949 MachineFunction::iterator
&MFI
, MachineInstr
&MI
, LiveVariables
*LV
) const {
950 MachineBasicBlock
*MBB
= MI
.getParent();
952 // Try to convert an AND into an RISBG-type instruction.
953 // TODO: It might be beneficial to select RISBG and shorten to AND instead.
954 if (LogicOp And
= interpretAndImmediate(MI
.getOpcode())) {
955 uint64_t Imm
= MI
.getOperand(2).getImm() << And
.ImmLSB
;
956 // AND IMMEDIATE leaves the other bits of the register unchanged.
957 Imm
|= allOnes(And
.RegSize
) & ~(allOnes(And
.ImmSize
) << And
.ImmLSB
);
959 if (isRxSBGMask(Imm
, And
.RegSize
, Start
, End
)) {
961 if (And
.RegSize
== 64) {
962 NewOpcode
= SystemZ::RISBG
;
963 // Prefer RISBGN if available, since it does not clobber CC.
964 if (STI
.hasMiscellaneousExtensions())
965 NewOpcode
= SystemZ::RISBGN
;
967 NewOpcode
= SystemZ::RISBMux
;
971 MachineOperand
&Dest
= MI
.getOperand(0);
972 MachineOperand
&Src
= MI
.getOperand(1);
973 MachineInstrBuilder MIB
=
974 BuildMI(*MBB
, MI
, MI
.getDebugLoc(), get(NewOpcode
))
977 .addReg(Src
.getReg(), getKillRegState(Src
.isKill()),
983 unsigned NumOps
= MI
.getNumOperands();
984 for (unsigned I
= 1; I
< NumOps
; ++I
) {
985 MachineOperand
&Op
= MI
.getOperand(I
);
986 if (Op
.isReg() && Op
.isKill())
987 LV
->replaceKillInstruction(Op
.getReg(), MI
, *MIB
);
990 transferDeadCC(&MI
, MIB
);
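
// As an example of the rewrite above: an AND of a 64-bit register with
// 0xfffffffffffffffc (NILL64 with immediate 0xfffc) yields
// Imm = 0xfffffffffffffffc, which isRxSBGMask accepts with Start = 0 and
// End = 61, so the AND becomes a RISBG(N) with immediates 0, 189 (61 + 128,
// requesting that the remaining bits be zeroed) and rotate 0 - it keeps bit
// positions 0-61 and clears the two least significant bits.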

MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, int FrameIndex,
    LiveIntervals *LIS, VirtRegMap *VRM) const {
  const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  unsigned Size = MFI.getObjectSize(FrameIndex);
  unsigned Opcode = MI.getOpcode();

  if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
    if (LIS != nullptr && (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
        isInt<8>(MI.getOperand(2).getImm()) && !MI.getOperand(3).getReg()) {

      // Check CC liveness, since new instruction introduces a dead
      // def of CC.
      MCRegUnitIterator CCUnit(SystemZ::CC, TRI);
      LiveRange &CCLiveRange = LIS->getRegUnit(*CCUnit);
      ++CCUnit;
      assert(!CCUnit.isValid() && "CC only has one reg unit.");
      SlotIndex MISlot =
          LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot();
      if (!CCLiveRange.liveAt(MISlot)) {
        // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
        MachineInstr *BuiltMI = BuildMI(*InsertPt->getParent(), InsertPt,
                                        MI.getDebugLoc(), get(SystemZ::AGSI))
                                    .addFrameIndex(FrameIndex)
                                    .addImm(0)
                                    .addImm(MI.getOperand(2).getImm());
        BuiltMI->findRegisterDefOperand(SystemZ::CC)->setIsDead(true);
        CCLiveRange.createDeadDef(MISlot, LIS->getVNInfoAllocator());
        return BuiltMI;
      }
    }
    return nullptr;
  }

  // All other cases require a single operand.
  if (Ops.size() != 1)
    return nullptr;

  unsigned OpNum = Ops[0];
  assert(Size * 8 ==
             TRI->getRegSizeInBits(*MF.getRegInfo()
                                        .getRegClass(MI.getOperand(OpNum).getReg())) &&
         "Invalid size combination");

  if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 &&
      isInt<8>(MI.getOperand(2).getImm())) {
    // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
    Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
    MachineInstr *BuiltMI =
        BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm(MI.getOperand(2).getImm());
    transferDeadCC(&MI, BuiltMI);
    return BuiltMI;
  }

  if ((Opcode == SystemZ::ALFI && OpNum == 0 &&
       isInt<8>((int32_t)MI.getOperand(2).getImm())) ||
      (Opcode == SystemZ::ALGFI && OpNum == 0 &&
       isInt<8>((int64_t)MI.getOperand(2).getImm()))) {
    // AL(G)FI %reg, CONST -> AL(G)SI %mem, CONST
    Opcode = (Opcode == SystemZ::ALFI ? SystemZ::ALSI : SystemZ::ALGSI);
    MachineInstr *BuiltMI =
        BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm((int8_t)MI.getOperand(2).getImm());
    transferDeadCC(&MI, BuiltMI);
    return BuiltMI;
  }

  if ((Opcode == SystemZ::SLFI && OpNum == 0 &&
       isInt<8>((int32_t)-MI.getOperand(2).getImm())) ||
      (Opcode == SystemZ::SLGFI && OpNum == 0 &&
       isInt<8>((int64_t)-MI.getOperand(2).getImm()))) {
    // SL(G)FI %reg, CONST -> AL(G)SI %mem, -CONST
    Opcode = (Opcode == SystemZ::SLFI ? SystemZ::ALSI : SystemZ::ALGSI);
    MachineInstr *BuiltMI =
        BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm((int8_t)-MI.getOperand(2).getImm());
    transferDeadCC(&MI, BuiltMI);
    return BuiltMI;
  }

  if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
    bool Op0IsGPR = (Opcode == SystemZ::LGDR);
    bool Op1IsGPR = (Opcode == SystemZ::LDGR);
    // If we're spilling the destination of an LDGR or LGDR, store the
    // source register instead.
    if (OpNum == 0) {
      unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
      return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                     get(StoreOpcode))
          .add(MI.getOperand(1))
          .addFrameIndex(FrameIndex)
          .addImm(0)
          .addReg(0);
    }
    // If we're spilling the source of an LDGR or LGDR, load the
    // destination register instead.
    if (OpNum == 1) {
      unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
      return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                     get(LoadOpcode))
          .add(MI.getOperand(0))
          .addFrameIndex(FrameIndex)
          .addImm(0)
          .addReg(0);
    }
  }

  // Look for cases where the source of a simple store or the destination
  // of a simple load is being spilled. Try to use MVC instead.
  //
  // Although MVC is in practice a fast choice in these cases, it is still
  // logically a bytewise copy. This means that we cannot use it if the
  // load or store is volatile. We also wouldn't be able to use MVC if
  // the two memories partially overlap, but that case cannot occur here,
  // because we know that one of the memories is a full frame index.
  //
  // For performance reasons, we also want to avoid using MVC if the addresses
  // might be equal. We don't worry about that case here, because spill slot
  // coloring happens later, and because we have special code to remove
  // MVCs that turn out to be redundant.
  if (OpNum == 0 && MI.hasOneMemOperand()) {
    MachineMemOperand *MMO = *MI.memoperands_begin();
    if (MMO->getSize() == Size && !MMO->isVolatile() && !MMO->isAtomic()) {
      // Handle conversion of loads.
      if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXLoad)) {
        return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                       get(SystemZ::MVC))
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addImm(Size)
            .add(MI.getOperand(1))
            .addImm(MI.getOperand(2).getImm())
            .addMemOperand(MMO);
      }
      // Handle conversion of stores.
      if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXStore)) {
        return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
                       get(SystemZ::MVC))
            .add(MI.getOperand(1))
            .addImm(MI.getOperand(2).getImm())
            .addImm(Size)
            .addFrameIndex(FrameIndex)
            .addImm(0)
            .addMemOperand(MMO);
      }
    }
  }

  // If the spilled operand is the final one or the instruction is
  // commutable, try to change <INSN>R into <INSN>.
  unsigned NumOps = MI.getNumExplicitOperands();
  int MemOpcode = SystemZ::getMemOpcode(Opcode);

  // See if this is a 3-address instruction that is convertible to 2-address
  // and suitable for folding below. Only try this with virtual registers
  // and a provided VRM (during regalloc).
  bool NeedsCommute = false;
  if (SystemZ::getTwoOperandOpcode(Opcode) != -1 && MemOpcode != -1) {
    if (VRM == nullptr)
      MemOpcode = -1;
    else {
      assert(NumOps == 3 && "Expected two source registers.");
      Register DstReg = MI.getOperand(0).getReg();
      Register DstPhys =
          (Register::isVirtualRegister(DstReg) ? VRM->getPhys(DstReg) : DstReg);
      Register SrcReg = (OpNum == 2 ? MI.getOperand(1).getReg()
                                    : ((OpNum == 1 && MI.isCommutable())
                                           ? MI.getOperand(2).getReg()
                                           : Register()));
      if (DstPhys && !SystemZ::GRH32BitRegClass.contains(DstPhys) && SrcReg &&
          Register::isVirtualRegister(SrcReg) &&
          DstPhys == VRM->getPhys(SrcReg))
        NeedsCommute = (OpNum == 1);
      else
        MemOpcode = -1;
    }
  }

  if (MemOpcode >= 0) {
    if ((OpNum == NumOps - 1) || NeedsCommute) {
      const MCInstrDesc &MemDesc = get(MemOpcode);
      uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
      assert(AccessBytes != 0 && "Size of access should be known");
      assert(AccessBytes <= Size && "Access outside the frame index");
      uint64_t Offset = Size - AccessBytes;
      MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
                                        MI.getDebugLoc(), get(MemOpcode));
      MIB.add(MI.getOperand(0));
      if (NeedsCommute)
        MIB.add(MI.getOperand(2));
      else
        for (unsigned I = 1; I < OpNum; ++I)
          MIB.add(MI.getOperand(I));
      MIB.addFrameIndex(FrameIndex).addImm(Offset);
      if (MemDesc.TSFlags & SystemZII::HasIndex)
        MIB.addReg(0);
      transferDeadCC(&MI, MIB);
      return MIB;
    }
  }

  return nullptr;
}

MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
    MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
    MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
    LiveIntervals *LIS) const {
  return nullptr;
}

bool SystemZInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case SystemZ::L128:
    splitMove(MI, SystemZ::LG);
    return true;
  case SystemZ::ST128:
    splitMove(MI, SystemZ::STG);
    return true;
  case SystemZ::LX:
    splitMove(MI, SystemZ::LD);
    return true;
  case SystemZ::STX:
    splitMove(MI, SystemZ::STD);
    return true;
  case SystemZ::LBMux:
    expandRXYPseudo(MI, SystemZ::LB, SystemZ::LBH);
    return true;
  case SystemZ::LHMux:
    expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
    return true;
  case SystemZ::LLCRMux:
    expandZExtPseudo(MI, SystemZ::LLCR, 8);
    return true;
  case SystemZ::LLHRMux:
    expandZExtPseudo(MI, SystemZ::LLHR, 16);
    return true;
  case SystemZ::LLCMux:
    expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
    return true;
  case SystemZ::LLHMux:
    expandRXYPseudo(MI, SystemZ::LLH, SystemZ::LLHH);
    return true;
  case SystemZ::LMux:
    expandRXYPseudo(MI, SystemZ::L, SystemZ::LFH);
    return true;
  case SystemZ::LOCMux:
    expandLOCPseudo(MI, SystemZ::LOC, SystemZ::LOCFH);
    return true;
  case SystemZ::LOCHIMux:
    expandLOCPseudo(MI, SystemZ::LOCHI, SystemZ::LOCHHI);
    return true;
  case SystemZ::STCMux:
    expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);
    return true;
  case SystemZ::STHMux:
    expandRXYPseudo(MI, SystemZ::STH, SystemZ::STHH);
    return true;
  case SystemZ::STMux:
    expandRXYPseudo(MI, SystemZ::ST, SystemZ::STFH);
    return true;
  case SystemZ::STOCMux:
    expandLOCPseudo(MI, SystemZ::STOC, SystemZ::STOCFH);
    return true;
  case SystemZ::LHIMux:
    expandRIPseudo(MI, SystemZ::LHI, SystemZ::IIHF, true);
    return true;
  case SystemZ::IIFMux:
    expandRIPseudo(MI, SystemZ::IILF, SystemZ::IIHF, false);
    return true;
  case SystemZ::IILMux:
    expandRIPseudo(MI, SystemZ::IILL, SystemZ::IIHL, false);
    return true;
  case SystemZ::IIHMux:
    expandRIPseudo(MI, SystemZ::IILH, SystemZ::IIHH, false);
    return true;
  case SystemZ::NIFMux:
    expandRIPseudo(MI, SystemZ::NILF, SystemZ::NIHF, false);
    return true;
  case SystemZ::NILMux:
    expandRIPseudo(MI, SystemZ::NILL, SystemZ::NIHL, false);
    return true;
  case SystemZ::NIHMux:
    expandRIPseudo(MI, SystemZ::NILH, SystemZ::NIHH, false);
    return true;
  case SystemZ::OIFMux:
    expandRIPseudo(MI, SystemZ::OILF, SystemZ::OIHF, false);
    return true;
  case SystemZ::OILMux:
    expandRIPseudo(MI, SystemZ::OILL, SystemZ::OIHL, false);
    return true;
  case SystemZ::OIHMux:
    expandRIPseudo(MI, SystemZ::OILH, SystemZ::OIHH, false);
    return true;
  case SystemZ::XIFMux:
    expandRIPseudo(MI, SystemZ::XILF, SystemZ::XIHF, false);
    return true;
  case SystemZ::TMLMux:
    expandRIPseudo(MI, SystemZ::TMLL, SystemZ::TMHL, false);
    return true;
  case SystemZ::TMHMux:
    expandRIPseudo(MI, SystemZ::TMLH, SystemZ::TMHH, false);
    return true;
  case SystemZ::AHIMux:
    expandRIPseudo(MI, SystemZ::AHI, SystemZ::AIH, false);
    return true;
  case SystemZ::AHIMuxK:
    expandRIEPseudo(MI, SystemZ::AHI, SystemZ::AHIK, SystemZ::AIH);
    return true;
  case SystemZ::AFIMux:
    expandRIPseudo(MI, SystemZ::AFI, SystemZ::AIH, false);
    return true;
  case SystemZ::CHIMux:
    expandRIPseudo(MI, SystemZ::CHI, SystemZ::CIH, false);
    return true;
  case SystemZ::CFIMux:
    expandRIPseudo(MI, SystemZ::CFI, SystemZ::CIH, false);
    return true;
  case SystemZ::CLFIMux:
    expandRIPseudo(MI, SystemZ::CLFI, SystemZ::CLIH, false);
    return true;
  case SystemZ::CMux:
    expandRXYPseudo(MI, SystemZ::C, SystemZ::CHF);
    return true;
  case SystemZ::CLMux:
    expandRXYPseudo(MI, SystemZ::CL, SystemZ::CLHF);
    return true;
  case SystemZ::RISBMux: {
    bool DestIsHigh = SystemZ::isHighReg(MI.getOperand(0).getReg());
    bool SrcIsHigh = SystemZ::isHighReg(MI.getOperand(2).getReg());
    if (SrcIsHigh == DestIsHigh)
      MI.setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
    else {
      MI.setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
      MI.getOperand(5).setImm(MI.getOperand(5).getImm() ^ 32);
    }
    return true;
  }
  case SystemZ::ADJDYNALLOC:
    splitAdjDynAlloc(MI);
    return true;
  case TargetOpcode::LOAD_STACK_GUARD:
    expandLoadStackGuard(&MI);
    return true;
  default:
    return false;
  }
}
&MI
) const {
1397 if (MI
.isInlineAsm()) {
1398 const MachineFunction
*MF
= MI
.getParent()->getParent();
1399 const char *AsmStr
= MI
.getOperand(0).getSymbolName();
1400 return getInlineAsmLength(AsmStr
, *MF
->getTarget().getMCAsmInfo());
1402 return MI
.getDesc().getSize();

SystemZII::Branch
SystemZInstrInfo::getBranchInfo(const MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  case SystemZ::BR:
  case SystemZ::BI:
  case SystemZ::J:
  case SystemZ::JG:
    return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
                             SystemZ::CCMASK_ANY, &MI.getOperand(0));

  case SystemZ::BRC:
  case SystemZ::BRCL:
    return SystemZII::Branch(SystemZII::BranchNormal, MI.getOperand(0).getImm(),
                             MI.getOperand(1).getImm(), &MI.getOperand(2));

  case SystemZ::BRCT:
  case SystemZ::BRCTH:
    return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
                             SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));

  case SystemZ::BRCTG:
    return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
                             SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));

  case SystemZ::CIJ:
  case SystemZ::CRJ:
    return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
                             MI.getOperand(2).getImm(), &MI.getOperand(3));

  case SystemZ::CLIJ:
  case SystemZ::CLRJ:
    return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
                             MI.getOperand(2).getImm(), &MI.getOperand(3));

  case SystemZ::CGIJ:
  case SystemZ::CGRJ:
    return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
                             MI.getOperand(2).getImm(), &MI.getOperand(3));

  case SystemZ::CLGIJ:
  case SystemZ::CLGRJ:
    return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
                             MI.getOperand(2).getImm(), &MI.getOperand(3));

  case SystemZ::INLINEASM_BR:
    // Don't try to analyze asm goto, so pass nullptr as branch target argument.
    return SystemZII::Branch(SystemZII::AsmGoto, 0, 0, nullptr);

  default:
    llvm_unreachable("Unrecognized branch opcode");
  }
}

void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
                                           unsigned &LoadOpcode,
                                           unsigned &StoreOpcode) const {
  if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
    LoadOpcode = SystemZ::L;
    StoreOpcode = SystemZ::ST;
  } else if (RC == &SystemZ::GRH32BitRegClass) {
    LoadOpcode = SystemZ::LFH;
    StoreOpcode = SystemZ::STFH;
  } else if (RC == &SystemZ::GRX32BitRegClass) {
    LoadOpcode = SystemZ::LMux;
    StoreOpcode = SystemZ::STMux;
  } else if (RC == &SystemZ::GR64BitRegClass ||
             RC == &SystemZ::ADDR64BitRegClass) {
    LoadOpcode = SystemZ::LG;
    StoreOpcode = SystemZ::STG;
  } else if (RC == &SystemZ::GR128BitRegClass ||
             RC == &SystemZ::ADDR128BitRegClass) {
    LoadOpcode = SystemZ::L128;
    StoreOpcode = SystemZ::ST128;
  } else if (RC == &SystemZ::FP32BitRegClass) {
    LoadOpcode = SystemZ::LE;
    StoreOpcode = SystemZ::STE;
  } else if (RC == &SystemZ::FP64BitRegClass) {
    LoadOpcode = SystemZ::LD;
    StoreOpcode = SystemZ::STD;
  } else if (RC == &SystemZ::FP128BitRegClass) {
    LoadOpcode = SystemZ::LX;
    StoreOpcode = SystemZ::STX;
  } else if (RC == &SystemZ::VR32BitRegClass) {
    LoadOpcode = SystemZ::VL32;
    StoreOpcode = SystemZ::VST32;
  } else if (RC == &SystemZ::VR64BitRegClass) {
    LoadOpcode = SystemZ::VL64;
    StoreOpcode = SystemZ::VST64;
  } else if (RC == &SystemZ::VF128BitRegClass ||
             RC == &SystemZ::VR128BitRegClass) {
    LoadOpcode = SystemZ::VL;
    StoreOpcode = SystemZ::VST;
  } else
    llvm_unreachable("Unsupported regclass to load or store");
}

unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
                                              int64_t Offset) const {
  const MCInstrDesc &MCID = get(Opcode);
  int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
  if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
    // Get the instruction to use for unsigned 12-bit displacements.
    int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
    if (Disp12Opcode >= 0)
      return Disp12Opcode;

    // All address-related instructions can use unsigned 12-bit
    // displacements.
    return Opcode;
  }
  if (isInt<20>(Offset) && isInt<20>(Offset2)) {
    // Get the instruction to use for signed 20-bit displacements.
    int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
    if (Disp20Opcode >= 0)
      return Disp20Opcode;

    // Check whether Opcode allows signed 20-bit displacements.
    if (MCID.TSFlags & SystemZII::Has20BitOffset)
      return Opcode;
  }
  return 0;
}
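
// For example, an L (unsigned 12-bit displacement only) with an offset of -4
// maps to LY, its signed 20-bit counterpart; an offset that fits neither form
// yields 0 and the caller has to materialize the address separately.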

unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
  switch (Opcode) {
  case SystemZ::L:        return SystemZ::LT;
  case SystemZ::LY:       return SystemZ::LT;
  case SystemZ::LG:       return SystemZ::LTG;
  case SystemZ::LGF:      return SystemZ::LTGF;
  case SystemZ::LR:       return SystemZ::LTR;
  case SystemZ::LGFR:     return SystemZ::LTGFR;
  case SystemZ::LGR:      return SystemZ::LTGR;
  case SystemZ::LER:      return SystemZ::LTEBR;
  case SystemZ::LDR:      return SystemZ::LTDBR;
  case SystemZ::LXR:      return SystemZ::LTXBR;
  case SystemZ::LCDFR:    return SystemZ::LCDBR;
  case SystemZ::LPDFR:    return SystemZ::LPDBR;
  case SystemZ::LNDFR:    return SystemZ::LNDBR;
  case SystemZ::LCDFR_32: return SystemZ::LCEBR;
  case SystemZ::LPDFR_32: return SystemZ::LPEBR;
  case SystemZ::LNDFR_32: return SystemZ::LNEBR;
  // On zEC12 we prefer to use RISBGN. But if there is a chance to
  // actually use the condition code, we may turn it back into RISBG.
  // Note that RISBG is not really a "load-and-test" instruction,
  // but sets the same condition code values, so is OK to use here.
  case SystemZ::RISBGN:   return SystemZ::RISBG;
  default:                return 0;
  }
}

// Return true if Mask matches the regexp 0*1+0*, given that zero masks
// have already been filtered out. Store the first set bit in LSB and
// the number of set bits in Length if so.
static bool isStringOfOnes(uint64_t Mask, unsigned &LSB, unsigned &Length) {
  unsigned First = findFirstSet(Mask);
  uint64_t Top = (Mask >> First) + 1;
  if ((Top & -Top) == Top) {
    LSB = First;
    Length = findFirstSet(Top);
    return true;
  }
  return false;
}
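
// The test relies on the usual power-of-two trick: shifting the mask right by
// its lowest set bit leaves a value of the form 0...01...1, and adding 1 to
// that gives a power of two exactly when the original mask was one contiguous
// run of ones. For example, 0x0ff0 >> 4 == 0xff and 0xff + 1 == 0x100, which
// passes and gives LSB = 4 and Length = 8.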

bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
                                   unsigned &Start, unsigned &End) const {
  // Reject trivial all-zero masks.
  Mask &= allOnes(BitSize);
  if (Mask == 0)
    return false;

  // Handle the 1+0+ or 0+1+0* cases. Start then specifies the index of
  // the msb and End specifies the index of the lsb.
  unsigned LSB, Length;
  if (isStringOfOnes(Mask, LSB, Length)) {
    Start = 63 - (LSB + Length - 1);
    End = 63 - LSB;
    return true;
  }

  // Handle the wrap-around 1+0+1+ cases. Start then specifies the msb
  // of the low 1s and End specifies the lsb of the high 1s.
  if (isStringOfOnes(Mask ^ allOnes(BitSize), LSB, Length)) {
    assert(LSB > 0 && "Bottom bit must be set");
    assert(LSB + Length < BitSize && "Top bit must be set");
    Start = 63 - (LSB - 1);
    End = 63 - (LSB + Length);
    return true;
  }

  return false;
}

unsigned SystemZInstrInfo::getFusedCompare(unsigned Opcode,
                                           SystemZII::FusedCompareType Type,
                                           const MachineInstr *MI) const {
  switch (Opcode) {
  case SystemZ::CHI:
  case SystemZ::CGHI:
    if (!(MI && isInt<8>(MI->getOperand(1).getImm())))
      return 0;
    break;
  case SystemZ::CLFI:
  case SystemZ::CLGFI:
    if (!(MI && isUInt<8>(MI->getOperand(1).getImm())))
      return 0;
    break;
  case SystemZ::CL:
  case SystemZ::CLG:
    if (!STI.hasMiscellaneousExtensions())
      return 0;
    if (!(MI && MI->getOperand(3).getReg() == 0))
      return 0;
    break;
  }
  switch (Type) {
  case SystemZII::CompareAndBranch:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRJ;
    case SystemZ::CGR:
      return SystemZ::CGRJ;
    case SystemZ::CHI:
      return SystemZ::CIJ;
    case SystemZ::CGHI:
      return SystemZ::CGIJ;
    case SystemZ::CLR:
      return SystemZ::CLRJ;
    case SystemZ::CLGR:
      return SystemZ::CLGRJ;
    case SystemZ::CLFI:
      return SystemZ::CLIJ;
    case SystemZ::CLGFI:
      return SystemZ::CLGIJ;
    default:
      return 0;
    }
  case SystemZII::CompareAndReturn:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRBReturn;
    case SystemZ::CGR:
      return SystemZ::CGRBReturn;
    case SystemZ::CHI:
      return SystemZ::CIBReturn;
    case SystemZ::CGHI:
      return SystemZ::CGIBReturn;
    case SystemZ::CLR:
      return SystemZ::CLRBReturn;
    case SystemZ::CLGR:
      return SystemZ::CLGRBReturn;
    case SystemZ::CLFI:
      return SystemZ::CLIBReturn;
    case SystemZ::CLGFI:
      return SystemZ::CLGIBReturn;
    default:
      return 0;
    }
  case SystemZII::CompareAndSibcall:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRBCall;
    case SystemZ::CGR:
      return SystemZ::CGRBCall;
    case SystemZ::CHI:
      return SystemZ::CIBCall;
    case SystemZ::CGHI:
      return SystemZ::CGIBCall;
    case SystemZ::CLR:
      return SystemZ::CLRBCall;
    case SystemZ::CLGR:
      return SystemZ::CLGRBCall;
    case SystemZ::CLFI:
      return SystemZ::CLIBCall;
    case SystemZ::CLGFI:
      return SystemZ::CLGIBCall;
    default:
      return 0;
    }
  case SystemZII::CompareAndTrap:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRT;
    case SystemZ::CGR:
      return SystemZ::CGRT;
    case SystemZ::CHI:
      return SystemZ::CIT;
    case SystemZ::CGHI:
      return SystemZ::CGIT;
    case SystemZ::CLR:
      return SystemZ::CLRT;
    case SystemZ::CLGR:
      return SystemZ::CLGRT;
    case SystemZ::CLFI:
      return SystemZ::CLFIT;
    case SystemZ::CLGFI:
      return SystemZ::CLGIT;
    case SystemZ::CL:
      return SystemZ::CLT;
    case SystemZ::CLG:
      return SystemZ::CLGT;
    default:
      return 0;
    }
  }
  return 0;
}

unsigned SystemZInstrInfo::getLoadAndTrap(unsigned Opcode) const {
  if (!STI.hasLoadAndTrap())
    return 0;
  switch (Opcode) {
  case SystemZ::L:
  case SystemZ::LY:
    return SystemZ::LAT;
  case SystemZ::LG:
    return SystemZ::LGAT;
  case SystemZ::LFH:
    return SystemZ::LFHAT;
  case SystemZ::LLGF:
    return SystemZ::LLGFAT;
  case SystemZ::LLGT:
    return SystemZ::LLGTAT;
  default:
    return 0;
  }
}

void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     unsigned Reg, uint64_t Value) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
  unsigned Opcode;
  if (isInt<16>(Value))
    Opcode = SystemZ::LGHI;
  else if (SystemZ::isImmLL(Value))
    Opcode = SystemZ::LLILL;
  else if (SystemZ::isImmLH(Value)) {
    Opcode = SystemZ::LLILH;
    Value >>= 16;
  } else {
    assert(isInt<32>(Value) && "Huge values not handled yet");
    Opcode = SystemZ::LGFI;
  }
  BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
}
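
// For example, a value of -5 fits in a signed 16-bit field and uses LGHI,
// 0x00120000 has bits only in the low-high halfword and becomes "LLILH 0x12"
// after the shift, and 0x12345678 falls through to LGFI.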

bool SystemZInstrInfo::
areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                const MachineInstr &MIb) const {

  if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand())
    return false;

  // If mem-operands show that the same address Value is used by both
  // instructions, check for non-overlapping offsets and widths. Not
  // sure if a register based analysis would be an improvement...

  MachineMemOperand *MMOa = *MIa.memoperands_begin();
  MachineMemOperand *MMOb = *MIb.memoperands_begin();
  const Value *VALa = MMOa->getValue();
  const Value *VALb = MMOb->getValue();
  bool SameVal = (VALa && VALb && (VALa == VALb));
  if (!SameVal) {
    const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
    const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
    if (PSVa && PSVb && (PSVa == PSVb))
      SameVal = true;
  }

  if (SameVal) {
    int OffsetA = MMOa->getOffset(), OffsetB = MMOb->getOffset();
    int WidthA = MMOa->getSize(), WidthB = MMOb->getSize();
    int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
    int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
    int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
    if (LowOffset + LowWidth <= HighOffset)
      return true;
  }

  return false;
}