1 //===-- SystemZInstrInfo.cpp - SystemZ instruction information ------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file contains the SystemZ implementation of the TargetInstrInfo class.
10 //
11 //===----------------------------------------------------------------------===//
13 #include "SystemZInstrInfo.h"
14 #include "MCTargetDesc/SystemZMCTargetDesc.h"
15 #include "SystemZ.h"
16 #include "SystemZInstrBuilder.h"
17 #include "SystemZSubtarget.h"
18 #include "llvm/ADT/Statistic.h"
19 #include "llvm/CodeGen/LiveInterval.h"
20 #include "llvm/CodeGen/LiveIntervals.h"
21 #include "llvm/CodeGen/LiveVariables.h"
22 #include "llvm/CodeGen/MachineBasicBlock.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineFunction.h"
25 #include "llvm/CodeGen/MachineInstr.h"
26 #include "llvm/CodeGen/MachineMemOperand.h"
27 #include "llvm/CodeGen/MachineOperand.h"
28 #include "llvm/CodeGen/MachineRegisterInfo.h"
29 #include "llvm/CodeGen/SlotIndexes.h"
30 #include "llvm/CodeGen/TargetInstrInfo.h"
31 #include "llvm/CodeGen/TargetSubtargetInfo.h"
32 #include "llvm/MC/MCInstrDesc.h"
33 #include "llvm/MC/MCRegisterInfo.h"
34 #include "llvm/Support/BranchProbability.h"
35 #include "llvm/Support/ErrorHandling.h"
36 #include "llvm/Support/MathExtras.h"
37 #include "llvm/Target/TargetMachine.h"
38 #include <cassert>
39 #include <cstdint>
40 #include <iterator>
42 using namespace llvm;
44 #define GET_INSTRINFO_CTOR_DTOR
45 #define GET_INSTRMAP_INFO
46 #include "SystemZGenInstrInfo.inc"
48 #define DEBUG_TYPE "systemz-II"
49 STATISTIC(LOCRMuxJumps, "Number of LOCRMux jump-sequences (lower is better)");
51 // Return a mask with Count low bits set.
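// The shift is done in two steps so that Count == 64 never shifts a 64-bit
// value by 64 bits, which would be undefined behavior.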
52 static uint64_t allOnes(unsigned int Count) {
53 return Count == 0 ? 0 : (uint64_t(1) << (Count - 1) << 1) - 1;
56 // Reg should be a 32-bit GPR. Return true if it is a high register rather
57 // than a low register.
58 static bool isHighReg(unsigned int Reg) {
59 if (SystemZ::GRH32BitRegClass.contains(Reg))
60 return true;
61 assert(SystemZ::GR32BitRegClass.contains(Reg) && "Invalid GRX32");
62 return false;
65 // Pin the vtable to this file.
66 void SystemZInstrInfo::anchor() {}
68 SystemZInstrInfo::SystemZInstrInfo(SystemZSubtarget &sti)
69 : SystemZGenInstrInfo(SystemZ::ADJCALLSTACKDOWN, SystemZ::ADJCALLSTACKUP),
70 RI(), STI(sti) {
73 // MI is a 128-bit load or store. Split it into two 64-bit loads or stores,
74 // each having the opcode given by NewOpcode.
75 void SystemZInstrInfo::splitMove(MachineBasicBlock::iterator MI,
76 unsigned NewOpcode) const {
77 MachineBasicBlock *MBB = MI->getParent();
78 MachineFunction &MF = *MBB->getParent();
80 // Get two load or store instructions. Use the original instruction for one
81 // of them (arbitrarily the second here) and create a clone for the other.
82 MachineInstr *EarlierMI = MF.CloneMachineInstr(&*MI);
83 MBB->insert(MI, EarlierMI);
85 // Set up the two 64-bit registers and remember super reg and its flags.
86 MachineOperand &HighRegOp = EarlierMI->getOperand(0);
87 MachineOperand &LowRegOp = MI->getOperand(0);
88 Register Reg128 = LowRegOp.getReg();
89 unsigned Reg128Killed = getKillRegState(LowRegOp.isKill());
90 unsigned Reg128Undef = getUndefRegState(LowRegOp.isUndef());
91 HighRegOp.setReg(RI.getSubReg(HighRegOp.getReg(), SystemZ::subreg_h64));
92 LowRegOp.setReg(RI.getSubReg(LowRegOp.getReg(), SystemZ::subreg_l64));
94 if (MI->mayStore()) {
95 // Add implicit uses of the super register in case one of the subregs is
96 // undefined. We could track liveness and skip storing an undefined
97 // subreg, but this is hopefully rare (discovered with llvm-stress).
98 // If Reg128 was killed, set kill flag on MI.
99 unsigned Reg128UndefImpl = (Reg128Undef | RegState::Implicit);
100 MachineInstrBuilder(MF, EarlierMI).addReg(Reg128, Reg128UndefImpl);
101 MachineInstrBuilder(MF, MI).addReg(Reg128, (Reg128UndefImpl | Reg128Killed));
104 // The address in the first (high) instruction is already correct.
105 // Adjust the offset in the second (low) instruction.
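// SystemZ is big-endian, so the high 64 bits live at the lower address and
// the low 64 bits at the original offset plus 8.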
106 MachineOperand &HighOffsetOp = EarlierMI->getOperand(2);
107 MachineOperand &LowOffsetOp = MI->getOperand(2);
108 LowOffsetOp.setImm(LowOffsetOp.getImm() + 8);
110 // Clear the kill flags on the registers in the first instruction.
111 if (EarlierMI->getOperand(0).isReg() && EarlierMI->getOperand(0).isUse())
112 EarlierMI->getOperand(0).setIsKill(false);
113 EarlierMI->getOperand(1).setIsKill(false);
114 EarlierMI->getOperand(3).setIsKill(false);
116 // Set the opcodes.
117 unsigned HighOpcode = getOpcodeForOffset(NewOpcode, HighOffsetOp.getImm());
118 unsigned LowOpcode = getOpcodeForOffset(NewOpcode, LowOffsetOp.getImm());
119 assert(HighOpcode && LowOpcode && "Both offsets should be in range");
121 EarlierMI->setDesc(get(HighOpcode));
122 MI->setDesc(get(LowOpcode));
125 // Split ADJDYNALLOC instruction MI.
126 void SystemZInstrInfo::splitAdjDynAlloc(MachineBasicBlock::iterator MI) const {
127 MachineBasicBlock *MBB = MI->getParent();
128 MachineFunction &MF = *MBB->getParent();
129 MachineFrameInfo &MFFrame = MF.getFrameInfo();
130 MachineOperand &OffsetMO = MI->getOperand(2);
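// The dynamically allocated area sits above the outgoing argument area and
// the fixed call-frame region (SystemZMC::CallFrameSize), so both are added
// before the pseudo's own offset.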
132 uint64_t Offset = (MFFrame.getMaxCallFrameSize() +
133 SystemZMC::CallFrameSize +
134 OffsetMO.getImm());
135 unsigned NewOpcode = getOpcodeForOffset(SystemZ::LA, Offset);
136 assert(NewOpcode && "No support for huge argument lists yet");
137 MI->setDesc(get(NewOpcode));
138 OffsetMO.setImm(Offset);
141 // MI is an RI-style pseudo instruction. Replace it with LowOpcode
142 // if the first operand is a low GR32 and HighOpcode if the first operand
143 // is a high GR32. ConvertHigh is true if LowOpcode takes a signed operand
144 // and HighOpcode takes an unsigned 32-bit operand. In those cases,
145 // MI has the same kind of operand as LowOpcode, so needs to be converted
146 // if HighOpcode is used.
147 void SystemZInstrInfo::expandRIPseudo(MachineInstr &MI, unsigned LowOpcode,
148 unsigned HighOpcode,
149 bool ConvertHigh) const {
150 Register Reg = MI.getOperand(0).getReg();
151 bool IsHigh = isHighReg(Reg);
152 MI.setDesc(get(IsHigh ? HighOpcode : LowOpcode));
153 if (IsHigh && ConvertHigh)
154 MI.getOperand(1).setImm(uint32_t(MI.getOperand(1).getImm()));
157 // MI is a three-operand RIE-style pseudo instruction. Replace it with
158 // LowOpcodeK if the registers are both low GR32s, otherwise use a move
159 // followed by HighOpcode or LowOpcode, depending on whether the target
160 // is a high or low GR32.
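// (In the mixed case the result is a two-address instruction, so the source
// is first moved into the destination and the two operands are then tied.)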
161 void SystemZInstrInfo::expandRIEPseudo(MachineInstr &MI, unsigned LowOpcode,
162 unsigned LowOpcodeK,
163 unsigned HighOpcode) const {
164 Register DestReg = MI.getOperand(0).getReg();
165 Register SrcReg = MI.getOperand(1).getReg();
166 bool DestIsHigh = isHighReg(DestReg);
167 bool SrcIsHigh = isHighReg(SrcReg);
168 if (!DestIsHigh && !SrcIsHigh)
169 MI.setDesc(get(LowOpcodeK));
170 else {
171 if (DestReg != SrcReg) {
172 emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, SrcReg,
173 SystemZ::LR, 32, MI.getOperand(1).isKill(),
174 MI.getOperand(1).isUndef());
175 MI.getOperand(1).setReg(DestReg);
177 MI.setDesc(get(DestIsHigh ? HighOpcode : LowOpcode));
178 MI.tieOperands(0, 1);
182 // MI is an RXY-style pseudo instruction. Replace it with LowOpcode
183 // if the first operand is a low GR32 and HighOpcode if the first operand
184 // is a high GR32.
185 void SystemZInstrInfo::expandRXYPseudo(MachineInstr &MI, unsigned LowOpcode,
186 unsigned HighOpcode) const {
187 Register Reg = MI.getOperand(0).getReg();
188 unsigned Opcode = getOpcodeForOffset(isHighReg(Reg) ? HighOpcode : LowOpcode,
189 MI.getOperand(2).getImm());
190 MI.setDesc(get(Opcode));
193 // MI is a load-on-condition pseudo instruction with a single register
194 // (source or destination) operand. Replace it with LowOpcode if the
195 // register is a low GR32 and HighOpcode if the register is a high GR32.
196 void SystemZInstrInfo::expandLOCPseudo(MachineInstr &MI, unsigned LowOpcode,
197 unsigned HighOpcode) const {
198 Register Reg = MI.getOperand(0).getReg();
199 unsigned Opcode = isHighReg(Reg) ? HighOpcode : LowOpcode;
200 MI.setDesc(get(Opcode));
203 // MI is a load-register-on-condition pseudo instruction. Replace it with
204 // LowOpcode if source and destination are both low GR32s and HighOpcode if
205 // source and destination are both high GR32s.
206 void SystemZInstrInfo::expandLOCRPseudo(MachineInstr &MI, unsigned LowOpcode,
207 unsigned HighOpcode) const {
208 Register DestReg = MI.getOperand(0).getReg();
209 Register SrcReg = MI.getOperand(2).getReg();
210 bool DestIsHigh = isHighReg(DestReg);
211 bool SrcIsHigh = isHighReg(SrcReg);
213 if (!DestIsHigh && !SrcIsHigh)
214 MI.setDesc(get(LowOpcode));
215 else if (DestIsHigh && SrcIsHigh)
216 MI.setDesc(get(HighOpcode));
217 else
218 LOCRMuxJumps++;
220 // If we were unable to implement the pseudo with a single instruction, we
221 // need to convert it back into a branch sequence. This cannot be done here
222 // since the caller of expandPostRAPseudo does not handle changes to the CFG
223 // correctly. This change is deferred to the SystemZExpandPseudo pass.
226 // MI is a select pseudo instruction. Replace it with LowOpcode if source
227 // and destination are all low GR32s and HighOpcode if source and destination
228 // are all high GR32s. Otherwise, use the two-operand MixedOpcode.
229 void SystemZInstrInfo::expandSELRPseudo(MachineInstr &MI, unsigned LowOpcode,
230 unsigned HighOpcode,
231 unsigned MixedOpcode) const {
232 Register DestReg = MI.getOperand(0).getReg();
233 Register Src1Reg = MI.getOperand(1).getReg();
234 Register Src2Reg = MI.getOperand(2).getReg();
235 bool DestIsHigh = isHighReg(DestReg);
236 bool Src1IsHigh = isHighReg(Src1Reg);
237 bool Src2IsHigh = isHighReg(Src2Reg);
239 // If sources and destination aren't all high or all low, we may be able to
240 // simplify the operation by moving one of the sources to the destination
241 // first. But only if this doesn't clobber the other source.
242 if (DestReg != Src1Reg && DestReg != Src2Reg) {
243 if (DestIsHigh != Src1IsHigh) {
244 emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, Src1Reg,
245 SystemZ::LR, 32, MI.getOperand(1).isKill(),
246 MI.getOperand(1).isUndef());
247 MI.getOperand(1).setReg(DestReg);
248 Src1Reg = DestReg;
249 Src1IsHigh = DestIsHigh;
250 } else if (DestIsHigh != Src2IsHigh) {
251 emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(), DestReg, Src2Reg,
252 SystemZ::LR, 32, MI.getOperand(2).isKill(),
253 MI.getOperand(2).isUndef());
254 MI.getOperand(2).setReg(DestReg);
255 Src2Reg = DestReg;
256 Src2IsHigh = DestIsHigh;
260 // If the destination (now) matches one source, prefer this to be first.
261 if (DestReg != Src1Reg && DestReg == Src2Reg) {
262 commuteInstruction(MI, false, 1, 2);
263 std::swap(Src1Reg, Src2Reg);
264 std::swap(Src1IsHigh, Src2IsHigh);
267 if (!DestIsHigh && !Src1IsHigh && !Src2IsHigh)
268 MI.setDesc(get(LowOpcode));
269 else if (DestIsHigh && Src1IsHigh && Src2IsHigh)
270 MI.setDesc(get(HighOpcode));
271 else {
272 // Given the simplification above, we must already have a two-operand case.
273 assert (DestReg == Src1Reg);
274 MI.setDesc(get(MixedOpcode));
275 MI.tieOperands(0, 1);
276 LOCRMuxJumps++;
279 // If we were unable to implement the pseudo with a single instruction, we
280 // need to convert it back into a branch sequence. This cannot be done here
281 // since the caller of expandPostRAPseudo does not handle changes to the CFG
282 // correctly. This change is deferred to the SystemZExpandPseudo pass.
285 // MI is an RR-style pseudo instruction that zero-extends the low Size bits
286 // of one GRX32 into another. Replace it with LowOpcode if both operands
287 // are low registers, otherwise use RISB[LH]G.
288 void SystemZInstrInfo::expandZExtPseudo(MachineInstr &MI, unsigned LowOpcode,
289 unsigned Size) const {
290 MachineInstrBuilder MIB =
291 emitGRX32Move(*MI.getParent(), MI, MI.getDebugLoc(),
292 MI.getOperand(0).getReg(), MI.getOperand(1).getReg(), LowOpcode,
293 Size, MI.getOperand(1).isKill(), MI.getOperand(1).isUndef());
295 // Keep the remaining operands as-is.
296 for (unsigned I = 2; I < MI.getNumOperands(); ++I)
297 MIB.add(MI.getOperand(I));
299 MI.eraseFromParent();
302 void SystemZInstrInfo::expandLoadStackGuard(MachineInstr *MI) const {
303 MachineBasicBlock *MBB = MI->getParent();
304 MachineFunction &MF = *MBB->getParent();
305 const Register Reg64 = MI->getOperand(0).getReg();
306 const Register Reg32 = RI.getSubReg(Reg64, SystemZ::subreg_l32);
308 // EAR can only load the low subregister so use a shift for %a0 to produce
309 // the GR containing %a0 and %a1.
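// On s390x the thread pointer is split across access registers %a0 (high
// 32 bits) and %a1 (low 32 bits); the stack guard is then loaded from a
// fixed offset (40 bytes here) off that pointer.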
311 // ear <reg>, %a0
312 BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
313 .addReg(SystemZ::A0)
314 .addReg(Reg64, RegState::ImplicitDefine);
316 // sllg <reg>, <reg>, 32
317 BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::SLLG), Reg64)
318 .addReg(Reg64)
319 .addReg(0)
320 .addImm(32);
322 // ear <reg>, %a1
323 BuildMI(*MBB, MI, MI->getDebugLoc(), get(SystemZ::EAR), Reg32)
324 .addReg(SystemZ::A1);
326 // lg <reg>, 40(<reg>)
327 MI->setDesc(get(SystemZ::LG));
328 MachineInstrBuilder(MF, MI).addReg(Reg64).addImm(40).addReg(0);
331 // Emit a zero-extending move from 32-bit GPR SrcReg to 32-bit GPR
332 // DestReg before MBBI in MBB. Use LowLowOpcode when both DestReg and SrcReg
333 // are low registers, otherwise use RISB[LH]G. Size is the number of bits
334 // taken from the low end of SrcReg (8 for LLCR, 16 for LLHR and 32 for LR).
335 // KillSrc is true if this move is the last use of SrcReg.
336 MachineInstrBuilder
337 SystemZInstrInfo::emitGRX32Move(MachineBasicBlock &MBB,
338 MachineBasicBlock::iterator MBBI,
339 const DebugLoc &DL, unsigned DestReg,
340 unsigned SrcReg, unsigned LowLowOpcode,
341 unsigned Size, bool KillSrc,
342 bool UndefSrc) const {
343 unsigned Opcode;
344 bool DestIsHigh = isHighReg(DestReg);
345 bool SrcIsHigh = isHighReg(SrcReg);
346 if (DestIsHigh && SrcIsHigh)
347 Opcode = SystemZ::RISBHH;
348 else if (DestIsHigh && !SrcIsHigh)
349 Opcode = SystemZ::RISBHL;
350 else if (!DestIsHigh && SrcIsHigh)
351 Opcode = SystemZ::RISBLH;
352 else {
353 return BuildMI(MBB, MBBI, DL, get(LowLowOpcode), DestReg)
354 .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc));
356 unsigned Rotate = (DestIsHigh != SrcIsHigh ? 32 : 0);
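// Select bits [32 - Size, 31] of the source (its low Size bits); 128 + 31 is
// end position 31 with the "zero remaining bits" flag set, and the rotate of
// 32 moves the data between the low and high register halves.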
357 return BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
358 .addReg(DestReg, RegState::Undef)
359 .addReg(SrcReg, getKillRegState(KillSrc) | getUndefRegState(UndefSrc))
360 .addImm(32 - Size).addImm(128 + 31).addImm(Rotate);
363 MachineInstr *SystemZInstrInfo::commuteInstructionImpl(MachineInstr &MI,
364 bool NewMI,
365 unsigned OpIdx1,
366 unsigned OpIdx2) const {
367 auto cloneIfNew = [NewMI](MachineInstr &MI) -> MachineInstr & {
368 if (NewMI)
369 return *MI.getParent()->getParent()->CloneMachineInstr(&MI);
370 return MI;
373 switch (MI.getOpcode()) {
374 case SystemZ::SELRMux:
375 case SystemZ::SELFHR:
376 case SystemZ::SELR:
377 case SystemZ::SELGR:
378 case SystemZ::LOCRMux:
379 case SystemZ::LOCFHR:
380 case SystemZ::LOCR:
381 case SystemZ::LOCGR: {
382 auto &WorkingMI = cloneIfNew(MI);
383 // Invert condition.
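// Swapping the false/true operands requires the opposite condition; XOR-ing
// the mask with CCValid flips exactly the CC values the instruction tests.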
384 unsigned CCValid = WorkingMI.getOperand(3).getImm();
385 unsigned CCMask = WorkingMI.getOperand(4).getImm();
386 WorkingMI.getOperand(4).setImm(CCMask ^ CCValid);
387 return TargetInstrInfo::commuteInstructionImpl(WorkingMI, /*NewMI=*/false,
388 OpIdx1, OpIdx2);
390 default:
391 return TargetInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
395 // If MI is a simple load or store for a frame object, return the register
396 // it loads or stores and set FrameIndex to the index of the frame object.
397 // Return 0 otherwise.
399 // Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
400 static int isSimpleMove(const MachineInstr &MI, int &FrameIndex,
401 unsigned Flag) {
402 const MCInstrDesc &MCID = MI.getDesc();
403 if ((MCID.TSFlags & Flag) && MI.getOperand(1).isFI() &&
404 MI.getOperand(2).getImm() == 0 && MI.getOperand(3).getReg() == 0) {
405 FrameIndex = MI.getOperand(1).getIndex();
406 return MI.getOperand(0).getReg();
408 return 0;
411 unsigned SystemZInstrInfo::isLoadFromStackSlot(const MachineInstr &MI,
412 int &FrameIndex) const {
413 return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXLoad);
416 unsigned SystemZInstrInfo::isStoreToStackSlot(const MachineInstr &MI,
417 int &FrameIndex) const {
418 return isSimpleMove(MI, FrameIndex, SystemZII::SimpleBDXStore);
421 bool SystemZInstrInfo::isStackSlotCopy(const MachineInstr &MI,
422 int &DestFrameIndex,
423 int &SrcFrameIndex) const {
424 // Check for MVC 0(Length,FI1),0(FI2)
425 const MachineFrameInfo &MFI = MI.getParent()->getParent()->getFrameInfo();
426 if (MI.getOpcode() != SystemZ::MVC || !MI.getOperand(0).isFI() ||
427 MI.getOperand(1).getImm() != 0 || !MI.getOperand(3).isFI() ||
428 MI.getOperand(4).getImm() != 0)
429 return false;
431 // Check that Length covers the full slots.
432 int64_t Length = MI.getOperand(2).getImm();
433 unsigned FI1 = MI.getOperand(0).getIndex();
434 unsigned FI2 = MI.getOperand(3).getIndex();
435 if (MFI.getObjectSize(FI1) != Length ||
436 MFI.getObjectSize(FI2) != Length)
437 return false;
439 DestFrameIndex = FI1;
440 SrcFrameIndex = FI2;
441 return true;
444 bool SystemZInstrInfo::analyzeBranch(MachineBasicBlock &MBB,
445 MachineBasicBlock *&TBB,
446 MachineBasicBlock *&FBB,
447 SmallVectorImpl<MachineOperand> &Cond,
448 bool AllowModify) const {
449 // Most of the code and comments here are boilerplate.
451 // Start from the bottom of the block and work up, examining the
452 // terminator instructions.
453 MachineBasicBlock::iterator I = MBB.end();
454 while (I != MBB.begin()) {
455 --I;
456 if (I->isDebugInstr())
457 continue;
459 // Working from the bottom, when we see a non-terminator instruction, we're
460 // done.
461 if (!isUnpredicatedTerminator(*I))
462 break;
464 // A terminator that isn't a branch can't easily be handled by this
465 // analysis.
466 if (!I->isBranch())
467 return true;
469 // Can't handle indirect branches.
470 SystemZII::Branch Branch(getBranchInfo(*I));
471 if (!Branch.Target->isMBB())
472 return true;
474 // Punt on compound branches.
475 if (Branch.Type != SystemZII::BranchNormal)
476 return true;
478 if (Branch.CCMask == SystemZ::CCMASK_ANY) {
479 // Handle unconditional branches.
480 if (!AllowModify) {
481 TBB = Branch.Target->getMBB();
482 continue;
485 // If the block has any instructions after a JMP, delete them.
486 while (std::next(I) != MBB.end())
487 std::next(I)->eraseFromParent();
489 Cond.clear();
490 FBB = nullptr;
492 // Delete the JMP if it's equivalent to a fall-through.
493 if (MBB.isLayoutSuccessor(Branch.Target->getMBB())) {
494 TBB = nullptr;
495 I->eraseFromParent();
496 I = MBB.end();
497 continue;
500 // TBB is used to indicate the unconditional destination.
501 TBB = Branch.Target->getMBB();
502 continue;
505 // Working from the bottom, handle the first conditional branch.
506 if (Cond.empty()) {
507 // FIXME: add X86-style branch swap
508 FBB = TBB;
509 TBB = Branch.Target->getMBB();
510 Cond.push_back(MachineOperand::CreateImm(Branch.CCValid));
511 Cond.push_back(MachineOperand::CreateImm(Branch.CCMask));
512 continue;
515 // Handle subsequent conditional branches.
516 assert(Cond.size() == 2 && TBB && "Should have seen a conditional branch");
518 // Only handle the case where all conditional branches branch to the same
519 // destination.
520 if (TBB != Branch.Target->getMBB())
521 return true;
523 // If the conditions are the same, we can leave them alone.
524 unsigned OldCCValid = Cond[0].getImm();
525 unsigned OldCCMask = Cond[1].getImm();
526 if (OldCCValid == Branch.CCValid && OldCCMask == Branch.CCMask)
527 continue;
529 // FIXME: Try combining conditions like X86 does. Should be easy on Z!
530 return false;
533 return false;
536 unsigned SystemZInstrInfo::removeBranch(MachineBasicBlock &MBB,
537 int *BytesRemoved) const {
538 assert(!BytesRemoved && "code size not handled");
540 // Most of the code and comments here are boilerplate.
541 MachineBasicBlock::iterator I = MBB.end();
542 unsigned Count = 0;
544 while (I != MBB.begin()) {
545 --I;
546 if (I->isDebugInstr())
547 continue;
548 if (!I->isBranch())
549 break;
550 if (!getBranchInfo(*I).Target->isMBB())
551 break;
552 // Remove the branch.
553 I->eraseFromParent();
554 I = MBB.end();
555 ++Count;
558 return Count;
561 bool SystemZInstrInfo::
562 reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
563 assert(Cond.size() == 2 && "Invalid condition");
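// Cond[0] is the CCValid mask and Cond[1] the CCMask; XOR-ing them yields
// the complementary condition within the valid CC values.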
564 Cond[1].setImm(Cond[1].getImm() ^ Cond[0].getImm());
565 return false;
568 unsigned SystemZInstrInfo::insertBranch(MachineBasicBlock &MBB,
569 MachineBasicBlock *TBB,
570 MachineBasicBlock *FBB,
571 ArrayRef<MachineOperand> Cond,
572 const DebugLoc &DL,
573 int *BytesAdded) const {
574 // In this function we output 32-bit branches, which should always
575 // have enough range. They can be shortened and relaxed by later code
576 // in the pipeline, if desired.
578 // Shouldn't be a fall through.
579 assert(TBB && "insertBranch must not be told to insert a fallthrough");
580 assert((Cond.size() == 2 || Cond.size() == 0) &&
581 "SystemZ branch conditions have one component!");
582 assert(!BytesAdded && "code size not handled");
584 if (Cond.empty()) {
585 // Unconditional branch?
586 assert(!FBB && "Unconditional branch with multiple successors!");
587 BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(TBB);
588 return 1;
591 // Conditional branch.
592 unsigned Count = 0;
593 unsigned CCValid = Cond[0].getImm();
594 unsigned CCMask = Cond[1].getImm();
595 BuildMI(&MBB, DL, get(SystemZ::BRC))
596 .addImm(CCValid).addImm(CCMask).addMBB(TBB);
597 ++Count;
599 if (FBB) {
600 // Two-way Conditional branch. Insert the second branch.
601 BuildMI(&MBB, DL, get(SystemZ::J)).addMBB(FBB);
602 ++Count;
604 return Count;
607 bool SystemZInstrInfo::analyzeCompare(const MachineInstr &MI, unsigned &SrcReg,
608 unsigned &SrcReg2, int &Mask,
609 int &Value) const {
610 assert(MI.isCompare() && "Caller should have checked for a comparison");
612 if (MI.getNumExplicitOperands() == 2 && MI.getOperand(0).isReg() &&
613 MI.getOperand(1).isImm()) {
614 SrcReg = MI.getOperand(0).getReg();
615 SrcReg2 = 0;
616 Value = MI.getOperand(1).getImm();
617 Mask = ~0;
618 return true;
621 return false;
624 bool SystemZInstrInfo::canInsertSelect(const MachineBasicBlock &MBB,
625 ArrayRef<MachineOperand> Pred,
626 unsigned TrueReg, unsigned FalseReg,
627 int &CondCycles, int &TrueCycles,
628 int &FalseCycles) const {
629 // Not all subtargets have LOCR instructions.
630 if (!STI.hasLoadStoreOnCond())
631 return false;
632 if (Pred.size() != 2)
633 return false;
635 // Check register classes.
636 const MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
637 const TargetRegisterClass *RC =
638 RI.getCommonSubClass(MRI.getRegClass(TrueReg), MRI.getRegClass(FalseReg));
639 if (!RC)
640 return false;
642 // We have LOCR instructions for 32 and 64 bit general purpose registers.
643 if ((STI.hasLoadStoreOnCond2() &&
644 SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) ||
645 SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
646 SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
647 CondCycles = 2;
648 TrueCycles = 2;
649 FalseCycles = 2;
650 return true;
653 // Can't do anything else.
654 return false;
657 void SystemZInstrInfo::insertSelect(MachineBasicBlock &MBB,
658 MachineBasicBlock::iterator I,
659 const DebugLoc &DL, unsigned DstReg,
660 ArrayRef<MachineOperand> Pred,
661 unsigned TrueReg,
662 unsigned FalseReg) const {
663 MachineRegisterInfo &MRI = MBB.getParent()->getRegInfo();
664 const TargetRegisterClass *RC = MRI.getRegClass(DstReg);
666 assert(Pred.size() == 2 && "Invalid condition");
667 unsigned CCValid = Pred[0].getImm();
668 unsigned CCMask = Pred[1].getImm();
670 unsigned Opc;
671 if (SystemZ::GRX32BitRegClass.hasSubClassEq(RC)) {
672 if (STI.hasMiscellaneousExtensions3())
673 Opc = SystemZ::SELRMux;
674 else if (STI.hasLoadStoreOnCond2())
675 Opc = SystemZ::LOCRMux;
676 else {
677 Opc = SystemZ::LOCR;
678 MRI.constrainRegClass(DstReg, &SystemZ::GR32BitRegClass);
679 Register TReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
680 Register FReg = MRI.createVirtualRegister(&SystemZ::GR32BitRegClass);
681 BuildMI(MBB, I, DL, get(TargetOpcode::COPY), TReg).addReg(TrueReg);
682 BuildMI(MBB, I, DL, get(TargetOpcode::COPY), FReg).addReg(FalseReg);
683 TrueReg = TReg;
684 FalseReg = FReg;
686 } else if (SystemZ::GR64BitRegClass.hasSubClassEq(RC)) {
687 if (STI.hasMiscellaneousExtensions3())
688 Opc = SystemZ::SELGR;
689 else
690 Opc = SystemZ::LOCGR;
691 } else
692 llvm_unreachable("Invalid register class");
694 BuildMI(MBB, I, DL, get(Opc), DstReg)
695 .addReg(FalseReg).addReg(TrueReg)
696 .addImm(CCValid).addImm(CCMask);
699 bool SystemZInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
700 unsigned Reg,
701 MachineRegisterInfo *MRI) const {
702 unsigned DefOpc = DefMI.getOpcode();
703 if (DefOpc != SystemZ::LHIMux && DefOpc != SystemZ::LHI &&
704 DefOpc != SystemZ::LGHI)
705 return false;
706 if (DefMI.getOperand(0).getReg() != Reg)
707 return false;
708 int32_t ImmVal = (int32_t)DefMI.getOperand(1).getImm();
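// LHI, LHIMux and LGHI load a sign-extended 16-bit immediate, which is
// exactly what the halfword-immediate forms LOCHIMux/LOCGHI accept below.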
710 unsigned UseOpc = UseMI.getOpcode();
711 unsigned NewUseOpc;
712 unsigned UseIdx;
713 int CommuteIdx = -1;
714 bool TieOps = false;
715 switch (UseOpc) {
716 case SystemZ::SELRMux:
717 TieOps = true;
718 LLVM_FALLTHROUGH;
719 case SystemZ::LOCRMux:
720 if (!STI.hasLoadStoreOnCond2())
721 return false;
722 NewUseOpc = SystemZ::LOCHIMux;
723 if (UseMI.getOperand(2).getReg() == Reg)
724 UseIdx = 2;
725 else if (UseMI.getOperand(1).getReg() == Reg)
726 UseIdx = 2, CommuteIdx = 1;
727 else
728 return false;
729 break;
730 case SystemZ::SELGR:
731 TieOps = true;
732 LLVM_FALLTHROUGH;
733 case SystemZ::LOCGR:
734 if (!STI.hasLoadStoreOnCond2())
735 return false;
736 NewUseOpc = SystemZ::LOCGHI;
737 if (UseMI.getOperand(2).getReg() == Reg)
738 UseIdx = 2;
739 else if (UseMI.getOperand(1).getReg() == Reg)
740 UseIdx = 2, CommuteIdx = 1;
741 else
742 return false;
743 break;
744 default:
745 return false;
748 if (CommuteIdx != -1)
749 if (!commuteInstruction(UseMI, false, CommuteIdx, UseIdx))
750 return false;
752 bool DeleteDef = MRI->hasOneNonDBGUse(Reg);
753 UseMI.setDesc(get(NewUseOpc));
754 if (TieOps)
755 UseMI.tieOperands(0, 1);
756 UseMI.getOperand(UseIdx).ChangeToImmediate(ImmVal);
757 if (DeleteDef)
758 DefMI.eraseFromParent();
760 return true;
763 bool SystemZInstrInfo::isPredicable(const MachineInstr &MI) const {
764 unsigned Opcode = MI.getOpcode();
765 if (Opcode == SystemZ::Return ||
766 Opcode == SystemZ::Trap ||
767 Opcode == SystemZ::CallJG ||
768 Opcode == SystemZ::CallBR)
769 return true;
770 return false;
773 bool SystemZInstrInfo::
774 isProfitableToIfCvt(MachineBasicBlock &MBB,
775 unsigned NumCycles, unsigned ExtraPredCycles,
776 BranchProbability Probability) const {
777 // Avoid using conditional returns at the end of a loop (since then
778 // we'd need to emit an unconditional branch to the beginning anyway,
779 // making the loop body longer). This doesn't apply for low-probability
780 // loops (e.g. compare-and-swap retry), so just decide based on branch
781 // probability instead of looping structure.
782 // However, since Compare and Trap instructions cost the same as a regular
783 // Compare instruction, we should allow the if conversion to convert this
784 // into a Conditional Compare regardless of the branch probability.
785 if (MBB.getLastNonDebugInstr()->getOpcode() != SystemZ::Trap &&
786 MBB.succ_empty() && Probability < BranchProbability(1, 8))
787 return false;
788 // For now only convert single instructions.
789 return NumCycles == 1;
792 bool SystemZInstrInfo::
793 isProfitableToIfCvt(MachineBasicBlock &TMBB,
794 unsigned NumCyclesT, unsigned ExtraPredCyclesT,
795 MachineBasicBlock &FMBB,
796 unsigned NumCyclesF, unsigned ExtraPredCyclesF,
797 BranchProbability Probability) const {
798 // For now avoid converting mutually-exclusive cases.
799 return false;
802 bool SystemZInstrInfo::
803 isProfitableToDupForIfCvt(MachineBasicBlock &MBB, unsigned NumCycles,
804 BranchProbability Probability) const {
805 // For now only duplicate single instructions.
806 return NumCycles == 1;
809 bool SystemZInstrInfo::PredicateInstruction(
810 MachineInstr &MI, ArrayRef<MachineOperand> Pred) const {
811 assert(Pred.size() == 2 && "Invalid condition");
812 unsigned CCValid = Pred[0].getImm();
813 unsigned CCMask = Pred[1].getImm();
814 assert(CCMask > 0 && CCMask < 15 && "Invalid predicate");
815 unsigned Opcode = MI.getOpcode();
816 if (Opcode == SystemZ::Trap) {
817 MI.setDesc(get(SystemZ::CondTrap));
818 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
819 .addImm(CCValid).addImm(CCMask)
820 .addReg(SystemZ::CC, RegState::Implicit);
821 return true;
823 if (Opcode == SystemZ::Return) {
824 MI.setDesc(get(SystemZ::CondReturn));
825 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
826 .addImm(CCValid).addImm(CCMask)
827 .addReg(SystemZ::CC, RegState::Implicit);
828 return true;
830 if (Opcode == SystemZ::CallJG) {
831 MachineOperand FirstOp = MI.getOperand(0);
832 const uint32_t *RegMask = MI.getOperand(1).getRegMask();
833 MI.RemoveOperand(1);
834 MI.RemoveOperand(0);
835 MI.setDesc(get(SystemZ::CallBRCL));
836 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
837 .addImm(CCValid)
838 .addImm(CCMask)
839 .add(FirstOp)
840 .addRegMask(RegMask)
841 .addReg(SystemZ::CC, RegState::Implicit);
842 return true;
844 if (Opcode == SystemZ::CallBR) {
845 const uint32_t *RegMask = MI.getOperand(0).getRegMask();
846 MI.RemoveOperand(0);
847 MI.setDesc(get(SystemZ::CallBCR));
848 MachineInstrBuilder(*MI.getParent()->getParent(), MI)
849 .addImm(CCValid).addImm(CCMask)
850 .addRegMask(RegMask)
851 .addReg(SystemZ::CC, RegState::Implicit);
852 return true;
854 return false;
857 void SystemZInstrInfo::copyPhysReg(MachineBasicBlock &MBB,
858 MachineBasicBlock::iterator MBBI,
859 const DebugLoc &DL, unsigned DestReg,
860 unsigned SrcReg, bool KillSrc) const {
861 // Split 128-bit GPR moves into two 64-bit moves. Add implicit uses of the
862 // super register in case one of the subregs is undefined.
863 // This handles ADDR128 too.
864 if (SystemZ::GR128BitRegClass.contains(DestReg, SrcReg)) {
865 copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_h64),
866 RI.getSubReg(SrcReg, SystemZ::subreg_h64), KillSrc);
867 MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
868 .addReg(SrcReg, RegState::Implicit);
869 copyPhysReg(MBB, MBBI, DL, RI.getSubReg(DestReg, SystemZ::subreg_l64),
870 RI.getSubReg(SrcReg, SystemZ::subreg_l64), KillSrc);
871 MachineInstrBuilder(*MBB.getParent(), std::prev(MBBI))
872 .addReg(SrcReg, (getKillRegState(KillSrc) | RegState::Implicit));
873 return;
876 if (SystemZ::GRX32BitRegClass.contains(DestReg, SrcReg)) {
877 emitGRX32Move(MBB, MBBI, DL, DestReg, SrcReg, SystemZ::LR, 32, KillSrc,
878 false);
879 return;
882 // Move 128-bit floating-point values between VR128 and FP128.
883 if (SystemZ::VR128BitRegClass.contains(DestReg) &&
884 SystemZ::FP128BitRegClass.contains(SrcReg)) {
885 unsigned SrcRegHi =
886 RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_h64),
887 SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
888 unsigned SrcRegLo =
889 RI.getMatchingSuperReg(RI.getSubReg(SrcReg, SystemZ::subreg_l64),
890 SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
892 BuildMI(MBB, MBBI, DL, get(SystemZ::VMRHG), DestReg)
893 .addReg(SrcRegHi, getKillRegState(KillSrc))
894 .addReg(SrcRegLo, getKillRegState(KillSrc));
895 return;
897 if (SystemZ::FP128BitRegClass.contains(DestReg) &&
898 SystemZ::VR128BitRegClass.contains(SrcReg)) {
899 unsigned DestRegHi =
900 RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_h64),
901 SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
902 unsigned DestRegLo =
903 RI.getMatchingSuperReg(RI.getSubReg(DestReg, SystemZ::subreg_l64),
904 SystemZ::subreg_h64, &SystemZ::VR128BitRegClass);
906 if (DestRegHi != SrcReg)
907 copyPhysReg(MBB, MBBI, DL, DestRegHi, SrcReg, false);
908 BuildMI(MBB, MBBI, DL, get(SystemZ::VREPG), DestRegLo)
909 .addReg(SrcReg, getKillRegState(KillSrc)).addImm(1);
910 return;
913 // Move CC value from/to a GR32.
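// IPM deposits the CC in bits 29:28 of the GR; conversely, TMLH tests those
// same two bits (3 << (IPM_CC - 16)) to set the CC from a GR again.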
914 if (SrcReg == SystemZ::CC) {
915 auto MIB = BuildMI(MBB, MBBI, DL, get(SystemZ::IPM), DestReg);
916 if (KillSrc) {
917 const MachineFunction *MF = MBB.getParent();
918 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
919 MIB->addRegisterKilled(SrcReg, TRI);
921 return;
923 if (DestReg == SystemZ::CC) {
924 BuildMI(MBB, MBBI, DL, get(SystemZ::TMLH))
925 .addReg(SrcReg, getKillRegState(KillSrc))
926 .addImm(3 << (SystemZ::IPM_CC - 16));
927 return;
930 // Everything else needs only one instruction.
931 unsigned Opcode;
932 if (SystemZ::GR64BitRegClass.contains(DestReg, SrcReg))
933 Opcode = SystemZ::LGR;
934 else if (SystemZ::FP32BitRegClass.contains(DestReg, SrcReg))
935 // For z13 we prefer LDR over LER to avoid partial register dependencies.
936 Opcode = STI.hasVector() ? SystemZ::LDR32 : SystemZ::LER;
937 else if (SystemZ::FP64BitRegClass.contains(DestReg, SrcReg))
938 Opcode = SystemZ::LDR;
939 else if (SystemZ::FP128BitRegClass.contains(DestReg, SrcReg))
940 Opcode = SystemZ::LXR;
941 else if (SystemZ::VR32BitRegClass.contains(DestReg, SrcReg))
942 Opcode = SystemZ::VLR32;
943 else if (SystemZ::VR64BitRegClass.contains(DestReg, SrcReg))
944 Opcode = SystemZ::VLR64;
945 else if (SystemZ::VR128BitRegClass.contains(DestReg, SrcReg))
946 Opcode = SystemZ::VLR;
947 else if (SystemZ::AR32BitRegClass.contains(DestReg, SrcReg))
948 Opcode = SystemZ::CPYA;
949 else if (SystemZ::AR32BitRegClass.contains(DestReg) &&
950 SystemZ::GR32BitRegClass.contains(SrcReg))
951 Opcode = SystemZ::SAR;
952 else if (SystemZ::GR32BitRegClass.contains(DestReg) &&
953 SystemZ::AR32BitRegClass.contains(SrcReg))
954 Opcode = SystemZ::EAR;
955 else
956 llvm_unreachable("Impossible reg-to-reg copy");
958 BuildMI(MBB, MBBI, DL, get(Opcode), DestReg)
959 .addReg(SrcReg, getKillRegState(KillSrc));
962 void SystemZInstrInfo::storeRegToStackSlot(
963 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned SrcReg,
964 bool isKill, int FrameIdx, const TargetRegisterClass *RC,
965 const TargetRegisterInfo *TRI) const {
966 DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
968 // Callers may expect a single instruction, so keep 128-bit moves
969 // together for now and lower them after register allocation.
970 unsigned LoadOpcode, StoreOpcode;
971 getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
972 addFrameReference(BuildMI(MBB, MBBI, DL, get(StoreOpcode))
973 .addReg(SrcReg, getKillRegState(isKill)),
974 FrameIdx);
977 void SystemZInstrInfo::loadRegFromStackSlot(
978 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI, unsigned DestReg,
979 int FrameIdx, const TargetRegisterClass *RC,
980 const TargetRegisterInfo *TRI) const {
981 DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
983 // Callers may expect a single instruction, so keep 128-bit moves
984 // together for now and lower them after register allocation.
985 unsigned LoadOpcode, StoreOpcode;
986 getLoadStoreOpcodes(RC, LoadOpcode, StoreOpcode);
987 addFrameReference(BuildMI(MBB, MBBI, DL, get(LoadOpcode), DestReg),
988 FrameIdx);
991 // Return true if MI is a simple load or store with a 12-bit displacement
992 // and no index. Flag is SimpleBDXLoad for loads and SimpleBDXStore for stores.
993 static bool isSimpleBD12Move(const MachineInstr *MI, unsigned Flag) {
994 const MCInstrDesc &MCID = MI->getDesc();
995 return ((MCID.TSFlags & Flag) &&
996 isUInt<12>(MI->getOperand(2).getImm()) &&
997 MI->getOperand(3).getReg() == 0);
1000 namespace {
1002 struct LogicOp {
1003 LogicOp() = default;
1004 LogicOp(unsigned regSize, unsigned immLSB, unsigned immSize)
1005 : RegSize(regSize), ImmLSB(immLSB), ImmSize(immSize) {}
1007 explicit operator bool() const { return RegSize; }
1009 unsigned RegSize = 0;
1010 unsigned ImmLSB = 0;
1011 unsigned ImmSize = 0;
1014 } // end anonymous namespace
1016 static LogicOp interpretAndImmediate(unsigned Opcode) {
1017 switch (Opcode) {
1018 case SystemZ::NILMux: return LogicOp(32, 0, 16);
1019 case SystemZ::NIHMux: return LogicOp(32, 16, 16);
1020 case SystemZ::NILL64: return LogicOp(64, 0, 16);
1021 case SystemZ::NILH64: return LogicOp(64, 16, 16);
1022 case SystemZ::NIHL64: return LogicOp(64, 32, 16);
1023 case SystemZ::NIHH64: return LogicOp(64, 48, 16);
1024 case SystemZ::NIFMux: return LogicOp(32, 0, 32);
1025 case SystemZ::NILF64: return LogicOp(64, 0, 32);
1026 case SystemZ::NIHF64: return LogicOp(64, 32, 32);
1027 default: return LogicOp();
1031 static void transferDeadCC(MachineInstr *OldMI, MachineInstr *NewMI) {
1032 if (OldMI->registerDefIsDead(SystemZ::CC)) {
1033 MachineOperand *CCDef = NewMI->findRegisterDefOperand(SystemZ::CC);
1034 if (CCDef != nullptr)
1035 CCDef->setIsDead(true);
1039 MachineInstr *SystemZInstrInfo::convertToThreeAddress(
1040 MachineFunction::iterator &MFI, MachineInstr &MI, LiveVariables *LV) const {
1041 MachineBasicBlock *MBB = MI.getParent();
1043 // Try to convert an AND into an RISBG-type instruction.
1044 // TODO: It might be beneficial to select RISBG and shorten to AND instead.
1045 if (LogicOp And = interpretAndImmediate(MI.getOpcode())) {
1046 uint64_t Imm = MI.getOperand(2).getImm() << And.ImmLSB;
1047 // AND IMMEDIATE leaves the other bits of the register unchanged.
1048 Imm |= allOnes(And.RegSize) & ~(allOnes(And.ImmSize) << And.ImmLSB);
1049 unsigned Start, End;
1050 if (isRxSBGMask(Imm, And.RegSize, Start, End)) {
1051 unsigned NewOpcode;
1052 if (And.RegSize == 64) {
1053 NewOpcode = SystemZ::RISBG;
1054 // Prefer RISBGN if available, since it does not clobber CC.
1055 if (STI.hasMiscellaneousExtensions())
1056 NewOpcode = SystemZ::RISBGN;
1057 } else {
1058 NewOpcode = SystemZ::RISBMux;
1059 Start &= 31;
1060 End &= 31;
1062 MachineOperand &Dest = MI.getOperand(0);
1063 MachineOperand &Src = MI.getOperand(1);
1064 MachineInstrBuilder MIB =
1065 BuildMI(*MBB, MI, MI.getDebugLoc(), get(NewOpcode))
1066 .add(Dest)
1067 .addReg(0)
1068 .addReg(Src.getReg(), getKillRegState(Src.isKill()),
1069 Src.getSubReg())
1070 .addImm(Start)
1071 .addImm(End + 128)
1072 .addImm(0);
1073 if (LV) {
1074 unsigned NumOps = MI.getNumOperands();
1075 for (unsigned I = 1; I < NumOps; ++I) {
1076 MachineOperand &Op = MI.getOperand(I);
1077 if (Op.isReg() && Op.isKill())
1078 LV->replaceKillInstruction(Op.getReg(), MI, *MIB);
1081 transferDeadCC(&MI, MIB);
1082 return MIB;
1085 return nullptr;
1088 MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
1089 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
1090 MachineBasicBlock::iterator InsertPt, int FrameIndex,
1091 LiveIntervals *LIS, VirtRegMap *VRM) const {
1092 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
1093 const MachineFrameInfo &MFI = MF.getFrameInfo();
1094 unsigned Size = MFI.getObjectSize(FrameIndex);
1095 unsigned Opcode = MI.getOpcode();
1097 if (Ops.size() == 2 && Ops[0] == 0 && Ops[1] == 1) {
1098 if (LIS != nullptr && (Opcode == SystemZ::LA || Opcode == SystemZ::LAY) &&
1099 isInt<8>(MI.getOperand(2).getImm()) && !MI.getOperand(3).getReg()) {
1101 // Check CC liveness, since new instruction introduces a dead
1102 // def of CC.
1103 MCRegUnitIterator CCUnit(SystemZ::CC, TRI);
1104 LiveRange &CCLiveRange = LIS->getRegUnit(*CCUnit);
1105 ++CCUnit;
1106 assert(!CCUnit.isValid() && "CC only has one reg unit.");
1107 SlotIndex MISlot =
1108 LIS->getSlotIndexes()->getInstructionIndex(MI).getRegSlot();
1109 if (!CCLiveRange.liveAt(MISlot)) {
1110 // LA(Y) %reg, CONST(%reg) -> AGSI %mem, CONST
1111 MachineInstr *BuiltMI = BuildMI(*InsertPt->getParent(), InsertPt,
1112 MI.getDebugLoc(), get(SystemZ::AGSI))
1113 .addFrameIndex(FrameIndex)
1114 .addImm(0)
1115 .addImm(MI.getOperand(2).getImm());
1116 BuiltMI->findRegisterDefOperand(SystemZ::CC)->setIsDead(true);
1117 CCLiveRange.createDeadDef(MISlot, LIS->getVNInfoAllocator());
1118 return BuiltMI;
1121 return nullptr;
1124 // All other cases require a single operand.
1125 if (Ops.size() != 1)
1126 return nullptr;
1128 unsigned OpNum = Ops[0];
1129 assert(Size * 8 ==
1130 TRI->getRegSizeInBits(*MF.getRegInfo()
1131 .getRegClass(MI.getOperand(OpNum).getReg())) &&
1132 "Invalid size combination");
1134 if ((Opcode == SystemZ::AHI || Opcode == SystemZ::AGHI) && OpNum == 0 &&
1135 isInt<8>(MI.getOperand(2).getImm())) {
1136 // A(G)HI %reg, CONST -> A(G)SI %mem, CONST
1137 Opcode = (Opcode == SystemZ::AHI ? SystemZ::ASI : SystemZ::AGSI);
1138 MachineInstr *BuiltMI =
1139 BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1140 .addFrameIndex(FrameIndex)
1141 .addImm(0)
1142 .addImm(MI.getOperand(2).getImm());
1143 transferDeadCC(&MI, BuiltMI);
1144 return BuiltMI;
1147 if ((Opcode == SystemZ::ALFI && OpNum == 0 &&
1148 isInt<8>((int32_t)MI.getOperand(2).getImm())) ||
1149 (Opcode == SystemZ::ALGFI && OpNum == 0 &&
1150 isInt<8>((int64_t)MI.getOperand(2).getImm()))) {
1151 // AL(G)FI %reg, CONST -> AL(G)SI %mem, CONST
1152 Opcode = (Opcode == SystemZ::ALFI ? SystemZ::ALSI : SystemZ::ALGSI);
1153 MachineInstr *BuiltMI =
1154 BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1155 .addFrameIndex(FrameIndex)
1156 .addImm(0)
1157 .addImm((int8_t)MI.getOperand(2).getImm());
1158 transferDeadCC(&MI, BuiltMI);
1159 return BuiltMI;
1162 if ((Opcode == SystemZ::SLFI && OpNum == 0 &&
1163 isInt<8>((int32_t)-MI.getOperand(2).getImm())) ||
1164 (Opcode == SystemZ::SLGFI && OpNum == 0 &&
1165 isInt<8>((int64_t)-MI.getOperand(2).getImm()))) {
1166 // SL(G)FI %reg, CONST -> AL(G)SI %mem, -CONST
1167 Opcode = (Opcode == SystemZ::SLFI ? SystemZ::ALSI : SystemZ::ALGSI);
1168 MachineInstr *BuiltMI =
1169 BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(), get(Opcode))
1170 .addFrameIndex(FrameIndex)
1171 .addImm(0)
1172 .addImm((int8_t)-MI.getOperand(2).getImm());
1173 transferDeadCC(&MI, BuiltMI);
1174 return BuiltMI;
1177 if (Opcode == SystemZ::LGDR || Opcode == SystemZ::LDGR) {
1178 bool Op0IsGPR = (Opcode == SystemZ::LGDR);
1179 bool Op1IsGPR = (Opcode == SystemZ::LDGR);
1180 // If we're spilling the destination of an LDGR or LGDR, store the
1181 // source register instead.
1182 if (OpNum == 0) {
1183 unsigned StoreOpcode = Op1IsGPR ? SystemZ::STG : SystemZ::STD;
1184 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1185 get(StoreOpcode))
1186 .add(MI.getOperand(1))
1187 .addFrameIndex(FrameIndex)
1188 .addImm(0)
1189 .addReg(0);
1191 // If we're spilling the source of an LDGR or LGDR, load the
1192 // destination register instead.
1193 if (OpNum == 1) {
1194 unsigned LoadOpcode = Op0IsGPR ? SystemZ::LG : SystemZ::LD;
1195 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1196 get(LoadOpcode))
1197 .add(MI.getOperand(0))
1198 .addFrameIndex(FrameIndex)
1199 .addImm(0)
1200 .addReg(0);
1204 // Look for cases where the source of a simple store or the destination
1205 // of a simple load is being spilled. Try to use MVC instead.
1207 // Although MVC is in practice a fast choice in these cases, it is still
1208 // logically a bytewise copy. This means that we cannot use it if the
1209 // load or store is volatile. We also wouldn't be able to use MVC if
1210 // the two memories partially overlap, but that case cannot occur here,
1211 // because we know that one of the memories is a full frame index.
1213 // For performance reasons, we also want to avoid using MVC if the addresses
1214 // might be equal. We don't worry about that case here, because spill slot
1215 // coloring happens later, and because we have special code to remove
1216 // MVCs that turn out to be redundant.
1217 if (OpNum == 0 && MI.hasOneMemOperand()) {
1218 MachineMemOperand *MMO = *MI.memoperands_begin();
1219 if (MMO->getSize() == Size && !MMO->isVolatile() && !MMO->isAtomic()) {
1220 // Handle conversion of loads.
1221 if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXLoad)) {
1222 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1223 get(SystemZ::MVC))
1224 .addFrameIndex(FrameIndex)
1225 .addImm(0)
1226 .addImm(Size)
1227 .add(MI.getOperand(1))
1228 .addImm(MI.getOperand(2).getImm())
1229 .addMemOperand(MMO);
1231 // Handle conversion of stores.
1232 if (isSimpleBD12Move(&MI, SystemZII::SimpleBDXStore)) {
1233 return BuildMI(*InsertPt->getParent(), InsertPt, MI.getDebugLoc(),
1234 get(SystemZ::MVC))
1235 .add(MI.getOperand(1))
1236 .addImm(MI.getOperand(2).getImm())
1237 .addImm(Size)
1238 .addFrameIndex(FrameIndex)
1239 .addImm(0)
1240 .addMemOperand(MMO);
1245 // If the spilled operand is the final one or the instruction is
1246 // commutable, try to change <INSN>R into <INSN>.
1247 unsigned NumOps = MI.getNumExplicitOperands();
1248 int MemOpcode = SystemZ::getMemOpcode(Opcode);
1250 // See if this is a 3-address instruction that is convertible to 2-address
1251 // and suitable for folding below. Only try this with virtual registers
1252 // and a provided VRM (during regalloc).
1253 bool NeedsCommute = false;
1254 if (SystemZ::getTwoOperandOpcode(Opcode) != -1 && MemOpcode != -1) {
1255 if (VRM == nullptr)
1256 MemOpcode = -1;
1257 else {
1258 assert(NumOps == 3 && "Expected two source registers.");
1259 Register DstReg = MI.getOperand(0).getReg();
1260 Register DstPhys =
1261 (Register::isVirtualRegister(DstReg) ? VRM->getPhys(DstReg) : DstReg);
1262 Register SrcReg = (OpNum == 2 ? MI.getOperand(1).getReg()
1263 : ((OpNum == 1 && MI.isCommutable())
1264 ? MI.getOperand(2).getReg()
1265 : Register()));
1266 if (DstPhys && !SystemZ::GRH32BitRegClass.contains(DstPhys) && SrcReg &&
1267 Register::isVirtualRegister(SrcReg) &&
1268 DstPhys == VRM->getPhys(SrcReg))
1269 NeedsCommute = (OpNum == 1);
1270 else
1271 MemOpcode = -1;
1275 if (MemOpcode >= 0) {
1276 if ((OpNum == NumOps - 1) || NeedsCommute) {
1277 const MCInstrDesc &MemDesc = get(MemOpcode);
1278 uint64_t AccessBytes = SystemZII::getAccessSize(MemDesc.TSFlags);
1279 assert(AccessBytes != 0 && "Size of access should be known");
1280 assert(AccessBytes <= Size && "Access outside the frame index");
1281 uint64_t Offset = Size - AccessBytes;
1282 MachineInstrBuilder MIB = BuildMI(*InsertPt->getParent(), InsertPt,
1283 MI.getDebugLoc(), get(MemOpcode));
1284 MIB.add(MI.getOperand(0));
1285 if (NeedsCommute)
1286 MIB.add(MI.getOperand(2));
1287 else
1288 for (unsigned I = 1; I < OpNum; ++I)
1289 MIB.add(MI.getOperand(I));
1290 MIB.addFrameIndex(FrameIndex).addImm(Offset);
1291 if (MemDesc.TSFlags & SystemZII::HasIndex)
1292 MIB.addReg(0);
1293 transferDeadCC(&MI, MIB);
1294 return MIB;
1298 return nullptr;
1301 MachineInstr *SystemZInstrInfo::foldMemoryOperandImpl(
1302 MachineFunction &MF, MachineInstr &MI, ArrayRef<unsigned> Ops,
1303 MachineBasicBlock::iterator InsertPt, MachineInstr &LoadMI,
1304 LiveIntervals *LIS) const {
1305 return nullptr;
1308 bool SystemZInstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
1309 switch (MI.getOpcode()) {
1310 case SystemZ::L128:
1311 splitMove(MI, SystemZ::LG);
1312 return true;
1314 case SystemZ::ST128:
1315 splitMove(MI, SystemZ::STG);
1316 return true;
1318 case SystemZ::LX:
1319 splitMove(MI, SystemZ::LD);
1320 return true;
1322 case SystemZ::STX:
1323 splitMove(MI, SystemZ::STD);
1324 return true;
1326 case SystemZ::LBMux:
1327 expandRXYPseudo(MI, SystemZ::LB, SystemZ::LBH);
1328 return true;
1330 case SystemZ::LHMux:
1331 expandRXYPseudo(MI, SystemZ::LH, SystemZ::LHH);
1332 return true;
1334 case SystemZ::LLCRMux:
1335 expandZExtPseudo(MI, SystemZ::LLCR, 8);
1336 return true;
1338 case SystemZ::LLHRMux:
1339 expandZExtPseudo(MI, SystemZ::LLHR, 16);
1340 return true;
1342 case SystemZ::LLCMux:
1343 expandRXYPseudo(MI, SystemZ::LLC, SystemZ::LLCH);
1344 return true;
1346 case SystemZ::LLHMux:
1347 expandRXYPseudo(MI, SystemZ::LLH, SystemZ::LLHH);
1348 return true;
1350 case SystemZ::LMux:
1351 expandRXYPseudo(MI, SystemZ::L, SystemZ::LFH);
1352 return true;
1354 case SystemZ::LOCMux:
1355 expandLOCPseudo(MI, SystemZ::LOC, SystemZ::LOCFH);
1356 return true;
1358 case SystemZ::LOCHIMux:
1359 expandLOCPseudo(MI, SystemZ::LOCHI, SystemZ::LOCHHI);
1360 return true;
1362 case SystemZ::LOCRMux:
1363 expandLOCRPseudo(MI, SystemZ::LOCR, SystemZ::LOCFHR);
1364 return true;
1366 case SystemZ::SELRMux:
1367 expandSELRPseudo(MI, SystemZ::SELR, SystemZ::SELFHR,
1368 SystemZ::LOCRMux);
1369 return true;
1371 case SystemZ::STCMux:
1372 expandRXYPseudo(MI, SystemZ::STC, SystemZ::STCH);
1373 return true;
1375 case SystemZ::STHMux:
1376 expandRXYPseudo(MI, SystemZ::STH, SystemZ::STHH);
1377 return true;
1379 case SystemZ::STMux:
1380 expandRXYPseudo(MI, SystemZ::ST, SystemZ::STFH);
1381 return true;
1383 case SystemZ::STOCMux:
1384 expandLOCPseudo(MI, SystemZ::STOC, SystemZ::STOCFH);
1385 return true;
1387 case SystemZ::LHIMux:
1388 expandRIPseudo(MI, SystemZ::LHI, SystemZ::IIHF, true);
1389 return true;
1391 case SystemZ::IIFMux:
1392 expandRIPseudo(MI, SystemZ::IILF, SystemZ::IIHF, false);
1393 return true;
1395 case SystemZ::IILMux:
1396 expandRIPseudo(MI, SystemZ::IILL, SystemZ::IIHL, false);
1397 return true;
1399 case SystemZ::IIHMux:
1400 expandRIPseudo(MI, SystemZ::IILH, SystemZ::IIHH, false);
1401 return true;
1403 case SystemZ::NIFMux:
1404 expandRIPseudo(MI, SystemZ::NILF, SystemZ::NIHF, false);
1405 return true;
1407 case SystemZ::NILMux:
1408 expandRIPseudo(MI, SystemZ::NILL, SystemZ::NIHL, false);
1409 return true;
1411 case SystemZ::NIHMux:
1412 expandRIPseudo(MI, SystemZ::NILH, SystemZ::NIHH, false);
1413 return true;
1415 case SystemZ::OIFMux:
1416 expandRIPseudo(MI, SystemZ::OILF, SystemZ::OIHF, false);
1417 return true;
1419 case SystemZ::OILMux:
1420 expandRIPseudo(MI, SystemZ::OILL, SystemZ::OIHL, false);
1421 return true;
1423 case SystemZ::OIHMux:
1424 expandRIPseudo(MI, SystemZ::OILH, SystemZ::OIHH, false);
1425 return true;
1427 case SystemZ::XIFMux:
1428 expandRIPseudo(MI, SystemZ::XILF, SystemZ::XIHF, false);
1429 return true;
1431 case SystemZ::TMLMux:
1432 expandRIPseudo(MI, SystemZ::TMLL, SystemZ::TMHL, false);
1433 return true;
1435 case SystemZ::TMHMux:
1436 expandRIPseudo(MI, SystemZ::TMLH, SystemZ::TMHH, false);
1437 return true;
1439 case SystemZ::AHIMux:
1440 expandRIPseudo(MI, SystemZ::AHI, SystemZ::AIH, false);
1441 return true;
1443 case SystemZ::AHIMuxK:
1444 expandRIEPseudo(MI, SystemZ::AHI, SystemZ::AHIK, SystemZ::AIH);
1445 return true;
1447 case SystemZ::AFIMux:
1448 expandRIPseudo(MI, SystemZ::AFI, SystemZ::AIH, false);
1449 return true;
1451 case SystemZ::CHIMux:
1452 expandRIPseudo(MI, SystemZ::CHI, SystemZ::CIH, false);
1453 return true;
1455 case SystemZ::CFIMux:
1456 expandRIPseudo(MI, SystemZ::CFI, SystemZ::CIH, false);
1457 return true;
1459 case SystemZ::CLFIMux:
1460 expandRIPseudo(MI, SystemZ::CLFI, SystemZ::CLIH, false);
1461 return true;
1463 case SystemZ::CMux:
1464 expandRXYPseudo(MI, SystemZ::C, SystemZ::CHF);
1465 return true;
1467 case SystemZ::CLMux:
1468 expandRXYPseudo(MI, SystemZ::CL, SystemZ::CLHF);
1469 return true;
1471 case SystemZ::RISBMux: {
1472 bool DestIsHigh = isHighReg(MI.getOperand(0).getReg());
1473 bool SrcIsHigh = isHighReg(MI.getOperand(2).getReg());
1474 if (SrcIsHigh == DestIsHigh)
1475 MI.setDesc(get(DestIsHigh ? SystemZ::RISBHH : SystemZ::RISBLL));
1476 else {
1477 MI.setDesc(get(DestIsHigh ? SystemZ::RISBHL : SystemZ::RISBLH));
1478 MI.getOperand(5).setImm(MI.getOperand(5).getImm() ^ 32);
1480 return true;
1483 case SystemZ::ADJDYNALLOC:
1484 splitAdjDynAlloc(MI);
1485 return true;
1487 case TargetOpcode::LOAD_STACK_GUARD:
1488 expandLoadStackGuard(&MI);
1489 return true;
1491 default:
1492 return false;
1496 unsigned SystemZInstrInfo::getInstSizeInBytes(const MachineInstr &MI) const {
1497 if (MI.isInlineAsm()) {
1498 const MachineFunction *MF = MI.getParent()->getParent();
1499 const char *AsmStr = MI.getOperand(0).getSymbolName();
1500 return getInlineAsmLength(AsmStr, *MF->getTarget().getMCAsmInfo());
1502 return MI.getDesc().getSize();
1505 SystemZII::Branch
1506 SystemZInstrInfo::getBranchInfo(const MachineInstr &MI) const {
1507 switch (MI.getOpcode()) {
1508 case SystemZ::BR:
1509 case SystemZ::BI:
1510 case SystemZ::J:
1511 case SystemZ::JG:
1512 return SystemZII::Branch(SystemZII::BranchNormal, SystemZ::CCMASK_ANY,
1513 SystemZ::CCMASK_ANY, &MI.getOperand(0));
1515 case SystemZ::BRC:
1516 case SystemZ::BRCL:
1517 return SystemZII::Branch(SystemZII::BranchNormal, MI.getOperand(0).getImm(),
1518 MI.getOperand(1).getImm(), &MI.getOperand(2));
1520 case SystemZ::BRCT:
1521 case SystemZ::BRCTH:
1522 return SystemZII::Branch(SystemZII::BranchCT, SystemZ::CCMASK_ICMP,
1523 SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
1525 case SystemZ::BRCTG:
1526 return SystemZII::Branch(SystemZII::BranchCTG, SystemZ::CCMASK_ICMP,
1527 SystemZ::CCMASK_CMP_NE, &MI.getOperand(2));
1529 case SystemZ::CIJ:
1530 case SystemZ::CRJ:
1531 return SystemZII::Branch(SystemZII::BranchC, SystemZ::CCMASK_ICMP,
1532 MI.getOperand(2).getImm(), &MI.getOperand(3));
1534 case SystemZ::CLIJ:
1535 case SystemZ::CLRJ:
1536 return SystemZII::Branch(SystemZII::BranchCL, SystemZ::CCMASK_ICMP,
1537 MI.getOperand(2).getImm(), &MI.getOperand(3));
1539 case SystemZ::CGIJ:
1540 case SystemZ::CGRJ:
1541 return SystemZII::Branch(SystemZII::BranchCG, SystemZ::CCMASK_ICMP,
1542 MI.getOperand(2).getImm(), &MI.getOperand(3));
1544 case SystemZ::CLGIJ:
1545 case SystemZ::CLGRJ:
1546 return SystemZII::Branch(SystemZII::BranchCLG, SystemZ::CCMASK_ICMP,
1547 MI.getOperand(2).getImm(), &MI.getOperand(3));
1549 default:
1550 llvm_unreachable("Unrecognized branch opcode");
1554 void SystemZInstrInfo::getLoadStoreOpcodes(const TargetRegisterClass *RC,
1555 unsigned &LoadOpcode,
1556 unsigned &StoreOpcode) const {
1557 if (RC == &SystemZ::GR32BitRegClass || RC == &SystemZ::ADDR32BitRegClass) {
1558 LoadOpcode = SystemZ::L;
1559 StoreOpcode = SystemZ::ST;
1560 } else if (RC == &SystemZ::GRH32BitRegClass) {
1561 LoadOpcode = SystemZ::LFH;
1562 StoreOpcode = SystemZ::STFH;
1563 } else if (RC == &SystemZ::GRX32BitRegClass) {
1564 LoadOpcode = SystemZ::LMux;
1565 StoreOpcode = SystemZ::STMux;
1566 } else if (RC == &SystemZ::GR64BitRegClass ||
1567 RC == &SystemZ::ADDR64BitRegClass) {
1568 LoadOpcode = SystemZ::LG;
1569 StoreOpcode = SystemZ::STG;
1570 } else if (RC == &SystemZ::GR128BitRegClass ||
1571 RC == &SystemZ::ADDR128BitRegClass) {
1572 LoadOpcode = SystemZ::L128;
1573 StoreOpcode = SystemZ::ST128;
1574 } else if (RC == &SystemZ::FP32BitRegClass) {
1575 LoadOpcode = SystemZ::LE;
1576 StoreOpcode = SystemZ::STE;
1577 } else if (RC == &SystemZ::FP64BitRegClass) {
1578 LoadOpcode = SystemZ::LD;
1579 StoreOpcode = SystemZ::STD;
1580 } else if (RC == &SystemZ::FP128BitRegClass) {
1581 LoadOpcode = SystemZ::LX;
1582 StoreOpcode = SystemZ::STX;
1583 } else if (RC == &SystemZ::VR32BitRegClass) {
1584 LoadOpcode = SystemZ::VL32;
1585 StoreOpcode = SystemZ::VST32;
1586 } else if (RC == &SystemZ::VR64BitRegClass) {
1587 LoadOpcode = SystemZ::VL64;
1588 StoreOpcode = SystemZ::VST64;
1589 } else if (RC == &SystemZ::VF128BitRegClass ||
1590 RC == &SystemZ::VR128BitRegClass) {
1591 LoadOpcode = SystemZ::VL;
1592 StoreOpcode = SystemZ::VST;
1593 } else
1594 llvm_unreachable("Unsupported regclass to load or store");
1597 unsigned SystemZInstrInfo::getOpcodeForOffset(unsigned Opcode,
1598 int64_t Offset) const {
1599 const MCInstrDesc &MCID = get(Opcode);
1600 int64_t Offset2 = (MCID.TSFlags & SystemZII::Is128Bit ? Offset + 8 : Offset);
1601 if (isUInt<12>(Offset) && isUInt<12>(Offset2)) {
1602 // Get the instruction to use for unsigned 12-bit displacements.
1603 int Disp12Opcode = SystemZ::getDisp12Opcode(Opcode);
1604 if (Disp12Opcode >= 0)
1605 return Disp12Opcode;
1607 // All address-related instructions can use unsigned 12-bit
1608 // displacements.
1609 return Opcode;
1611 if (isInt<20>(Offset) && isInt<20>(Offset2)) {
1612 // Get the instruction to use for signed 20-bit displacements.
1613 int Disp20Opcode = SystemZ::getDisp20Opcode(Opcode);
1614 if (Disp20Opcode >= 0)
1615 return Disp20Opcode;
1617 // Check whether Opcode allows signed 20-bit displacements.
1618 if (MCID.TSFlags & SystemZII::Has20BitOffset)
1619 return Opcode;
1621 return 0;
unsigned SystemZInstrInfo::getLoadAndTest(unsigned Opcode) const {
  switch (Opcode) {
  case SystemZ::L: return SystemZ::LT;
  case SystemZ::LY: return SystemZ::LT;
  case SystemZ::LG: return SystemZ::LTG;
  case SystemZ::LGF: return SystemZ::LTGF;
  case SystemZ::LR: return SystemZ::LTR;
  case SystemZ::LGFR: return SystemZ::LTGFR;
  case SystemZ::LGR: return SystemZ::LTGR;
  case SystemZ::LER: return SystemZ::LTEBR;
  case SystemZ::LDR: return SystemZ::LTDBR;
  case SystemZ::LXR: return SystemZ::LTXBR;
  case SystemZ::LCDFR: return SystemZ::LCDBR;
  case SystemZ::LPDFR: return SystemZ::LPDBR;
  case SystemZ::LNDFR: return SystemZ::LNDBR;
  case SystemZ::LCDFR_32: return SystemZ::LCEBR;
  case SystemZ::LPDFR_32: return SystemZ::LPEBR;
  case SystemZ::LNDFR_32: return SystemZ::LNEBR;
  // On zEC12 we prefer to use RISBGN. But if there is a chance to
  // actually use the condition code, we may turn it back into RISBG.
  // Note that RISBG is not really a "load-and-test" instruction,
  // but sets the same condition code values, so is OK to use here.
  case SystemZ::RISBGN: return SystemZ::RISBG;
  default: return 0;
  }
}
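
// Illustrative note (editor's addition): the mapping above lets a load or
// register move that feeds a comparison against zero be rewritten as its
// load-and-test form, e.g. LR -> LTR or LG -> LTG, so the condition code is
// produced as a by-product and the separate compare can be dropped.
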
// Return true if Mask matches the regexp 0*1+0*, given that zero masks
// have already been filtered out. Store the first set bit in LSB and
// the number of set bits in Length if so.
static bool isStringOfOnes(uint64_t Mask, unsigned &LSB, unsigned &Length) {
  unsigned First = findFirstSet(Mask);
  uint64_t Top = (Mask >> First) + 1;
  if ((Top & -Top) == Top) {
    LSB = First;
    Length = findFirstSet(Top);
    return true;
  }
  return false;
}
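
// Worked example for isStringOfOnes above (illustrative, editor's addition):
//   Mask = 0x0ff0: findFirstSet gives 4, Top = (0x0ff0 >> 4) + 1 = 0x100,
//   a power of two, so LSB = 4 and Length = findFirstSet(0x100) = 8.
//   Mask = 0x0f0f: Top = 0x0f0f + 1 = 0x0f10, which is not a power of two,
//   so the mask is rejected (its ones are not contiguous).
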
bool SystemZInstrInfo::isRxSBGMask(uint64_t Mask, unsigned BitSize,
                                   unsigned &Start, unsigned &End) const {
  // Reject trivial all-zero masks.
  Mask &= allOnes(BitSize);
  if (Mask == 0)
    return false;

  // Handle the 1+0+ or 0+1+0* cases. Start then specifies the index of
  // the msb and End specifies the index of the lsb.
  unsigned LSB, Length;
  if (isStringOfOnes(Mask, LSB, Length)) {
    Start = 63 - (LSB + Length - 1);
    End = 63 - LSB;
    return true;
  }

  // Handle the wrap-around 1+0+1+ cases. Start then specifies the msb
  // of the low 1s and End specifies the lsb of the high 1s.
  if (isStringOfOnes(Mask ^ allOnes(BitSize), LSB, Length)) {
    assert(LSB > 0 && "Bottom bit must be set");
    assert(LSB + Length < BitSize && "Top bit must be set");
    Start = 63 - (LSB - 1);
    End = 63 - (LSB + Length);
    return true;
  }

  return false;
}
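
// Worked example for isRxSBGMask above (illustrative, editor's addition):
//   Mask = 0x00ffff00, BitSize = 32: the ones form a single run with LSB = 8
//   and Length = 16, so Start = 63 - 23 = 40 and End = 63 - 8 = 55, matching
//   the RISBG-style convention in which bit 0 is the msb of the 64-bit
//   register.
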
unsigned SystemZInstrInfo::getFusedCompare(unsigned Opcode,
                                           SystemZII::FusedCompareType Type,
                                           const MachineInstr *MI) const {
  switch (Opcode) {
  case SystemZ::CHI:
  case SystemZ::CGHI:
    if (!(MI && isInt<8>(MI->getOperand(1).getImm())))
      return 0;
    break;
  case SystemZ::CLFI:
  case SystemZ::CLGFI:
    if (!(MI && isUInt<8>(MI->getOperand(1).getImm())))
      return 0;
    break;
  case SystemZ::CL:
  case SystemZ::CLG:
    if (!STI.hasMiscellaneousExtensions())
      return 0;
    if (!(MI && MI->getOperand(3).getReg() == 0))
      return 0;
    break;
  }
  switch (Type) {
  case SystemZII::CompareAndBranch:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRJ;
    case SystemZ::CGR:
      return SystemZ::CGRJ;
    case SystemZ::CHI:
      return SystemZ::CIJ;
    case SystemZ::CGHI:
      return SystemZ::CGIJ;
    case SystemZ::CLR:
      return SystemZ::CLRJ;
    case SystemZ::CLGR:
      return SystemZ::CLGRJ;
    case SystemZ::CLFI:
      return SystemZ::CLIJ;
    case SystemZ::CLGFI:
      return SystemZ::CLGIJ;
    default:
      return 0;
    }
  case SystemZII::CompareAndReturn:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRBReturn;
    case SystemZ::CGR:
      return SystemZ::CGRBReturn;
    case SystemZ::CHI:
      return SystemZ::CIBReturn;
    case SystemZ::CGHI:
      return SystemZ::CGIBReturn;
    case SystemZ::CLR:
      return SystemZ::CLRBReturn;
    case SystemZ::CLGR:
      return SystemZ::CLGRBReturn;
    case SystemZ::CLFI:
      return SystemZ::CLIBReturn;
    case SystemZ::CLGFI:
      return SystemZ::CLGIBReturn;
    default:
      return 0;
    }
  case SystemZII::CompareAndSibcall:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRBCall;
    case SystemZ::CGR:
      return SystemZ::CGRBCall;
    case SystemZ::CHI:
      return SystemZ::CIBCall;
    case SystemZ::CGHI:
      return SystemZ::CGIBCall;
    case SystemZ::CLR:
      return SystemZ::CLRBCall;
    case SystemZ::CLGR:
      return SystemZ::CLGRBCall;
    case SystemZ::CLFI:
      return SystemZ::CLIBCall;
    case SystemZ::CLGFI:
      return SystemZ::CLGIBCall;
    default:
      return 0;
    }
  case SystemZII::CompareAndTrap:
    switch (Opcode) {
    case SystemZ::CR:
      return SystemZ::CRT;
    case SystemZ::CGR:
      return SystemZ::CGRT;
    case SystemZ::CHI:
      return SystemZ::CIT;
    case SystemZ::CGHI:
      return SystemZ::CGIT;
    case SystemZ::CLR:
      return SystemZ::CLRT;
    case SystemZ::CLGR:
      return SystemZ::CLGRT;
    case SystemZ::CLFI:
      return SystemZ::CLFIT;
    case SystemZ::CLGFI:
      return SystemZ::CLGIT;
    case SystemZ::CL:
      return SystemZ::CLT;
    case SystemZ::CLG:
      return SystemZ::CLGT;
    default:
      return 0;
    }
  }
  return 0;
}
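
// Illustrative note (editor's addition): getFusedCompare answers "which fused
// opcode, if any, can replace this compare for the given kind of user".  For
// example, a CHI whose immediate fits in a signed 8-bit field maps to CIJ for
// a compare-and-branch or CIT for a compare-and-trap; with an out-of-range
// immediate (or no MI to inspect) it returns 0 and the compare stays separate.
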
unsigned SystemZInstrInfo::getLoadAndTrap(unsigned Opcode) const {
  if (!STI.hasLoadAndTrap())
    return 0;
  switch (Opcode) {
  case SystemZ::L:
  case SystemZ::LY:
    return SystemZ::LAT;
  case SystemZ::LG:
    return SystemZ::LGAT;
  case SystemZ::LFH:
    return SystemZ::LFHAT;
  case SystemZ::LLGF:
    return SystemZ::LLGFAT;
  case SystemZ::LLGT:
    return SystemZ::LLGTAT;
  }
  return 0;
}
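
// Illustrative note (editor's addition): on subtargets with the load-and-trap
// facility, a load such as L or LG whose result is immediately tested and
// trapped on zero can be folded into LAT or LGAT; without the facility the
// function returns 0 and no folding is attempted.
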
void SystemZInstrInfo::loadImmediate(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI,
                                     unsigned Reg, uint64_t Value) const {
  DebugLoc DL = MBBI != MBB.end() ? MBBI->getDebugLoc() : DebugLoc();
  unsigned Opcode;
  if (isInt<16>(Value))
    Opcode = SystemZ::LGHI;
  else if (SystemZ::isImmLL(Value))
    Opcode = SystemZ::LLILL;
  else if (SystemZ::isImmLH(Value)) {
    Opcode = SystemZ::LLILH;
    Value >>= 16;
  } else {
    assert(isInt<32>(Value) && "Huge values not handled yet");
    Opcode = SystemZ::LGFI;
  }
  BuildMI(MBB, MBBI, DL, get(Opcode), Reg).addImm(Value);
}
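
// Worked example for loadImmediate above (illustrative, editor's addition;
// it assumes isImmLL/isImmLH test for a value that occupies only bits 0-15
// or only bits 16-31 respectively):
//   Value = 12         -> LGHI 12           (signed 16-bit immediate)
//   Value = 0x8000     -> LLILL 0x8000      (low-halfword logical immediate)
//   Value = 0x12340000 -> LLILH 0x1234      (value shifted right by 16 first)
//   Value = 0x12345678 -> LGFI 0x12345678   (general signed 32-bit immediate)
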
bool SystemZInstrInfo::
areMemAccessesTriviallyDisjoint(const MachineInstr &MIa,
                                const MachineInstr &MIb,
                                AliasAnalysis *AA) const {

  if (!MIa.hasOneMemOperand() || !MIb.hasOneMemOperand())
    return false;

  // If mem-operands show that the same address Value is used by both
  // instructions, check for non-overlapping offsets and widths. Not
  // sure if a register based analysis would be an improvement...

  MachineMemOperand *MMOa = *MIa.memoperands_begin();
  MachineMemOperand *MMOb = *MIb.memoperands_begin();
  const Value *VALa = MMOa->getValue();
  const Value *VALb = MMOb->getValue();
  bool SameVal = (VALa && VALb && (VALa == VALb));
  if (!SameVal) {
    const PseudoSourceValue *PSVa = MMOa->getPseudoValue();
    const PseudoSourceValue *PSVb = MMOb->getPseudoValue();
    if (PSVa && PSVb && (PSVa == PSVb))
      SameVal = true;
  }
  if (SameVal) {
    int OffsetA = MMOa->getOffset(), OffsetB = MMOb->getOffset();
    int WidthA = MMOa->getSize(), WidthB = MMOb->getSize();
    int LowOffset = OffsetA < OffsetB ? OffsetA : OffsetB;
    int HighOffset = OffsetA < OffsetB ? OffsetB : OffsetA;
    int LowWidth = (LowOffset == OffsetA) ? WidthA : WidthB;
    if (LowOffset + LowWidth <= HighOffset)
      return true;
  }

  return false;
}
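
// Worked example for areMemAccessesTriviallyDisjoint above (illustrative,
// editor's addition): two accesses to the same IR Value at offsets 0 and 8,
// each 8 bytes wide, give LowOffset + LowWidth = 8 <= HighOffset = 8, so the
// accesses are reported as disjoint; with offsets 0 and 4 the ranges overlap
// (8 > 4) and the function conservatively returns false.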