[InstCombine] Signed saturation patterns
[llvm-complete.git] / lib / Target / SystemZ / SystemZRegisterInfo.cpp
blob 39ace5594b7fe9bb910fb99c1f91e2e91b97fa7b
1 //===-- SystemZRegisterInfo.cpp - SystemZ register information ------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
9 #include "SystemZRegisterInfo.h"
10 #include "SystemZInstrInfo.h"
11 #include "SystemZSubtarget.h"
12 #include "llvm/CodeGen/LiveIntervals.h"
13 #include "llvm/ADT/SmallSet.h"
14 #include "llvm/CodeGen/MachineInstrBuilder.h"
15 #include "llvm/CodeGen/MachineRegisterInfo.h"
16 #include "llvm/CodeGen/TargetFrameLowering.h"
17 #include "llvm/CodeGen/VirtRegMap.h"
19 using namespace llvm;
21 #define GET_REGINFO_TARGET_DESC
22 #include "SystemZGenRegisterInfo.inc"
// Construct the SystemZ register-info object. The register passed to the
// generated base class is SystemZ::R14D — presumably the return-address
// register (the TableGen'erated constructor takes the RA register first);
// confirm against SystemZGenRegisterInfo.inc.
SystemZRegisterInfo::SystemZRegisterInfo()
    : SystemZGenRegisterInfo(SystemZ::R14D) {}
27 // Given that MO is a GRX32 operand, return either GR32 or GRH32 if MO
28 // somehow belongs in it. Otherwise, return GRX32.
29 static const TargetRegisterClass *getRC32(MachineOperand &MO,
30 const VirtRegMap *VRM,
31 const MachineRegisterInfo *MRI) {
32 const TargetRegisterClass *RC = MRI->getRegClass(MO.getReg());
34 if (SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
35 MO.getSubReg() == SystemZ::subreg_l32 ||
36 MO.getSubReg() == SystemZ::subreg_hl32)
37 return &SystemZ::GR32BitRegClass;
38 if (SystemZ::GRH32BitRegClass.hasSubClassEq(RC) ||
39 MO.getSubReg() == SystemZ::subreg_h32 ||
40 MO.getSubReg() == SystemZ::subreg_hh32)
41 return &SystemZ::GRH32BitRegClass;
43 if (VRM && VRM->hasPhys(MO.getReg())) {
44 Register PhysReg = VRM->getPhys(MO.getReg());
45 if (SystemZ::GR32BitRegClass.contains(PhysReg))
46 return &SystemZ::GR32BitRegClass;
47 assert (SystemZ::GRH32BitRegClass.contains(PhysReg) &&
48 "Phys reg not in GR32 or GRH32?");
49 return &SystemZ::GRH32BitRegClass;
52 assert (RC == &SystemZ::GRX32BitRegClass);
53 return RC;
56 // Pass the registers of RC as hints while making sure that if any of these
57 // registers are copy hints (and therefore already in Hints), hint them
58 // first.
59 static void addHints(ArrayRef<MCPhysReg> Order,
60 SmallVectorImpl<MCPhysReg> &Hints,
61 const TargetRegisterClass *RC,
62 const MachineRegisterInfo *MRI) {
63 SmallSet<unsigned, 4> CopyHints;
64 CopyHints.insert(Hints.begin(), Hints.end());
65 Hints.clear();
66 for (MCPhysReg Reg : Order)
67 if (CopyHints.count(Reg) &&
68 RC->contains(Reg) && !MRI->isReserved(Reg))
69 Hints.push_back(Reg);
70 for (MCPhysReg Reg : Order)
71 if (!CopyHints.count(Reg) &&
72 RC->contains(Reg) && !MRI->isReserved(Reg))
73 Hints.push_back(Reg);
// Compute register-allocation hints for VirtReg.
//
// On top of the target-independent copy hints, this adds SystemZ-specific
// hints:
//  * For a GRX32 register reachable (through chains of LOCRMux/SELRMux
//    operands) from an operand whose low/high half is already decided,
//    hint the whole GR32 or GRH32 class, since both operands of a
//    LOCR/SELR must be in the same half (otherwise the Mux is expanded to
//    a jump sequence — see comment below).
//  * For a GRX32 register compared against 0 by CHIMux/CFIMux and defined
//    only by LMux, prefer low (GR32) registers.
//  * Two-address hints: for instructions with a two-operand form, hint the
//    physreg of the tied/commutable operand so the conversion is possible.
//
// Returns true when the hints are the ONLY registers the allocator should
// consider; otherwise returns the base implementation's result.
bool
SystemZRegisterInfo::getRegAllocationHints(unsigned VirtReg,
                                           ArrayRef<MCPhysReg> Order,
                                           SmallVectorImpl<MCPhysReg> &Hints,
                                           const MachineFunction &MF,
                                           const VirtRegMap *VRM,
                                           const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

  // Collect the target-independent (copy) hints first.
  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (MRI->getRegClass(VirtReg) == &SystemZ::GRX32BitRegClass) {
    // Worklist of GRX32 registers connected to VirtReg through Mux
    // operands; DoneRegs guards against revisiting (the operand graph may
    // have cycles).
    SmallVector<unsigned, 8> Worklist;
    SmallSet<unsigned, 4> DoneRegs;
    Worklist.push_back(VirtReg);
    while (Worklist.size()) {
      unsigned Reg = Worklist.pop_back_val();
      if (!DoneRegs.insert(Reg).second)
        continue;

      for (auto &Use : MRI->reg_instructions(Reg)) {
        // For LOCRMux, see if the other operand is already a high or low
        // register, and in that case give the corresponding hints for
        // VirtReg. LOCR instructions need both operands in either high or
        // low parts. Same handling for SELRMux.
        if (Use.getOpcode() == SystemZ::LOCRMux ||
            Use.getOpcode() == SystemZ::SELRMux) {
          MachineOperand &TrueMO = Use.getOperand(1);
          MachineOperand &FalseMO = Use.getOperand(2);
          const TargetRegisterClass *RC =
            TRI->getCommonSubClass(getRC32(FalseMO, VRM, MRI),
                                   getRC32(TrueMO, VRM, MRI));
          // SELRMux also has a distinct destination operand that must
          // agree with the sources.
          if (Use.getOpcode() == SystemZ::SELRMux)
            RC = TRI->getCommonSubClass(RC,
                                        getRC32(Use.getOperand(0), VRM, MRI));
          if (RC && RC != &SystemZ::GRX32BitRegClass) {
            addHints(Order, Hints, RC, MRI);
            // Return true to make these hints the only regs available to
            // RA. This may mean extra spilling but since the alternative is
            // a jump sequence expansion of the LOCRMux, it is preferred.
            return true;
          }

          // Add the other operand of the LOCRMux to the worklist.
          Register OtherReg =
            (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg());
          if (MRI->getRegClass(OtherReg) == &SystemZ::GRX32BitRegClass)
            Worklist.push_back(OtherReg);
        } // end LOCRMux
        else if (Use.getOpcode() == SystemZ::CHIMux ||
                 Use.getOpcode() == SystemZ::CFIMux) {
          if (Use.getOperand(1).getImm() == 0) {
            // Only hint GR32 when every def of VirtReg is an LMux —
            // presumably so the load and compare can both use low-half
            // instructions; confirm against SystemZInstrInfo Mux expansion.
            bool OnlyLMuxes = true;
            for (MachineInstr &DefMI : MRI->def_instructions(VirtReg))
              if (DefMI.getOpcode() != SystemZ::LMux)
                OnlyLMuxes = false;
            if (OnlyLMuxes) {
              addHints(Order, Hints, &SystemZ::GR32BitRegClass, MRI);
              // Return false to make these hints preferred but not obligatory.
              return false;
            }
          }
        } // end CHIMux / CFIMux
      }
    }
  }

  // Two-address hints require phys assignments from the VirtRegMap.
  if (VRM == nullptr)
    return BaseImplRetVal;

  // Add any two address hints after any copy hints.
  SmallSet<unsigned, 4> TwoAddrHints;
  for (auto &Use : MRI->reg_nodbg_instructions(VirtReg))
    if (SystemZ::getTwoOperandOpcode(Use.getOpcode()) != -1) {
      // Identify which operand is VirtReg (def, tied use, or — for
      // commutable instructions — the second source).
      const MachineOperand *VRRegMO = nullptr;
      const MachineOperand *OtherMO = nullptr;
      const MachineOperand *CommuMO = nullptr;
      if (VirtReg == Use.getOperand(0).getReg()) {
        VRRegMO = &Use.getOperand(0);
        OtherMO = &Use.getOperand(1);
        if (Use.isCommutable())
          CommuMO = &Use.getOperand(2);
      } else if (VirtReg == Use.getOperand(1).getReg()) {
        VRRegMO = &Use.getOperand(1);
        OtherMO = &Use.getOperand(0);
      } else if (VirtReg == Use.getOperand(2).getReg() && Use.isCommutable()) {
        VRRegMO = &Use.getOperand(2);
        OtherMO = &Use.getOperand(0);
      } else
        continue;

      // Hint the (physical) register of MO, translated through any subreg
      // indices so it is valid for VirtReg's register class.
      auto tryAddHint = [&](const MachineOperand *MO) -> void {
        Register Reg = MO->getReg();
        Register PhysReg =
          Register::isPhysicalRegister(Reg) ? Reg : VRM->getPhys(Reg);
        if (PhysReg) {
          if (MO->getSubReg())
            PhysReg = getSubReg(PhysReg, MO->getSubReg());
          if (VRRegMO->getSubReg())
            PhysReg = getMatchingSuperReg(PhysReg, VRRegMO->getSubReg(),
                                          MRI->getRegClass(VirtReg));
          if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
            TwoAddrHints.insert(PhysReg);
        }
      };
      tryAddHint(OtherMO);
      if (CommuMO)
        tryAddHint(CommuMO);
    }
  // Append the collected two-address hints in allocation Order, after the
  // copy hints already in Hints.
  for (MCPhysReg OrderReg : Order)
    if (TwoAddrHints.count(OrderReg))
      Hints.push_back(OrderReg);

  return BaseImplRetVal;
}
195 const MCPhysReg *
196 SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
197 const SystemZSubtarget &Subtarget = MF->getSubtarget<SystemZSubtarget>();
198 if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
199 return Subtarget.hasVector()? CSR_SystemZ_AllRegs_Vector_SaveList
200 : CSR_SystemZ_AllRegs_SaveList;
201 if (MF->getSubtarget().getTargetLowering()->supportSwiftError() &&
202 MF->getFunction().getAttributes().hasAttrSomewhere(
203 Attribute::SwiftError))
204 return CSR_SystemZ_SwiftError_SaveList;
205 return CSR_SystemZ_SaveList;
208 const uint32_t *
209 SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
210 CallingConv::ID CC) const {
211 const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
212 if (CC == CallingConv::AnyReg)
213 return Subtarget.hasVector()? CSR_SystemZ_AllRegs_Vector_RegMask
214 : CSR_SystemZ_AllRegs_RegMask;
215 if (MF.getSubtarget().getTargetLowering()->supportSwiftError() &&
216 MF.getFunction().getAttributes().hasAttrSomewhere(
217 Attribute::SwiftError))
218 return CSR_SystemZ_SwiftError_RegMask;
219 return CSR_SystemZ_RegMask;
222 BitVector
223 SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
224 BitVector Reserved(getNumRegs());
225 const SystemZFrameLowering *TFI = getFrameLowering(MF);
227 if (TFI->hasFP(MF)) {
228 // R11D is the frame pointer. Reserve all aliases.
229 Reserved.set(SystemZ::R11D);
230 Reserved.set(SystemZ::R11L);
231 Reserved.set(SystemZ::R11H);
232 Reserved.set(SystemZ::R10Q);
235 // R15D is the stack pointer. Reserve all aliases.
236 Reserved.set(SystemZ::R15D);
237 Reserved.set(SystemZ::R15L);
238 Reserved.set(SystemZ::R15H);
239 Reserved.set(SystemZ::R14Q);
241 // A0 and A1 hold the thread pointer.
242 Reserved.set(SystemZ::A0);
243 Reserved.set(SystemZ::A1);
245 // FPC is the floating-point control register.
246 Reserved.set(SystemZ::FPC);
248 return Reserved;
// Replace the frame-index operand of MI (at FIOperandNum) with a concrete
// base register + immediate offset. When the combined offset does not fit
// any form of the instruction, materialize an in-range "anchor" address in
// a scratch register and address relative to it.
void
SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  assert(SPAdj == 0 && "Outgoing arguments should be part of the frame");

  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();
  auto *TII =
      static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  DebugLoc DL = MI->getDebugLoc();

  // Decompose the frame index into a base and offset.
  int FrameIndex = MI->getOperand(FIOperandNum).getIndex();
  unsigned BasePtr;
  int64_t Offset = (TFI->getFrameIndexReference(MF, FrameIndex, BasePtr) +
                    MI->getOperand(FIOperandNum + 1).getImm());

  // Special handling of dbg_value instructions: just substitute the base
  // and offset; no range checking is needed for debug info.
  if (MI->isDebugValue()) {
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, /*isDef*/ false);
    MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
    return;
  }

  // See if the offset is in range, or if an equivalent instruction that
  // accepts the offset exists.
  unsigned Opcode = MI->getOpcode();
  unsigned OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
  if (OpcodeForOffset) {
    if (OpcodeForOffset == SystemZ::LE &&
        MF.getSubtarget<SystemZSubtarget>().hasVector()) {
      // If LE is ok for offset, use LDE instead on z13.
      OpcodeForOffset = SystemZ::LDE32;
    }
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
  }
  else {
    // Create an anchor point that is in range. Start at 0xffff so that
    // can use LLILH to load the immediate.
    // Shrink the mask until the low part of the offset is accepted by
    // some form of the instruction; the remainder becomes the anchor.
    int64_t OldOffset = Offset;
    int64_t Mask = 0xffff;
    do {
      Offset = OldOffset & Mask;
      OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
      Mask >>= 1;
      assert(Mask && "One offset must be OK");
    } while (!OpcodeForOffset);

    Register ScratchReg =
      MF.getRegInfo().createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    int64_t HighOffset = OldOffset - Offset;

    if (MI->getDesc().TSFlags & SystemZII::HasIndex
        && MI->getOperand(FIOperandNum + 2).getReg() == 0) {
      // Load the offset into the scratch register and use it as an index.
      // The scratch register then dies here.
      TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
      MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
      MI->getOperand(FIOperandNum + 2).ChangeToRegister(ScratchReg,
                                                        false, false, true);
    } else {
      // Load the anchor address into a scratch register.
      unsigned LAOpcode = TII->getOpcodeForOffset(SystemZ::LA, HighOffset);
      if (LAOpcode)
        BuildMI(MBB, MI, DL, TII->get(LAOpcode),ScratchReg)
          .addReg(BasePtr).addImm(HighOffset).addReg(0);
      else {
        // Load the high offset into the scratch register and use it as
        // an index.
        TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
        BuildMI(MBB, MI, DL, TII->get(SystemZ::AGR),ScratchReg)
          .addReg(ScratchReg, RegState::Kill).addReg(BasePtr);
      }

      // Use the scratch register as the base. It then dies here.
      MI->getOperand(FIOperandNum).ChangeToRegister(ScratchReg,
                                                    false, false, true);
    }
  }
  // Rewrite the instruction to the (possibly different) opcode that
  // accepts the final offset, and install that offset.
  MI->setDesc(TII->get(OpcodeForOffset));
  MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}
// Decide whether the register coalescer may merge the two operands of the
// COPY MI into register class NewRC. Everything is allowed except GR128
// subreg copies, which are only coalesced when both live ranges are local
// to one basic block and enough GR128 registers remain unclobbered there.
bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  assert (MI->isCopy() && "Only expecting COPY instructions");

  // Coalesce anything which is not a COPY involving a subreg to/from GR128.
  if (!(NewRC->hasSuperClassEq(&SystemZ::GR128BitRegClass) &&
        (getRegSizeInBits(*SrcRC) <= 64 || getRegSizeInBits(*DstRC) <= 64)))
    return true;

  // Allow coalescing of a GR128 subreg COPY only if the live ranges are small
  // and local to one MBB with not too many interfering registers. Otherwise
  // regalloc may run out of registers.

  // Identify the wide (128-bit) and narrow operands of the COPY.
  unsigned WideOpNo = (getRegSizeInBits(*SrcRC) == 128 ? 1 : 0);
  Register GR128Reg = MI->getOperand(WideOpNo).getReg();
  Register GRNarReg = MI->getOperand((WideOpNo == 1) ? 0 : 1).getReg();
  LiveInterval &IntGR128 = LIS.getInterval(GR128Reg);
  LiveInterval &IntGRNar = LIS.getInterval(GRNarReg);

  // Check that the two virtual registers are local to MBB.
  MachineBasicBlock *MBB = MI->getParent();
  MachineInstr *FirstMI_GR128 =
    LIS.getInstructionFromIndex(IntGR128.beginIndex());
  MachineInstr *FirstMI_GRNar =
    LIS.getInstructionFromIndex(IntGRNar.beginIndex());
  MachineInstr *LastMI_GR128 = LIS.getInstructionFromIndex(IntGR128.endIndex());
  MachineInstr *LastMI_GRNar = LIS.getInstructionFromIndex(IntGRNar.endIndex());
  if ((!FirstMI_GR128 || FirstMI_GR128->getParent() != MBB) ||
      (!FirstMI_GRNar || FirstMI_GRNar->getParent() != MBB) ||
      (!LastMI_GR128 || LastMI_GR128->getParent() != MBB) ||
      (!LastMI_GRNar || LastMI_GRNar->getParent() != MBB))
    return false;

  // [MII, MEE] spans from the start of the earlier interval to the end of
  // the later one — the region the merged live range would cover.
  MachineBasicBlock::iterator MII = nullptr, MEE = nullptr;
  if (WideOpNo == 1) {
    MII = FirstMI_GR128;
    MEE = LastMI_GRNar;
  } else {
    MII = FirstMI_GRNar;
    MEE = LastMI_GR128;
  }

  // Check if coalescing seems safe by finding the set of clobbered physreg
  // pairs in the region.
  BitVector PhysClobbered(getNumRegs());
  MEE++;
  for (; MII != MEE; ++MII) {
    for (const MachineOperand &MO : MII->operands())
      if (MO.isReg() && Register::isPhysicalRegister(MO.getReg())) {
        // Record the NewRC super-register touched by this operand, if any.
        for (MCSuperRegIterator SI(MO.getReg(), this, true/*IncludeSelf*/);
             SI.isValid(); ++SI)
          if (NewRC->contains(*SI)) {
            PhysClobbered.set(*SI);
            break;
          }
      }
  }

  // Demand an arbitrary margin of free regs.
  unsigned const DemandedFreeGR128 = 3;
  if (PhysClobbered.count() > (NewRC->getNumRegs() - DemandedFreeGR128))
    return false;

  return true;
}
407 Register
408 SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
409 const SystemZFrameLowering *TFI = getFrameLowering(MF);
410 return TFI->hasFP(MF) ? SystemZ::R11D : SystemZ::R15D;
413 const TargetRegisterClass *
414 SystemZRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
415 if (RC == &SystemZ::CCRRegClass)
416 return &SystemZ::GR32BitRegClass;
417 return RC;