[ORC] Add std::tuple support to SimplePackedSerialization.
[llvm-project.git] / llvm / lib / Target / SystemZ / SystemZRegisterInfo.cpp
blob0062e39602f56893640e0ea3173fc66c5871aedb
1 //===-- SystemZRegisterInfo.cpp - SystemZ register information ------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
9 #include "SystemZRegisterInfo.h"
10 #include "SystemZInstrInfo.h"
11 #include "SystemZSubtarget.h"
12 #include "llvm/ADT/SmallSet.h"
13 #include "llvm/CodeGen/LiveIntervals.h"
14 #include "llvm/CodeGen/MachineInstrBuilder.h"
15 #include "llvm/CodeGen/MachineRegisterInfo.h"
16 #include "llvm/CodeGen/TargetFrameLowering.h"
17 #include "llvm/CodeGen/VirtRegMap.h"
18 #include "llvm/IR/DebugInfoMetadata.h"
20 using namespace llvm;
22 #define GET_REGINFO_TARGET_DESC
23 #include "SystemZGenRegisterInfo.inc"
25 // Given that MO is a GRX32 operand, return either GR32 or GRH32 if MO
26 // somehow belongs in it. Otherwise, return GRX32.
27 static const TargetRegisterClass *getRC32(MachineOperand &MO,
28 const VirtRegMap *VRM,
29 const MachineRegisterInfo *MRI) {
30 const TargetRegisterClass *RC = MRI->getRegClass(MO.getReg());
32 if (SystemZ::GR32BitRegClass.hasSubClassEq(RC) ||
33 MO.getSubReg() == SystemZ::subreg_l32 ||
34 MO.getSubReg() == SystemZ::subreg_hl32)
35 return &SystemZ::GR32BitRegClass;
36 if (SystemZ::GRH32BitRegClass.hasSubClassEq(RC) ||
37 MO.getSubReg() == SystemZ::subreg_h32 ||
38 MO.getSubReg() == SystemZ::subreg_hh32)
39 return &SystemZ::GRH32BitRegClass;
41 if (VRM && VRM->hasPhys(MO.getReg())) {
42 Register PhysReg = VRM->getPhys(MO.getReg());
43 if (SystemZ::GR32BitRegClass.contains(PhysReg))
44 return &SystemZ::GR32BitRegClass;
45 assert (SystemZ::GRH32BitRegClass.contains(PhysReg) &&
46 "Phys reg not in GR32 or GRH32?");
47 return &SystemZ::GRH32BitRegClass;
50 assert (RC == &SystemZ::GRX32BitRegClass);
51 return RC;
54 // Pass the registers of RC as hints while making sure that if any of these
55 // registers are copy hints (and therefore already in Hints), hint them
56 // first.
57 static void addHints(ArrayRef<MCPhysReg> Order,
58 SmallVectorImpl<MCPhysReg> &Hints,
59 const TargetRegisterClass *RC,
60 const MachineRegisterInfo *MRI) {
61 SmallSet<unsigned, 4> CopyHints;
62 CopyHints.insert(Hints.begin(), Hints.end());
63 Hints.clear();
64 for (MCPhysReg Reg : Order)
65 if (CopyHints.count(Reg) &&
66 RC->contains(Reg) && !MRI->isReserved(Reg))
67 Hints.push_back(Reg);
68 for (MCPhysReg Reg : Order)
69 if (!CopyHints.count(Reg) &&
70 RC->contains(Reg) && !MRI->isReserved(Reg))
71 Hints.push_back(Reg);
// Add SystemZ-specific allocation hints for VirtReg on top of the
// target-independent (copy) hints: hints derived from two-address forms of
// instructions using VirtReg, and GR32/GRH32 class hints for GRX32
// registers used by LOCRMux/SELRMux/CHIMux/CFIMux.  A 'true' return makes
// the hints the only registers the allocator may try.
bool SystemZRegisterInfo::getRegAllocationHints(
    Register VirtReg, ArrayRef<MCPhysReg> Order,
    SmallVectorImpl<MCPhysReg> &Hints, const MachineFunction &MF,
    const VirtRegMap *VRM, const LiveRegMatrix *Matrix) const {
  const MachineRegisterInfo *MRI = &MF.getRegInfo();
  const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
  const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();

  // Collect the generic copy hints first; they stay at the front of Hints.
  bool BaseImplRetVal = TargetRegisterInfo::getRegAllocationHints(
      VirtReg, Order, Hints, MF, VRM, Matrix);

  if (VRM != nullptr) {
    // Add any two address hints after any copy hints.
    SmallSet<unsigned, 4> TwoAddrHints;
    for (auto &Use : MRI->reg_nodbg_instructions(VirtReg))
      // Only instructions that have a two-operand (two-address) form.
      if (SystemZ::getTwoOperandOpcode(Use.getOpcode()) != -1) {
        // Work out which operand is VirtReg (def, first source, or — for
        // commutable instructions — second source) and which operand(s)
        // could be tied to it in the two-address form.
        const MachineOperand *VRRegMO = nullptr;
        const MachineOperand *OtherMO = nullptr;
        const MachineOperand *CommuMO = nullptr;
        if (VirtReg == Use.getOperand(0).getReg()) {
          VRRegMO = &Use.getOperand(0);
          OtherMO = &Use.getOperand(1);
          if (Use.isCommutable())
            CommuMO = &Use.getOperand(2);
        } else if (VirtReg == Use.getOperand(1).getReg()) {
          VRRegMO = &Use.getOperand(1);
          OtherMO = &Use.getOperand(0);
        } else if (VirtReg == Use.getOperand(2).getReg() &&
                   Use.isCommutable()) {
          VRRegMO = &Use.getOperand(2);
          OtherMO = &Use.getOperand(0);
        } else
          continue;

        // Hint the (already assigned) physical register of MO, translated
        // through any subregister indices so it matches VirtReg's class.
        auto tryAddHint = [&](const MachineOperand *MO) -> void {
          Register Reg = MO->getReg();
          Register PhysReg = Register::isPhysicalRegister(Reg)
                                 ? Reg
                                 : Register(VRM->getPhys(Reg));
          if (PhysReg) {
            if (MO->getSubReg())
              PhysReg = getSubReg(PhysReg, MO->getSubReg());
            if (VRRegMO->getSubReg())
              PhysReg = getMatchingSuperReg(PhysReg, VRRegMO->getSubReg(),
                                            MRI->getRegClass(VirtReg));
            // Skip registers already hinted via the copy hints above.
            if (!MRI->isReserved(PhysReg) && !is_contained(Hints, PhysReg))
              TwoAddrHints.insert(PhysReg);
          }
        };
        tryAddHint(OtherMO);
        if (CommuMO)
          tryAddHint(CommuMO);
      }
    // Append the two-address hints in allocation order.
    for (MCPhysReg OrderReg : Order)
      if (TwoAddrHints.count(OrderReg))
        Hints.push_back(OrderReg);
  }

  if (MRI->getRegClass(VirtReg) == &SystemZ::GRX32BitRegClass) {
    // Walk the web of GRX32 registers connected to VirtReg through
    // LOCRMux/SELRMux operands, looking for a constraint to GR32 or GRH32.
    SmallVector<Register, 8> Worklist;
    SmallSet<Register, 4> DoneRegs;
    Worklist.push_back(VirtReg);
    while (Worklist.size()) {
      Register Reg = Worklist.pop_back_val();
      if (!DoneRegs.insert(Reg).second)
        continue;

      for (auto &Use : MRI->reg_instructions(Reg)) {
        // For LOCRMux, see if the other operand is already a high or low
        // register, and in that case give the corresponding hints for
        // VirtReg. LOCR instructions need both operands in either high or
        // low parts. Same handling for SELRMux.
        if (Use.getOpcode() == SystemZ::LOCRMux ||
            Use.getOpcode() == SystemZ::SELRMux) {
          MachineOperand &TrueMO = Use.getOperand(1);
          MachineOperand &FalseMO = Use.getOperand(2);
          const TargetRegisterClass *RC =
              TRI->getCommonSubClass(getRC32(FalseMO, VRM, MRI),
                                     getRC32(TrueMO, VRM, MRI));
          // SELRMux additionally constrains the result operand.
          if (Use.getOpcode() == SystemZ::SELRMux)
            RC = TRI->getCommonSubClass(RC,
                                        getRC32(Use.getOperand(0), VRM, MRI));
          if (RC && RC != &SystemZ::GRX32BitRegClass) {
            addHints(Order, Hints, RC, MRI);
            // Return true to make these hints the only regs available to
            // RA. This may mean extra spilling but since the alternative is
            // a jump sequence expansion of the LOCRMux, it is preferred.
            return true;
          }

          // Add the other operand of the LOCRMux to the worklist.
          Register OtherReg =
              (TrueMO.getReg() == Reg ? FalseMO.getReg() : TrueMO.getReg());
          if (MRI->getRegClass(OtherReg) == &SystemZ::GRX32BitRegClass)
            Worklist.push_back(OtherReg);
        } // end LOCRMux
        else if (Use.getOpcode() == SystemZ::CHIMux ||
                 Use.getOpcode() == SystemZ::CFIMux) {
          // A compare against 0 whose input is only defined by LMux loads
          // can use the low-word form, so prefer (not require) GR32.
          if (Use.getOperand(1).getImm() == 0) {
            bool OnlyLMuxes = true;
            for (MachineInstr &DefMI : MRI->def_instructions(VirtReg))
              if (DefMI.getOpcode() != SystemZ::LMux)
                OnlyLMuxes = false;
            if (OnlyLMuxes) {
              addHints(Order, Hints, &SystemZ::GR32BitRegClass, MRI);
              // Return false to make these hints preferred but not obligatory.
              return false;
            }
          }
        } // end CHIMux / CFIMux
      }
    }
  }

  return BaseImplRetVal;
}
191 const MCPhysReg *
192 SystemZXPLINK64Registers::getCalleeSavedRegs(const MachineFunction *MF) const {
193 return CSR_SystemZ_XPLINK64_SaveList;
196 const MCPhysReg *
197 SystemZELFRegisters::getCalleeSavedRegs(const MachineFunction *MF) const {
198 const SystemZSubtarget &Subtarget = MF->getSubtarget<SystemZSubtarget>();
199 if (MF->getFunction().getCallingConv() == CallingConv::GHC)
200 return CSR_SystemZ_NoRegs_SaveList;
201 if (MF->getFunction().getCallingConv() == CallingConv::AnyReg)
202 return Subtarget.hasVector()? CSR_SystemZ_AllRegs_Vector_SaveList
203 : CSR_SystemZ_AllRegs_SaveList;
204 if (MF->getSubtarget().getTargetLowering()->supportSwiftError() &&
205 MF->getFunction().getAttributes().hasAttrSomewhere(
206 Attribute::SwiftError))
207 return CSR_SystemZ_SwiftError_SaveList;
208 return CSR_SystemZ_ELF_SaveList;
211 const uint32_t *
212 SystemZXPLINK64Registers::getCallPreservedMask(const MachineFunction &MF,
213 CallingConv::ID CC) const {
214 return CSR_SystemZ_XPLINK64_RegMask;
217 const uint32_t *
218 SystemZELFRegisters::getCallPreservedMask(const MachineFunction &MF,
219 CallingConv::ID CC) const {
220 const SystemZSubtarget &Subtarget = MF.getSubtarget<SystemZSubtarget>();
221 if (CC == CallingConv::GHC)
222 return CSR_SystemZ_NoRegs_RegMask;
223 if (CC == CallingConv::AnyReg)
224 return Subtarget.hasVector()? CSR_SystemZ_AllRegs_Vector_RegMask
225 : CSR_SystemZ_AllRegs_RegMask;
226 if (MF.getSubtarget().getTargetLowering()->supportSwiftError() &&
227 MF.getFunction().getAttributes().hasAttrSomewhere(
228 Attribute::SwiftError))
229 return CSR_SystemZ_SwiftError_RegMask;
230 return CSR_SystemZ_ELF_RegMask;
233 SystemZRegisterInfo::SystemZRegisterInfo(unsigned int RA)
234 : SystemZGenRegisterInfo(RA) {}
236 const MCPhysReg *
237 SystemZRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
239 const SystemZSubtarget *Subtarget = &MF->getSubtarget<SystemZSubtarget>();
240 SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();
242 return Regs->getCalleeSavedRegs(MF);
245 const uint32_t *
246 SystemZRegisterInfo::getCallPreservedMask(const MachineFunction &MF,
247 CallingConv::ID CC) const {
249 const SystemZSubtarget *Subtarget = &MF.getSubtarget<SystemZSubtarget>();
250 SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();
251 return Regs->getCallPreservedMask(MF, CC);
254 BitVector
255 SystemZRegisterInfo::getReservedRegs(const MachineFunction &MF) const {
256 BitVector Reserved(getNumRegs());
257 const SystemZFrameLowering *TFI = getFrameLowering(MF);
258 const SystemZSubtarget *Subtarget = &MF.getSubtarget<SystemZSubtarget>();
259 SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();
260 if (TFI->hasFP(MF))
261 // The frame pointer. Reserve all aliases.
262 for (MCRegAliasIterator AI(Regs->getFramePointerRegister(), this, true);
263 AI.isValid(); ++AI)
264 Reserved.set(*AI);
266 // Reserve all aliases for the stack pointer.
267 for (MCRegAliasIterator AI(Regs->getStackPointerRegister(), this, true);
268 AI.isValid(); ++AI)
269 Reserved.set(*AI);
271 // A0 and A1 hold the thread pointer.
272 Reserved.set(SystemZ::A0);
273 Reserved.set(SystemZ::A1);
275 // FPC is the floating-point control register.
276 Reserved.set(SystemZ::FPC);
278 return Reserved;
// Rewrite the frame-index operand of MI (at FIOperandNum) into a real base
// register plus immediate offset.  If the offset does not fit any form of
// the instruction, an anchor address is materialized into a scratch
// register first.  dbg_value instructions are rewritten in place without
// extra code.
void
SystemZRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator MI,
                                         int SPAdj, unsigned FIOperandNum,
                                         RegScavenger *RS) const {
  assert(SPAdj == 0 && "Outgoing arguments should be part of the frame");

  MachineBasicBlock &MBB = *MI->getParent();
  MachineFunction &MF = *MBB.getParent();
  auto *TII =
      static_cast<const SystemZInstrInfo *>(MF.getSubtarget().getInstrInfo());
  const SystemZFrameLowering *TFI = getFrameLowering(MF);
  DebugLoc DL = MI->getDebugLoc();

  // Decompose the frame index into a base and offset.
  int FrameIndex = MI->getOperand(FIOperandNum).getIndex();
  Register BasePtr;
  // Total offset = frame lowering's fixed offset + the instruction's own
  // immediate displacement operand (at FIOperandNum + 1).
  int64_t Offset =
      (TFI->getFrameIndexReference(MF, FrameIndex, BasePtr).getFixed() +
       MI->getOperand(FIOperandNum + 1).getImm());

  // Special handling of dbg_value instructions.
  if (MI->isDebugValue()) {
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, /*isDef*/ false);
    if (MI->isNonListDebugValue()) {
      // Classic DBG_VALUE: the offset is its own operand.
      MI->getDebugOffset().ChangeToImmediate(Offset);
    } else {
      // DBG_VALUE_LIST: fold the offset into the DIExpression for the
      // operand that referenced the frame index.
      unsigned OpIdx = MI->getDebugOperandIndex(&MI->getOperand(FIOperandNum));
      SmallVector<uint64_t, 3> Ops;
      DIExpression::appendOffset(
          Ops, TFI->getFrameIndexReference(MF, FrameIndex, BasePtr).getFixed());
      MI->getDebugExpressionOp().setMetadata(
          DIExpression::appendOpsToArg(MI->getDebugExpression(), Ops, OpIdx));
    }
    return;
  }

  // See if the offset is in range, or if an equivalent instruction that
  // accepts the offset exists.
  unsigned Opcode = MI->getOpcode();
  unsigned OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
  if (OpcodeForOffset) {
    if (OpcodeForOffset == SystemZ::LE &&
        MF.getSubtarget<SystemZSubtarget>().hasVector()) {
      // If LE is ok for offset, use LDE instead on z13.
      OpcodeForOffset = SystemZ::LDE32;
    }
    MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
  }
  else {
    // Create an anchor point that is in range. Start at 0xffff so that
    // can use LLILH to load the immediate.
    int64_t OldOffset = Offset;
    int64_t Mask = 0xffff;
    do {
      // Keep narrowing the low-bit mask until the remaining low part of
      // the offset fits some form of the instruction.
      Offset = OldOffset & Mask;
      OpcodeForOffset = TII->getOpcodeForOffset(Opcode, Offset);
      Mask >>= 1;
      assert(Mask && "One offset must be OK");
    } while (!OpcodeForOffset);

    Register ScratchReg =
        MF.getRegInfo().createVirtualRegister(&SystemZ::ADDR64BitRegClass);
    int64_t HighOffset = OldOffset - Offset;

    if (MI->getDesc().TSFlags & SystemZII::HasIndex
        && MI->getOperand(FIOperandNum + 2).getReg() == 0) {
      // Load the offset into the scratch register and use it as an index.
      // The scratch register then dies here.
      TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
      MI->getOperand(FIOperandNum).ChangeToRegister(BasePtr, false);
      MI->getOperand(FIOperandNum + 2).ChangeToRegister(ScratchReg,
                                                        false, false, true);
    } else {
      // Load the anchor address into a scratch register.
      unsigned LAOpcode = TII->getOpcodeForOffset(SystemZ::LA, HighOffset);
      if (LAOpcode)
        BuildMI(MBB, MI, DL, TII->get(LAOpcode),ScratchReg)
          .addReg(BasePtr).addImm(HighOffset).addReg(0);
      else {
        // Load the high offset into the scratch register and use it as
        // an index.
        TII->loadImmediate(MBB, MI, ScratchReg, HighOffset);
        BuildMI(MBB, MI, DL, TII->get(SystemZ::LA), ScratchReg)
          .addReg(BasePtr, RegState::Kill).addImm(0).addReg(ScratchReg);
      }

      // Use the scratch register as the base. It then dies here.
      MI->getOperand(FIOperandNum).ChangeToRegister(ScratchReg,
                                                    false, false, true);
    }
  }
  // Finally switch to the in-range opcode and the (possibly reduced) offset.
  MI->setDesc(TII->get(OpcodeForOffset));
  MI->getOperand(FIOperandNum + 1).ChangeToImmediate(Offset);
}
// Decide whether the register coalescer may merge the operands of the
// COPY instruction MI.  GR128 subreg copies are only allowed when both
// live ranges are local to one basic block and enough GR128 registers
// appear to remain free across the combined range.
bool SystemZRegisterInfo::shouldCoalesce(MachineInstr *MI,
                                         const TargetRegisterClass *SrcRC,
                                         unsigned SubReg,
                                         const TargetRegisterClass *DstRC,
                                         unsigned DstSubReg,
                                         const TargetRegisterClass *NewRC,
                                         LiveIntervals &LIS) const {
  assert (MI->isCopy() && "Only expecting COPY instructions");

  // Coalesce anything which is not a COPY involving a subreg to/from GR128.
  if (!(NewRC->hasSuperClassEq(&SystemZ::GR128BitRegClass) &&
        (getRegSizeInBits(*SrcRC) <= 64 || getRegSizeInBits(*DstRC) <= 64)))
    return true;

  // Allow coalescing of a GR128 subreg COPY only if the live ranges are small
  // and local to one MBB with not too much interferring registers. Otherwise
  // regalloc may run out of registers.

  // Identify the 128-bit operand and the narrow operand of the COPY.
  unsigned WideOpNo = (getRegSizeInBits(*SrcRC) == 128 ? 1 : 0);
  Register GR128Reg = MI->getOperand(WideOpNo).getReg();
  Register GRNarReg = MI->getOperand((WideOpNo == 1) ? 0 : 1).getReg();
  LiveInterval &IntGR128 = LIS.getInterval(GR128Reg);
  LiveInterval &IntGRNar = LIS.getInterval(GRNarReg);

  // Check that the two virtual registers are local to MBB.
  MachineBasicBlock *MBB = MI->getParent();
  MachineInstr *FirstMI_GR128 =
    LIS.getInstructionFromIndex(IntGR128.beginIndex());
  MachineInstr *FirstMI_GRNar =
    LIS.getInstructionFromIndex(IntGRNar.beginIndex());
  MachineInstr *LastMI_GR128 = LIS.getInstructionFromIndex(IntGR128.endIndex());
  MachineInstr *LastMI_GRNar = LIS.getInstructionFromIndex(IntGRNar.endIndex());
  if ((!FirstMI_GR128 || FirstMI_GR128->getParent() != MBB) ||
      (!FirstMI_GRNar || FirstMI_GRNar->getParent() != MBB) ||
      (!LastMI_GR128 || LastMI_GR128->getParent() != MBB) ||
      (!LastMI_GRNar || LastMI_GRNar->getParent() != MBB))
    return false;

  // Form the [MII, MEE] instruction range covering both live intervals:
  // from the start of whichever interval begins first (the copy source)
  // to the end of whichever ends last (the copy destination).
  MachineBasicBlock::iterator MII = nullptr, MEE = nullptr;
  if (WideOpNo == 1) {
    MII = FirstMI_GR128;
    MEE = LastMI_GRNar;
  } else {
    MII = FirstMI_GRNar;
    MEE = LastMI_GR128;
  }

  // Check if coalescing seems safe by finding the set of clobbered physreg
  // pairs in the region.
  BitVector PhysClobbered(getNumRegs());
  MEE++;
  for (; MII != MEE; ++MII) {
    for (const MachineOperand &MO : MII->operands())
      if (MO.isReg() && Register::isPhysicalRegister(MO.getReg())) {
        // Record the NewRC (GR128) super-register of any physreg touched.
        for (MCSuperRegIterator SI(MO.getReg(), this, true/*IncludeSelf*/);
             SI.isValid(); ++SI)
          if (NewRC->contains(*SI)) {
            PhysClobbered.set(*SI);
            break;
          }
      }
  }

  // Demand an arbitrary margin of free regs.
  unsigned const DemandedFreeGR128 = 3;
  if (PhysClobbered.count() > (NewRC->getNumRegs() - DemandedFreeGR128))
    return false;

  return true;
}
447 Register
448 SystemZRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
449 const SystemZFrameLowering *TFI = getFrameLowering(MF);
450 const SystemZSubtarget *Subtarget = &MF.getSubtarget<SystemZSubtarget>();
451 SystemZCallingConventionRegisters *Regs = Subtarget->getSpecialRegisters();
453 return TFI->hasFP(MF) ? Regs->getFramePointerRegister()
454 : Regs->getStackPointerRegister();
457 const TargetRegisterClass *
458 SystemZRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
459 if (RC == &SystemZ::CCRRegClass)
460 return &SystemZ::GR32BitRegClass;
461 return RC;