AMDGPU/GlobalISel: Default to using TableGen'd instruction selector
//===- AMDGPUInstructionSelector.cpp ----------------------------*- C++ -*-==//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for
/// AMDGPU.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "AMDGPUInstructionSelector.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPURegisterBankInfo.h"
#include "AMDGPURegisterInfo.h"
#include "AMDGPUSubtarget.h"
#include "AMDGPUTargetMachine.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelectorImpl.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "amdgpu-isel"

using namespace llvm;

#define GET_GLOBALISEL_IMPL
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

AMDGPUInstructionSelector::AMDGPUInstructionSelector(
    const SISubtarget &STI, const AMDGPURegisterBankInfo &RBI,
    const AMDGPUTargetMachine &TM)
    : InstructionSelector(), TII(*STI.getInstrInfo()),
      TRI(*STI.getRegisterInfo()), RBI(RBI), TM(TM),
      STI(STI),
      EnableLateStructurizeCFG(AMDGPUTargetMachine::EnableLateStructurizeCFG),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "AMDGPUGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
      ,AMDGPUASI(STI.getAMDGPUAS())
{
}

const char *AMDGPUInstructionSelector::getName() { return DEBUG_TYPE; }

bool AMDGPUInstructionSelector::selectCOPY(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  I.setDesc(TII.get(TargetOpcode::COPY));
  for (const MachineOperand &MO : I.operands()) {
    if (TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;

    const TargetRegisterClass *RC =
        TRI.getConstrainedRegClassForOperand(MO, MRI);
    if (!RC)
      continue;
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  }
  return true;
}

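// Extract the half of a 64-bit operand selected by SubIdx (sub0 = low 32
// bits, sub1 = high 32 bits). A register operand is split by copying the
// sub-register into a fresh SGPR; an immediate operand is split
// arithmetically via APInt.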
MachineOperand
AMDGPUInstructionSelector::getSubOperand64(MachineOperand &MO,
                                           unsigned SubIdx) const {

  MachineInstr *MI = MO.getParent();
  MachineBasicBlock *BB = MO.getParent()->getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = MRI.createVirtualRegister(&AMDGPU::SGPR_32RegClass);

  if (MO.isReg()) {
    unsigned ComposedSubIdx = TRI.composeSubRegIndices(MO.getSubReg(), SubIdx);
    unsigned Reg = MO.getReg();
    BuildMI(*BB, MI, MI->getDebugLoc(), TII.get(AMDGPU::COPY), DstReg)
            .addReg(Reg, 0, ComposedSubIdx);

    return MachineOperand::CreateReg(DstReg, MO.isDef(), MO.isImplicit(),
                                     MO.isKill(), MO.isDead(), MO.isUndef(),
                                     MO.isEarlyClobber(), 0, MO.isDebug(),
                                     MO.isInternalRead());
  }

  assert(MO.isImm());

  APInt Imm(64, MO.getImm());

  switch (SubIdx) {
  default:
    llvm_unreachable("do not know how to split immediate with this sub index.");
  case AMDGPU::sub0:
    return MachineOperand::CreateImm(Imm.getLoBits(32).getSExtValue());
  case AMDGPU::sub1:
    return MachineOperand::CreateImm(Imm.getHiBits(32).getSExtValue());
  }
}

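// A 64-bit scalar G_ADD is expanded into a 32-bit add of the low halves
// followed by an add-with-carry of the high halves, then reassembled with a
// REG_SEQUENCE. Roughly (MIR sketch; register names are illustrative):
//   %lo:sreg_32 = S_ADD_U32 %a.sub0, %b.sub0   ; carry out in SCC
//   %hi:sreg_32 = S_ADDC_U32 %a.sub1, %b.sub1  ; consumes SCC
//   %dst:sreg_64 = REG_SEQUENCE %lo, %subreg.sub0, %hi, %subreg.sub1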
bool AMDGPUInstructionSelector::selectG_ADD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned Size = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  unsigned DstLo = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
  unsigned DstHi = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);

  if (Size != 64)
    return false;

  DebugLoc DL = I.getDebugLoc();

  MachineOperand Lo1(getSubOperand64(I.getOperand(1), AMDGPU::sub0));
  MachineOperand Lo2(getSubOperand64(I.getOperand(2), AMDGPU::sub0));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADD_U32), DstLo)
          .add(Lo1)
          .add(Lo2);

  MachineOperand Hi1(getSubOperand64(I.getOperand(1), AMDGPU::sub1));
  MachineOperand Hi2(getSubOperand64(I.getOperand(2), AMDGPU::sub1));

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_ADDC_U32), DstHi)
          .add(Hi1)
          .add(Hi2);

  BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), I.getOperand(0).getReg())
          .addReg(DstLo)
          .addImm(AMDGPU::sub0)
          .addReg(DstHi)
          .addImm(AMDGPU::sub1);

  for (MachineOperand &MO : I.explicit_operands()) {
    if (!MO.isReg() || TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
      continue;
    RBI.constrainGenericRegister(MO.getReg(), AMDGPU::SReg_64RegClass, MRI);
  }

  I.eraseFromParent();
  return true;
}

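// G_GEP here is plain pointer arithmetic, so a 64-bit pointer add lowers
// exactly like a 64-bit integer add.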
bool AMDGPUInstructionSelector::selectG_GEP(MachineInstr &I) const {
  return selectG_ADD(I);
}

bool AMDGPUInstructionSelector::selectG_IMPLICIT_DEF(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  const MachineOperand &MO = I.getOperand(0);
  const TargetRegisterClass *RC =
      TRI.getConstrainedRegClassForOperand(MO, MRI);
  if (RC)
    RBI.constrainGenericRegister(MO.getReg(), *RC, MRI);
  I.setDesc(TII.get(TargetOpcode::IMPLICIT_DEF));
  return true;
}

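// Only amdgcn_cvt_pkrtz is selected here, by handing it to the TableGen'd
// selectImpl; all other intrinsics fail selection (returning false triggers
// the GlobalISel fallback path).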
bool AMDGPUInstructionSelector::selectG_INTRINSIC(MachineInstr &I,
                                          CodeGenCoverage &CoverageInfo) const {
  unsigned IntrinsicID = I.getOperand(1).getIntrinsicID();

  switch (IntrinsicID) {
  default:
    break;
  case Intrinsic::amdgcn_cvt_pkrtz:
    return selectImpl(I, CoverageInfo);
  }
  return false;
}

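// Stores are currently always selected as FLAT stores sized by the source
// register. The trailing immediate operands on the FLAT builder below are
// offset, glc (globally coherent), and slc (system-level coherent), all
// left at 0.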
bool AMDGPUInstructionSelector::selectG_STORE(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned StoreSize = RBI.getSizeInBits(I.getOperand(0).getReg(), MRI, TRI);
  unsigned Opcode;

  // FIXME: Select store instruction based on address space
  switch (StoreSize) {
  default:
    return false;
  case 32:
    Opcode = AMDGPU::FLAT_STORE_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_STORE_DWORDX2;
    break;
  case 96:
    Opcode = AMDGPU::FLAT_STORE_DWORDX3;
    break;
  case 128:
    Opcode = AMDGPU::FLAT_STORE_DWORDX4;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
          .add(I.getOperand(1))
          .add(I.getOperand(0))
          .addImm(0)  // offset
          .addImm(0)  // glc
          .addImm(0); // slc

  // Now that we selected an opcode, we need to constrain the register
  // operands to use appropriate classes.
  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);

  I.eraseFromParent();
  return Ret;
}

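// Constants wider than 32 bits are materialized as two 32-bit moves plus a
// REG_SEQUENCE. For example, a 64-bit SGPR constant expands roughly to
// (sketch; register names are illustrative):
//   %lo:sreg_32_xm0 = S_MOV_B32 <imm low 32 bits>
//   %hi:sreg_32_xm0 = S_MOV_B32 <imm high 32 bits>
//   %dst:sreg_64 = REG_SEQUENCE %lo, %subreg.sub0, %hi, %subreg.sub1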
bool AMDGPUInstructionSelector::selectG_CONSTANT(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  MachineOperand &ImmOp = I.getOperand(1);

  // The AMDGPU backend only supports Imm operands and not CImm or FPImm.
  if (ImmOp.isFPImm()) {
    const APInt &Imm = ImmOp.getFPImm()->getValueAPF().bitcastToAPInt();
    ImmOp.ChangeToImmediate(Imm.getZExtValue());
  } else if (ImmOp.isCImm()) {
    ImmOp.ChangeToImmediate(ImmOp.getCImm()->getZExtValue());
  }

  unsigned DstReg = I.getOperand(0).getReg();
  unsigned Size;
  bool IsSgpr;
  const RegisterBank *RB = MRI.getRegBankOrNull(I.getOperand(0).getReg());
  if (RB) {
    IsSgpr = RB->getID() == AMDGPU::SGPRRegBankID;
    Size = MRI.getType(DstReg).getSizeInBits();
  } else {
    const TargetRegisterClass *RC = TRI.getRegClassForReg(MRI, DstReg);
    IsSgpr = TRI.isSGPRClass(RC);
    Size = TRI.getRegSizeInBits(*RC);
  }

  if (Size != 32 && Size != 64)
    return false;

  unsigned Opcode = IsSgpr ? AMDGPU::S_MOV_B32 : AMDGPU::V_MOV_B32_e32;
  if (Size == 32) {
    I.setDesc(TII.get(Opcode));
    I.addImplicitDefUseOperands(*MF);
    return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
  }

  DebugLoc DL = I.getDebugLoc();
  const TargetRegisterClass *RC = IsSgpr ? &AMDGPU::SReg_32_XM0RegClass :
                                           &AMDGPU::VGPR_32RegClass;
  unsigned LoReg = MRI.createVirtualRegister(RC);
  unsigned HiReg = MRI.createVirtualRegister(RC);
  const APInt &Imm = APInt(Size, I.getOperand(1).getImm());

  BuildMI(*BB, &I, DL, TII.get(Opcode), LoReg)
          .addImm(Imm.trunc(32).getZExtValue());

  BuildMI(*BB, &I, DL, TII.get(Opcode), HiReg)
          .addImm(Imm.ashr(32).getZExtValue());

  const MachineInstr *RS =
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::REG_SEQUENCE), DstReg)
              .addReg(LoReg)
              .addImm(AMDGPU::sub0)
              .addReg(HiReg)
              .addImm(AMDGPU::sub1);

  // We can't call constrainSelectedInstRegOperands here, because it doesn't
  // work for target independent opcodes
  I.eraseFromParent();
  const TargetRegisterClass *DstRC =
      TRI.getConstrainedRegClassForOperand(RS->getOperand(0), MRI);
  if (!DstRC)
    return true;
  return RBI.constrainGenericRegister(DstReg, *DstRC, MRI);
}

static bool isConstant(const MachineInstr &MI) {
  return MI.getOpcode() == TargetOpcode::G_CONSTANT;
}

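// Walk the chain of G_GEPs feeding a load's pointer operand, recording for
// each one the accumulated constant offset and which address components live
// in SGPRs versus VGPRs. The result drives the SMRD selection below.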
void AMDGPUInstructionSelector::getAddrModeInfo(const MachineInstr &Load,
    const MachineRegisterInfo &MRI, SmallVectorImpl<GEPInfo> &AddrInfo) const {

  const MachineInstr *PtrMI = MRI.getUniqueVRegDef(Load.getOperand(1).getReg());

  assert(PtrMI);

  if (PtrMI->getOpcode() != TargetOpcode::G_GEP)
    return;

  GEPInfo GEPInfo(*PtrMI);

  for (unsigned i = 1, e = 3; i < e; ++i) {
    const MachineOperand &GEPOp = PtrMI->getOperand(i);
    const MachineInstr *OpDef = MRI.getUniqueVRegDef(GEPOp.getReg());
    assert(OpDef);
    if (isConstant(*OpDef)) {
      // FIXME: Is it possible to have multiple Imm parts? Maybe if we
      // are lacking other optimizations.
      assert(GEPInfo.Imm == 0);
      GEPInfo.Imm = OpDef->getOperand(1).getCImm()->getSExtValue();
      continue;
    }

    const RegisterBank *OpBank = RBI.getRegBank(GEPOp.getReg(), MRI, TRI);
    if (OpBank->getID() == AMDGPU::SGPRRegBankID)
      GEPInfo.SgprParts.push_back(GEPOp.getReg());
    else
      GEPInfo.VgprParts.push_back(GEPOp.getReg());
  }

  AddrInfo.push_back(GEPInfo);
  getAddrModeInfo(*PtrMI, MRI, AddrInfo);
}

static bool isInstrUniform(const MachineInstr &MI) {
  if (!MI.hasOneMemOperand())
    return false;

  const MachineMemOperand *MMO = *MI.memoperands_begin();
  const Value *Ptr = MMO->getValue();

  // UndefValue means this is a load of a kernel input. These are uniform.
  // Sometimes LDS instructions have constant pointers.
  // If Ptr is null, then that means this mem operand contains a
  // PseudoSourceValue like GOT.
  if (!Ptr || isa<UndefValue>(Ptr) || isa<Argument>(Ptr) ||
      isa<Constant>(Ptr) || isa<GlobalValue>(Ptr))
    return true;

  if (MMO->getAddrSpace() == AMDGPUAS::CONSTANT_ADDRESS_32BIT)
    return true;

  const Instruction *I = dyn_cast<Instruction>(Ptr);
  return I && I->getMetadata("amdgpu.uniform");
}

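// Widen a 32-bit base SMRD opcode to the DWORDX2/X4/X8/X16 variant matching
// LoadSize (in bits).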
static unsigned getSmrdOpcode(unsigned BaseOpcode, unsigned LoadSize) {

  if (LoadSize == 32)
    return BaseOpcode;

  switch (BaseOpcode) {
  case AMDGPU::S_LOAD_DWORD_IMM:
    switch (LoadSize) {
    case 64:
      return AMDGPU::S_LOAD_DWORDX2_IMM;
    case 128:
      return AMDGPU::S_LOAD_DWORDX4_IMM;
    case 256:
      return AMDGPU::S_LOAD_DWORDX8_IMM;
    case 512:
      return AMDGPU::S_LOAD_DWORDX16_IMM;
    }
    break;
  case AMDGPU::S_LOAD_DWORD_IMM_ci:
    switch (LoadSize) {
    case 64:
      return AMDGPU::S_LOAD_DWORDX2_IMM_ci;
    case 128:
      return AMDGPU::S_LOAD_DWORDX4_IMM_ci;
    case 256:
      return AMDGPU::S_LOAD_DWORDX8_IMM_ci;
    case 512:
      return AMDGPU::S_LOAD_DWORDX16_IMM_ci;
    }
    break;
  case AMDGPU::S_LOAD_DWORD_SGPR:
    switch (LoadSize) {
    case 64:
      return AMDGPU::S_LOAD_DWORDX2_SGPR;
    case 128:
      return AMDGPU::S_LOAD_DWORDX4_SGPR;
    case 256:
      return AMDGPU::S_LOAD_DWORDX8_SGPR;
    case 512:
      return AMDGPU::S_LOAD_DWORDX16_SGPR;
    }
    break;
  }
  llvm_unreachable("Invalid base smrd opcode or size");
}

bool AMDGPUInstructionSelector::hasVgprParts(ArrayRef<GEPInfo> AddrInfo) const {
  for (const GEPInfo &GEPInfo : AddrInfo) {
    if (!GEPInfo.VgprParts.empty())
      return true;
  }

  return false;
}

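// Try to select a uniform, constant-address load as a scalar (SMRD) load.
// Three addressing forms are attempted in order: an immediate offset that
// fits the SMRD encoding, the CI-only 32-bit literal offset form, and
// finally an offset materialized into an SGPR.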
bool AMDGPUInstructionSelector::selectSMRD(MachineInstr &I,
                                           ArrayRef<GEPInfo> AddrInfo) const {

  if (!I.hasOneMemOperand())
    return false;

  if ((*I.memoperands_begin())->getAddrSpace() != AMDGPUASI.CONSTANT_ADDRESS &&
      (*I.memoperands_begin())->getAddrSpace() != AMDGPUASI.CONSTANT_ADDRESS_32BIT)
    return false;

  if (!isInstrUniform(I))
    return false;

  if (hasVgprParts(AddrInfo))
    return false;

  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  const SISubtarget &Subtarget = MF->getSubtarget<SISubtarget>();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  unsigned DstReg = I.getOperand(0).getReg();
  const DebugLoc &DL = I.getDebugLoc();
  unsigned Opcode;
  unsigned LoadSize = RBI.getSizeInBits(DstReg, MRI, TRI);

  if (!AddrInfo.empty() && AddrInfo[0].SgprParts.size() == 1) {

    const GEPInfo &GEPInfo = AddrInfo[0];

    unsigned PtrReg = GEPInfo.SgprParts[0];
    int64_t EncodedImm = AMDGPU::getSMRDEncodedOffset(Subtarget, GEPInfo.Imm);
    if (AMDGPU::isLegalSMRDImmOffset(Subtarget, GEPInfo.Imm)) {
      Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_IMM, LoadSize);

      MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
              .addReg(PtrReg)
              .addImm(EncodedImm)
              .addImm(0); // glc
      return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
    }

    if (Subtarget.getGeneration() == AMDGPUSubtarget::SEA_ISLANDS &&
        isUInt<32>(EncodedImm)) {
      Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_IMM_ci, LoadSize);
      MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
              .addReg(PtrReg)
              .addImm(EncodedImm)
              .addImm(0); // glc
      return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
    }

    if (isUInt<32>(GEPInfo.Imm)) {
      Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_SGPR, LoadSize);
      unsigned OffsetReg = MRI.createVirtualRegister(&AMDGPU::SReg_32RegClass);
      BuildMI(*BB, &I, DL, TII.get(AMDGPU::S_MOV_B32), OffsetReg)
              .addImm(GEPInfo.Imm);

      MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
              .addReg(PtrReg)
              .addReg(OffsetReg)
              .addImm(0); // glc
      return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
    }
  }

  unsigned PtrReg = I.getOperand(1).getReg();
  Opcode = getSmrdOpcode(AMDGPU::S_LOAD_DWORD_IMM, LoadSize);
  MachineInstr *SMRD = BuildMI(*BB, &I, DL, TII.get(Opcode), DstReg)
          .addReg(PtrReg)
          .addImm(0)
          .addImm(0); // glc
  return constrainSelectedInstRegOperands(*SMRD, TII, TRI, RBI);
}

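// Loads first try the scalar (SMRD) path above; anything that fails those
// checks is selected as a FLAT load.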
bool AMDGPUInstructionSelector::selectG_LOAD(MachineInstr &I) const {
  MachineBasicBlock *BB = I.getParent();
  MachineFunction *MF = BB->getParent();
  MachineRegisterInfo &MRI = MF->getRegInfo();
  DebugLoc DL = I.getDebugLoc();
  unsigned DstReg = I.getOperand(0).getReg();
  unsigned PtrReg = I.getOperand(1).getReg();
  unsigned LoadSize = RBI.getSizeInBits(DstReg, MRI, TRI);
  unsigned Opcode;

  SmallVector<GEPInfo, 4> AddrInfo;

  getAddrModeInfo(I, MRI, AddrInfo);

  if (selectSMRD(I, AddrInfo)) {
    I.eraseFromParent();
    return true;
  }

  switch (LoadSize) {
  default:
    llvm_unreachable("Load size not supported\n");
  case 32:
    Opcode = AMDGPU::FLAT_LOAD_DWORD;
    break;
  case 64:
    Opcode = AMDGPU::FLAT_LOAD_DWORDX2;
    break;
  }

  MachineInstr *Flat = BuildMI(*BB, &I, DL, TII.get(Opcode))
          .add(I.getOperand(0))
          .addReg(PtrReg)
          .addImm(0)  // offset
          .addImm(0)  // glc
          .addImm(0); // slc

  bool Ret = constrainSelectedInstRegOperands(*Flat, TII, TRI, RBI);
  I.eraseFromParent();
  return Ret;
}

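// Main entry point. Already-selected (non-generic) instructions only need
// their COPYs constrained to concrete register classes; generic opcodes are
// dispatched to the hand-written selectors above, with the TableGen'd
// selectImpl as the default.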
bool AMDGPUInstructionSelector::select(MachineInstr &I,
                                       CodeGenCoverage &CoverageInfo) const {

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCOPY(I);
    return true;
  }

  switch (I.getOpcode()) {
  default:
    return selectImpl(I, CoverageInfo);
  case TargetOpcode::G_ADD:
    return selectG_ADD(I);
  case TargetOpcode::G_BITCAST:
    return selectCOPY(I);
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_FCONSTANT:
    return selectG_CONSTANT(I);
  case TargetOpcode::G_GEP:
    return selectG_GEP(I);
  case TargetOpcode::G_IMPLICIT_DEF:
    return selectG_IMPLICIT_DEF(I);
  case TargetOpcode::G_INTRINSIC:
    return selectG_INTRINSIC(I, CoverageInfo);
  case TargetOpcode::G_LOAD:
    return selectG_LOAD(I);
  case TargetOpcode::G_STORE:
    return selectG_STORE(I);
  }
  return false;
}

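// The ComplexRendererFns below implement the complex operand patterns that
// the TableGen'd selector refers to. None of them do real matching yet: each
// re-emits the root operand and fills the VOP3 modifier operands (source
// mods, clamp, omod) with zero.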
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVCSRC(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

/// This will select either an SGPR or VGPR operand and will save us from
/// having to write an extra tablegen pattern.
InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVSRC0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); }
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods0(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // src0_mods
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3OMods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }, // clamp
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // omod
  }};
}

InstructionSelector::ComplexRendererFns
AMDGPUInstructionSelector::selectVOP3Mods(MachineOperand &Root) const {
  return {{
      [=](MachineInstrBuilder &MIB) { MIB.add(Root); },
      [=](MachineInstrBuilder &MIB) { MIB.addImm(0); }  // src_mods
  }};
}