//===-- R600InstrInfo.cpp - R600 Instruction Information ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//
#include "R600InstrInfo.h"
#include "AMDGPU.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600Defines.h"
#include "R600FrameLowering.h"
#include "R600RegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <cstring>
#include <iterator>
#include <utility>
#include <vector>

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "R600GenDFAPacketizer.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRMAP_INFO
#define GET_INSTRINFO_NAMED_OPS
#include "R600GenInstrInfo.inc"
R600InstrInfo::R600InstrInfo(const R600Subtarget &ST)
  : R600GenInstrInfo(-1, -1), RI(), ST(ST) {}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

void R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI,
                                const DebugLoc &DL, unsigned DestReg,
                                unsigned SrcReg, bool KillSrc) const {
  unsigned VectorComponents = 0;
  if ((R600::R600_Reg128RegClass.contains(DestReg) ||
       R600::R600_Reg128VerticalRegClass.contains(DestReg)) &&
      (R600::R600_Reg128RegClass.contains(SrcReg) ||
       R600::R600_Reg128VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 4;
  } else if ((R600::R600_Reg64RegClass.contains(DestReg) ||
              R600::R600_Reg64VerticalRegClass.contains(DestReg)) &&
             (R600::R600_Reg64RegClass.contains(SrcReg) ||
              R600::R600_Reg64VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 2;
  }

  if (VectorComponents > 0) {
    for (unsigned I = 0; I < VectorComponents; I++) {
      unsigned SubRegIndex = AMDGPURegisterInfo::getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, R600::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
          .addReg(DestReg, RegState::Define | RegState::Implicit);
    }
  } else {
    MachineInstr *NewMI =
        buildDefaultInstruction(MBB, MI, R600::MOV, DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, R600::OpName::src0))
        .setIsKill(KillSrc);
  }
}
/// \returns true if \p MBBI can be moved into a new basic block.
bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI) const {
  for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
                                        E = MBBI->operands_end();
       I != E; ++I) {
    if (I->isReg() && !Register::isVirtualRegister(I->getReg()) && I->isUse() &&
        RI.isPhysRegLiveAcrossClauses(I->getReg()))
      return false;
  }
  return true;
}
bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default:
    return false;
  case R600::MOV:
  case R600::MOV_IMM_F32:
  case R600::MOV_IMM_I32:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  return false;
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case R600::CUBE_r600_pseudo:
  case R600::CUBE_r600_real:
  case R600::CUBE_eg_pseudo:
  case R600::CUBE_eg_real:
    return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return (TargetFlags & R600_InstFlag::ALU_INST);
}

bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::LDS_1A) |
          (TargetFlags & R600_InstFlag::LDS_1A1D) |
          (TargetFlags & R600_InstFlag::LDS_1A2D));
}

bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
  return isLDSInstr(Opcode) && getOperandIdx(Opcode, R600::OpName::dst) != -1;
}

bool R600InstrInfo::canBeConsideredALU(const MachineInstr &MI) const {
  if (isALUInstr(MI.getOpcode()))
    return true;
  if (isVector(MI) || isCubeOp(MI.getOpcode()))
    return true;
  switch (MI.getOpcode()) {
  case R600::PRED_X:
  case R600::INTERP_PAIR_XY:
  case R600::INTERP_PAIR_ZW:
  case R600::INTERP_VEC_LOAD:
  case R600::COPY:
  case R600::DOT_4:
    return true;
  default:
    return false;
  }
}
bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
  if (ST.hasCaymanISA())
    return false;
  return (get(Opcode).getSchedClass() == R600::Sched::TransALU);
}

bool R600InstrInfo::isTransOnly(const MachineInstr &MI) const {
  return isTransOnly(MI.getOpcode());
}

bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
  return (get(Opcode).getSchedClass() == R600::Sched::VecALU);
}

bool R600InstrInfo::isVectorOnly(const MachineInstr &MI) const {
  return isVectorOnly(MI.getOpcode());
}

bool R600InstrInfo::isExport(unsigned Opcode) const {
  return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT);
}

bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
  return ST.hasVertexCache() && IS_VTX(get(Opcode));
}

bool R600InstrInfo::usesVertexCache(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  return !AMDGPU::isCompute(MF->getFunction().getCallingConv()) &&
         usesVertexCache(MI.getOpcode());
}

bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
  return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
}

bool R600InstrInfo::usesTextureCache(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  return (AMDGPU::isCompute(MF->getFunction().getCallingConv()) &&
          usesVertexCache(MI.getOpcode())) ||
         usesTextureCache(MI.getOpcode());
}

bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
  switch (Opcode) {
  case R600::KILLGT:
  case R600::GROUP_BARRIER:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::usesAddressRegister(MachineInstr &MI) const {
  return MI.findRegisterUseOperandIdx(R600::AR_X, false, &RI) != -1;
}

bool R600InstrInfo::definesAddressRegister(MachineInstr &MI) const {
  return MI.findRegisterDefOperandIdx(R600::AR_X, false, false, &RI) != -1;
}

bool R600InstrInfo::readsLDSSrcReg(const MachineInstr &MI) const {
  if (!isALUInstr(MI.getOpcode())) {
    return false;
  }
  for (MachineInstr::const_mop_iterator I = MI.operands_begin(),
                                        E = MI.operands_end();
       I != E; ++I) {
    if (!I->isReg() || !I->isUse() || Register::isVirtualRegister(I->getReg()))
      continue;

    if (R600::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
      return true;
  }
  return false;
}
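
/// \returns the operand index of the *_sel operand that matches the source
/// operand at index \p SrcIdx of \p Opcode, or -1 if there is none.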
int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
  static const unsigned SrcSelTable[][2] = {
    {R600::OpName::src0, R600::OpName::src0_sel},
    {R600::OpName::src1, R600::OpName::src1_sel},
    {R600::OpName::src2, R600::OpName::src2_sel},
    {R600::OpName::src0_X, R600::OpName::src0_sel_X},
    {R600::OpName::src0_Y, R600::OpName::src0_sel_Y},
    {R600::OpName::src0_Z, R600::OpName::src0_sel_Z},
    {R600::OpName::src0_W, R600::OpName::src0_sel_W},
    {R600::OpName::src1_X, R600::OpName::src1_sel_X},
    {R600::OpName::src1_Y, R600::OpName::src1_sel_Y},
    {R600::OpName::src1_Z, R600::OpName::src1_sel_Z},
    {R600::OpName::src1_W, R600::OpName::src1_sel_W}
  };

  for (const auto &Row : SrcSelTable) {
    if (getOperandIdx(Opcode, Row[0]) == (int)SrcIdx) {
      return getOperandIdx(Opcode, Row[1]);
    }
  }
  return -1;
}
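
/// Collect the source operands of \p MI together with the constant-buffer
/// selector or literal value each one reads (0 for plain register sources).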
SmallVector<std::pair<MachineOperand *, int64_t>, 3>
R600InstrInfo::getSrcs(MachineInstr &MI) const {
  SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;

  if (MI.getOpcode() == R600::DOT_4) {
    static const unsigned OpTable[8][2] = {
      {R600::OpName::src0_X, R600::OpName::src0_sel_X},
      {R600::OpName::src0_Y, R600::OpName::src0_sel_Y},
      {R600::OpName::src0_Z, R600::OpName::src0_sel_Z},
      {R600::OpName::src0_W, R600::OpName::src0_sel_W},
      {R600::OpName::src1_X, R600::OpName::src1_sel_X},
      {R600::OpName::src1_Y, R600::OpName::src1_sel_Y},
      {R600::OpName::src1_Z, R600::OpName::src1_sel_Z},
      {R600::OpName::src1_W, R600::OpName::src1_sel_W},
    };

    for (unsigned j = 0; j < 8; j++) {
      MachineOperand &MO =
          MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][0]));
      Register Reg = MO.getReg();
      if (Reg == R600::ALU_CONST) {
        MachineOperand &Sel =
            MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
        Result.push_back(std::make_pair(&MO, Sel.getImm()));
        continue;
      }
    }
    return Result;
  }

  static const unsigned OpTable[3][2] = {
    {R600::OpName::src0, R600::OpName::src0_sel},
    {R600::OpName::src1, R600::OpName::src1_sel},
    {R600::OpName::src2, R600::OpName::src2_sel},
  };

  for (unsigned j = 0; j < 3; j++) {
    int SrcIdx = getOperandIdx(MI.getOpcode(), OpTable[j][0]);
    if (SrcIdx < 0)
      break;
    MachineOperand &MO = MI.getOperand(SrcIdx);
    Register Reg = MO.getReg();
    if (Reg == R600::ALU_CONST) {
      MachineOperand &Sel =
          MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
      Result.push_back(std::make_pair(&MO, Sel.getImm()));
      continue;
    }
    if (Reg == R600::ALU_LITERAL_X) {
      MachineOperand &Operand =
          MI.getOperand(getOperandIdx(MI.getOpcode(), R600::OpName::literal));
      if (Operand.isImm()) {
        Result.push_back(std::make_pair(&MO, Operand.getImm()));
        continue;
      }
      assert(Operand.isGlobal());
    }
    Result.push_back(std::make_pair(&MO, 0));
  }
  return Result;
}
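
/// \returns an (index, channel) pair for each source of \p MI, using (-1, 0)
/// for constant and unused sources and 255 for PS/PV operands; \p ConstCount
/// is set to the number of constant sources.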
std::vector<std::pair<int, unsigned>>
R600InstrInfo::ExtractSrcs(MachineInstr &MI,
                           const DenseMap<unsigned, unsigned> &PV,
                           unsigned &ConstCount) const {
  ConstCount = 0;
  const std::pair<int, unsigned> DummyPair(-1, 0);
  std::vector<std::pair<int, unsigned>> Result;
  unsigned i = 0;
  for (const auto &Src : getSrcs(MI)) {
    ++i;
    Register Reg = Src.first->getReg();
    int Index = RI.getEncodingValue(Reg) & 0xff;
    if (Reg == R600::OQAP) {
      Result.push_back(std::make_pair(Index, 0U));
    }
    if (PV.find(Reg) != PV.end()) {
      // 255 is used to signal that this is a PS/PV register.
      Result.push_back(std::make_pair(255, 0U));
      continue;
    }
    if (Index > 127) {
      ConstCount++;
      Result.push_back(DummyPair);
      continue;
    }
    unsigned Chan = RI.getHWRegChan(Reg);
    Result.push_back(std::make_pair(Index, Chan));
  }
  for (; i < 3; ++i)
    Result.push_back(DummyPair);
  return Result;
}
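
/// Reorder the vector-slot sources \p Src according to the bank swizzle
/// \p Swz so they appear in the cycle in which they are read.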
static std::vector<std::pair<int, unsigned>>
Swizzle(std::vector<std::pair<int, unsigned>> Src,
        R600InstrInfo::BankSwizzle Swz) {
  if (Src[0] == Src[1])
    Src[1].first = -1;
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210:
    break;
  case R600InstrInfo::ALU_VEC_021_SCL_122:
    std::swap(Src[1], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_102_SCL_221:
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_120_SCL_212:
    std::swap(Src[0], Src[1]);
    std::swap(Src[0], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_201:
    std::swap(Src[0], Src[2]);
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_210:
    std::swap(Src[0], Src[2]);
    break;
  }
  return Src;
}
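
/// \returns the read cycle of trans-slot operand \p Op for the bank swizzle
/// \p Swz.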
static unsigned getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
  assert(Op < 3 && "Out of range swizzle index");
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210: {
    unsigned Cycles[3] = { 2, 1, 0};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_021_SCL_122: {
    unsigned Cycles[3] = { 1, 2, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_120_SCL_212: {
    unsigned Cycles[3] = { 2, 1, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_102_SCL_221: {
    unsigned Cycles[3] = { 2, 2, 1};
    return Cycles[Op];
  }
  default:
    llvm_unreachable("Wrong Swizzle for Trans Slot");
  }
}
/// returns how many MIs (whose inputs are represented by IGSrcs) can be packed
/// in the same Instruction Group while meeting read port limitations given a
/// Swz swizzle sequence.
unsigned R600InstrInfo::isLegalUpTo(
    const std::vector<std::vector<std::pair<int, unsigned>>> &IGSrcs,
    const std::vector<R600InstrInfo::BankSwizzle> &Swz,
    const std::vector<std::pair<int, unsigned>> &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  int Vector[4][3];
  memset(Vector, -1, sizeof(Vector));
  for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
    const std::vector<std::pair<int, unsigned>> &Srcs =
        Swizzle(IGSrcs[i], Swz[i]);
    for (unsigned j = 0; j < 3; j++) {
      const std::pair<int, unsigned> &Src = Srcs[j];
      if (Src.first < 0 || Src.first == 255)
        continue;
      if (Src.first == GET_REG_INDEX(RI.getEncodingValue(R600::OQAP))) {
        if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
            Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
          // The value from output queue A (denoted by register OQAP) can
          // only be fetched during the first cycle.
          return false;
        }
        // OQAP does not count towards the normal read port restrictions.
        continue;
      }
      if (Vector[Src.second][j] < 0)
        Vector[Src.second][j] = Src.first;
      if (Vector[Src.second][j] != Src.first)
        return i;
    }
  }
  // Now check the Trans ALU.
  for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransSrcs[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (Src.first == 255)
      continue;
    if (Vector[Src.second][Cycle] < 0)
      Vector[Src.second][Cycle] = Src.first;
    if (Vector[Src.second][Cycle] != Src.first)
      return IGSrcs.size() - 1;
  }
  return IGSrcs.size();
}
/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
/// (in lexicographic order) swizzle sequence assuming that all swizzles after
/// Idx can be skipped.
static bool
NextPossibleSolution(
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    unsigned Idx) {
  assert(Idx < SwzCandidate.size());
  int ResetIdx = Idx;
  while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
    ResetIdx--;
  for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
    SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
  }
  if (ResetIdx == -1)
    return false;
  int NextSwizzle = SwzCandidate[ResetIdx] + 1;
  SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
  return true;
}

/// Enumerate all possible swizzle sequences to find one that meets all
/// read port requirements.
bool R600InstrInfo::FindSwizzleForVectorSlot(
    const std::vector<std::vector<std::pair<int, unsigned>>> &IGSrcs,
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    const std::vector<std::pair<int, unsigned>> &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  unsigned ValidUpTo = 0;
  do {
    ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
    if (ValidUpTo == IGSrcs.size())
      return true;
  } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
  return false;
}
/// Instructions in the Trans slot can't read a GPR at cycle 0 if they also
/// read a constant, and can't read a GPR at cycle 1 if they read two
/// constants.
static bool
isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
                  const std::vector<std::pair<int, unsigned>> &TransOps,
                  unsigned ConstCount) {
  // The Trans ALU can't read 3 constants.
  if (ConstCount > 2)
    return false;
  for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransOps[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (ConstCount > 0 && Cycle == 0)
      return false;
    if (ConstCount > 1 && Cycle == 1)
      return false;
  }
  return true;
}
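
/// \returns true if the instructions in \p IG can form a single instruction
/// group without exceeding the read port limits; on success \p ValidSwizzle
/// holds one bank swizzle per instruction of the group.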
bool
R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
                                       const DenseMap<unsigned, unsigned> &PV,
                                       std::vector<BankSwizzle> &ValidSwizzle,
                                       bool isLastAluTrans)
    const {
  // TODO: support shared src0 - src1 operand.

  std::vector<std::vector<std::pair<int, unsigned>>> IGSrcs;
  ValidSwizzle.clear();
  unsigned ConstCount;
  BankSwizzle TransBS = ALU_VEC_012_SCL_210;
  for (unsigned i = 0, e = IG.size(); i < e; ++i) {
    IGSrcs.push_back(ExtractSrcs(*IG[i], PV, ConstCount));
    unsigned Op = getOperandIdx(IG[i]->getOpcode(),
                                R600::OpName::bank_swizzle);
    ValidSwizzle.push_back(
        (R600InstrInfo::BankSwizzle)IG[i]->getOperand(Op).getImm());
  }
  std::vector<std::pair<int, unsigned>> TransOps;
  if (!isLastAluTrans)
    return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);

  TransOps = std::move(IGSrcs.back());
  IGSrcs.pop_back();
  ValidSwizzle.pop_back();

  static const R600InstrInfo::BankSwizzle TransSwz[] = {
    ALU_VEC_012_SCL_210,
    ALU_VEC_021_SCL_122,
    ALU_VEC_120_SCL_212,
    ALU_VEC_102_SCL_221
  };
  for (unsigned i = 0; i < 4; i++) {
    TransBS = TransSwz[i];
    if (!isConstCompatible(TransBS, TransOps, ConstCount))
      continue;
    bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
                                           TransBS);
    if (Result) {
      ValidSwizzle.push_back(TransBS);
      return true;
    }
  }

  return false;
}
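
/// \returns true if the encoded constant reads in \p Consts fall into at most
/// two different constant pairs, the limit this check enforces for a single
/// ALU instruction group.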
bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
    const {
  assert(Consts.size() <= 12 && "Too many operands in instructions group");
  unsigned Pair1 = 0, Pair2 = 0;
  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
    unsigned ReadConstHalf = Consts[i] & 2;
    unsigned ReadConstIndex = Consts[i] & (~3);
    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
    if (!Pair1) {
      Pair1 = ReadHalfConst;
      continue;
    }
    if (Pair1 == ReadHalfConst)
      continue;
    if (!Pair2) {
      Pair2 = ReadHalfConst;
      continue;
    }
    if (Pair2 != ReadHalfConst)
      return false;
  }
  return true;
}

bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
    const {
  std::vector<unsigned> Consts;
  SmallSet<int64_t, 4> Literals;
  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
    MachineInstr &MI = *MIs[i];
    if (!isALUInstr(MI.getOpcode()))
      continue;

    for (const auto &Src : getSrcs(MI)) {
      if (Src.first->getReg() == R600::ALU_LITERAL_X)
        Literals.insert(Src.second);
      if (Literals.size() > 4)
        return false;
      if (Src.first->getReg() == R600::ALU_CONST)
        Consts.push_back(Src.second);
      if (R600::R600_KC0RegClass.contains(Src.first->getReg()) ||
          R600::R600_KC1RegClass.contains(Src.first->getReg())) {
        unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
        unsigned Chan = RI.getHWRegChan(Src.first->getReg());
        Consts.push_back((Index << 2) | Chan);
      }
    }
  }
  return fitsConstReadLimitations(Consts);
}
DFAPacketizer *
R600InstrInfo::CreateTargetScheduleState(const TargetSubtargetInfo &STI) const {
  const InstrItineraryData *II = STI.getInstrItineraryData();
  return static_cast<const R600Subtarget &>(STI).createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case R600::PRED_X:
    return true;
  default:
    return false;
  }
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr &MI = *I;
    if (isPredicateSetter(MI.getOpcode()))
      return &MI;
  }

  return nullptr;
}

static
bool isJump(unsigned Opcode) {
  return Opcode == R600::JUMP || Opcode == R600::JUMP_COND;
}

static bool isBranch(unsigned Opcode) {
  return Opcode == R600::BRANCH || Opcode == R600::BRANCH_COND_i32 ||
         Opcode == R600::BRANCH_COND_f32;
}
bool R600InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                  MachineBasicBlock *&TBB,
                                  MachineBasicBlock *&FBB,
                                  SmallVectorImpl<MachineOperand> &Cond,
                                  bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch.

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  // R600::BRANCH* instructions are only available after isel and are not
  // handled.
  if (isBranch(I->getOpcode()))
    return true;
  if (!isJump(I->getOpcode())) {
    return false;
  }

  // Remove successive JUMPs.
  while (I != MBB.begin() && std::prev(I)->getOpcode() == R600::JUMP) {
    MachineBasicBlock::iterator PriorI = std::prev(I);
    if (AllowModify)
      I->removeFromParent();
    I = PriorI;
  }
  MachineInstr &LastInst = *I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst.getOpcode();
  if (I == MBB.begin() || !isJump((--I)->getOpcode())) {
    if (LastOpc == R600::JUMP) {
      TBB = LastInst.getOperand(0).getMBB();
      return false;
    } else if (LastOpc == R600::JUMP_COND) {
      auto predSet = I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = --I;
      }
      TBB = LastInst.getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(R600::PRED_SEL_ONE, false));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr &SecondLastInst = *I;
  unsigned SecondLastOpc = SecondLastInst.getOpcode();

  // If the block ends with a B and a Bcc, handle it.
  if (SecondLastOpc == R600::JUMP_COND && LastOpc == R600::JUMP) {
    auto predSet = --I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = --I;
    }
    TBB = SecondLastInst.getOperand(0).getMBB();
    FBB = LastInst.getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(R600::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}
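
/// \returns an iterator to the last CF_ALU or CF_ALU_PUSH_BEFORE instruction
/// in \p MBB, or MBB.end() if there is none.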
static
MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
       It != E; ++It) {
    if (It->getOpcode() == R600::CF_ALU ||
        It->getOpcode() == R600::CF_ALU_PUSH_BEFORE)
      return It.getReverse();
  }
  return MBB.end();
}
unsigned R600InstrInfo::insertBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *TBB,
                                     MachineBasicBlock *FBB,
                                     ArrayRef<MachineOperand> Cond,
                                     const DebugLoc &DL,
                                     int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert(!BytesAdded && "code size not handled");

  if (!FBB) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(R600::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate !");
      addFlag(*PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(R600::JUMP_COND))
          .addMBB(TBB)
          .addReg(R600::PREDICATE_BIT, RegState::Kill);
      MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
      if (CfAlu == MBB.end())
        return 1;
      assert(CfAlu->getOpcode() == R600::CF_ALU);
      CfAlu->setDesc(get(R600::CF_ALU_PUSH_BEFORE));
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate !");
    addFlag(*PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(R600::JUMP_COND))
        .addMBB(TBB)
        .addReg(R600::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(R600::JUMP)).addMBB(FBB);
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      return 2;
    assert(CfAlu->getOpcode() == R600::CF_ALU);
    CfAlu->setDesc(get(R600::CF_ALU_PUSH_BEFORE));
    return 2;
  }
}
unsigned R600InstrInfo::removeBranch(MachineBasicBlock &MBB,
                                     int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  // Note: we leave PRED* instructions there.
  // They may be needed when predicating instructions.

  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case R600::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(*predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert(CfAlu->getOpcode() == R600::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(R600::CF_ALU));
    break;
  }
  case R600::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case R600::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(*predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert(CfAlu->getOpcode() == R600::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(R600::CF_ALU));
    break;
  }
  case R600::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}
bool R600InstrInfo::isPredicated(const MachineInstr &MI) const {
  int idx = MI.findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  Register Reg = MI.getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case R600::PRED_SEL_ONE:
  case R600::PRED_SEL_ZERO:
  case R600::PREDICATE_BIT:
    return true;
  }
}
bool R600InstrInfo::isPredicable(const MachineInstr &MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in the
  // backend, we will mark KILL* instructions as unpredicable.

  if (MI.getOpcode() == R600::KILLGT) {
    return false;
  } else if (MI.getOpcode() == R600::CF_ALU) {
    // If the clause starts in the middle of the MBB then the MBB has more
    // than a single clause; we are unable to predicate several clauses.
    if (MI.getParent()->begin() != MachineBasicBlock::const_iterator(MI))
      return false;
    // TODO: We don't support KC merging at the moment.
    return MI.getOperand(3).getImm() == 0 && MI.getOperand(4).getImm() == 0;
  } else if (isVector(MI)) {
    return false;
  } else {
    return TargetInstrInfo::isPredicable(MI);
  }
}
bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   BranchProbability Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   BranchProbability Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         BranchProbability Probability)
    const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}
bool
R600InstrInfo::reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case R600::PRED_SETE_INT:
    MO.setImm(R600::PRED_SETNE_INT);
    break;
  case R600::PRED_SETNE_INT:
    MO.setImm(R600::PRED_SETE_INT);
    break;
  case R600::PRED_SETE:
    MO.setImm(R600::PRED_SETNE);
    break;
  case R600::PRED_SETNE:
    MO.setImm(R600::PRED_SETE);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case R600::PRED_SEL_ZERO:
    MO2.setReg(R600::PRED_SEL_ONE);
    break;
  case R600::PRED_SEL_ONE:
    MO2.setReg(R600::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }
  return false;
}
bool R600InstrInfo::DefinesPredicate(MachineInstr &MI,
                                     std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI.getOpcode());
}

bool R600InstrInfo::PredicateInstruction(MachineInstr &MI,
                                         ArrayRef<MachineOperand> Pred) const {
  int PIdx = MI.findFirstPredOperandIdx();

  if (MI.getOpcode() == R600::CF_ALU) {
    MI.getOperand(8).setImm(0);
    return true;
  }

  if (MI.getOpcode() == R600::DOT_4) {
    MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_X))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_Y))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_Z))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_W))
        .setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
    MIB.addReg(R600::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  if (PIdx != -1) {
    MachineOperand &PMO = MI.getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
    MIB.addReg(R600::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}
unsigned int R600InstrInfo::getPredicationCost(const MachineInstr &) const {
  return 2;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr &,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}
bool R600InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default: {
    MachineBasicBlock *MBB = MI.getParent();
    int OffsetOpIdx =
        R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::addr);
    // addr is a custom operand with multiple MI operands, and only the
    // first MI operand is given a name.
    int RegOpIdx = OffsetOpIdx + 1;
    int ChanOpIdx =
        R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::chan);
    if (isRegisterLoad(MI)) {
      int DstOpIdx =
          R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::dst);
      unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
      unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      Register OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
      if (OffsetReg == R600::INDIRECT_BASE_ADDR) {
        buildMovInstr(MBB, MI, MI.getOperand(DstOpIdx).getReg(),
                      getIndirectAddrRegClass()->getRegister(Address));
      } else {
        buildIndirectRead(MBB, MI, MI.getOperand(DstOpIdx).getReg(), Address,
                          OffsetReg);
      }
    } else if (isRegisterStore(MI)) {
      int ValOpIdx =
          R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::val);
      unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
      unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      Register OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
      if (OffsetReg == R600::INDIRECT_BASE_ADDR) {
        buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                      MI.getOperand(ValOpIdx).getReg());
      } else {
        buildIndirectWrite(MBB, MI, MI.getOperand(ValOpIdx).getReg(),
                           calculateIndirectAddress(RegIndex, Channel),
                           OffsetReg);
      }
    } else {
      return false;
    }

    MBB->erase(MI);
    return true;
  }
  case R600::R600_EXTRACT_ELT_V2:
  case R600::R600_EXTRACT_ELT_V4:
    buildIndirectRead(MI.getParent(), MI, MI.getOperand(0).getReg(),
                      RI.getHWRegIndex(MI.getOperand(1).getReg()), // Address
                      MI.getOperand(2).getReg(),
                      RI.getHWRegChan(MI.getOperand(1).getReg()));
    break;
  case R600::R600_INSERT_ELT_V2:
  case R600::R600_INSERT_ELT_V4:
    buildIndirectWrite(MI.getParent(), MI, MI.getOperand(2).getReg(), // Value
                       RI.getHWRegIndex(MI.getOperand(1).getReg()),   // Address
                       MI.getOperand(3).getReg(),                     // Offset
                       RI.getHWRegChan(MI.getOperand(1).getReg()));   // Channel
    break;
  }
  MI.eraseFromParent();
  return true;
}
void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                             const MachineFunction &MF,
                                             const R600RegisterInfo &TRI) const {
  const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
  const R600FrameLowering *TFL = ST.getFrameLowering();

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  if (End == -1)
    return;

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg = R600::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      TRI.reserveRegisterTuples(Reserved, Reg);
    }
  }
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
  return &R600::R600_TReg32_XRegClass;
}
MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  return buildIndirectWrite(MBB, I, ValueReg, Address, OffsetReg, 0);
}

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  unsigned AddrReg;
  switch (AddrChan) {
  default: llvm_unreachable("Invalid Channel");
  case 0: AddrReg = R600::R600_AddrRegClass.getRegister(Address); break;
  case 1: AddrReg = R600::R600_Addr_YRegClass.getRegister(Address); break;
  case 2: AddrReg = R600::R600_Addr_ZRegClass.getRegister(Address); break;
  case 3: AddrReg = R600::R600_Addr_WRegClass.getRegister(Address); break;
  }
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, R600::MOVA_INT_eg,
                                               R600::AR_X, OffsetReg);
  setImmOperand(*MOVA, R600::OpName::write, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, R600::MOV,
                                                    AddrReg, ValueReg)
                                .addReg(R600::AR_X,
                                        RegState::Implicit | RegState::Kill);
  setImmOperand(*Mov, R600::OpName::dst_rel, 1);
  return Mov;
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  return buildIndirectRead(MBB, I, ValueReg, Address, OffsetReg, 0);
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  unsigned AddrReg;
  switch (AddrChan) {
  default: llvm_unreachable("Invalid Channel");
  case 0: AddrReg = R600::R600_AddrRegClass.getRegister(Address); break;
  case 1: AddrReg = R600::R600_Addr_YRegClass.getRegister(Address); break;
  case 2: AddrReg = R600::R600_Addr_ZRegClass.getRegister(Address); break;
  case 3: AddrReg = R600::R600_Addr_WRegClass.getRegister(Address); break;
  }
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, R600::MOVA_INT_eg,
                                               R600::AR_X,
                                               OffsetReg);
  setImmOperand(*MOVA, R600::OpName::write, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, R600::MOV,
                                                    ValueReg,
                                                    AddrReg)
                                .addReg(R600::AR_X,
                                        RegState::Implicit | RegState::Kill);
  setImmOperand(*Mov, R600::OpName::src0_rel, 1);

  return Mov;
}
int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  int Offset = -1;

  if (MFI.getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
  for (std::pair<unsigned, unsigned> LI : MRI.liveins()) {
    unsigned Reg = LI.first;
    if (Register::isVirtualRegister(Reg) || !IndirectRC->contains(Reg))
      continue;

    unsigned RegIndex;
    unsigned RegEnd;
    for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
         ++RegIndex) {
      if (IndirectRC->getRegister(RegIndex) == Reg)
        break;
    }
    Offset = std::max(Offset, (int)RegIndex);
  }

  return Offset + 1;
}

int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Variable sized objects are not supported.
  if (MFI.hasVarSizedObjects()) {
    return -1;
  }

  if (MFI.getNumObjects() == 0) {
    return -1;
  }

  const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
  const R600FrameLowering *TFL = ST.getFrameLowering();

  unsigned IgnoredFrameReg;
  Offset = TFL->getFrameIndexReference(MF, -1, IgnoredFrameReg);

  return getIndirectIndexBegin(MF) + Offset;
}
unsigned R600InstrInfo::getMaxAlusPerClause() const {
  return 115;
}
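
/// Build a MachineInstr for \p Opcode with the full set of default ALU
/// operands (write, omod, modifiers, pred_sel, literal, bank_swizzle, ...),
/// destination \p DstReg and up to two source registers.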
MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
                                    DstReg); // $dst

  if (Src1Reg) {
    MIB.addImm(0)       // $update_exec_mask
       .addImm(0);      // $update_predicate
  }
  MIB.addImm(1)         // $write
     .addImm(0)         // $omod
     .addImm(0)         // $dst_rel
     .addImm(0)         // $dst_clamp
     .addReg(Src0Reg)   // $src0
     .addImm(0)         // $src0_neg
     .addImm(0)         // $src0_rel
     .addImm(0)         // $src0_abs
     .addImm(-1);       // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0)       // $src1_abs
       .addImm(-1);     // $src1_sel
  }

  // XXX: The r600g finalizer expects this to be 1, once we've moved the
  // scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)                    // $last
     .addReg(R600::PRED_SEL_OFF)   // $pred_sel
     .addImm(0)                    // $literal
     .addImm(0);                   // $bank_swizzle

  return MIB;
}
#define OPERAND_CASE(Label) \
  case Label: { \
    static const unsigned Ops[] = \
    { \
      Label##_X, \
      Label##_Y, \
      Label##_Z, \
      Label##_W \
    }; \
    return Ops[Slot]; \
  }

static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
  switch (Op) {
  OPERAND_CASE(R600::OpName::update_exec_mask)
  OPERAND_CASE(R600::OpName::update_pred)
  OPERAND_CASE(R600::OpName::write)
  OPERAND_CASE(R600::OpName::omod)
  OPERAND_CASE(R600::OpName::dst_rel)
  OPERAND_CASE(R600::OpName::clamp)
  OPERAND_CASE(R600::OpName::src0)
  OPERAND_CASE(R600::OpName::src0_neg)
  OPERAND_CASE(R600::OpName::src0_rel)
  OPERAND_CASE(R600::OpName::src0_abs)
  OPERAND_CASE(R600::OpName::src0_sel)
  OPERAND_CASE(R600::OpName::src1)
  OPERAND_CASE(R600::OpName::src1_neg)
  OPERAND_CASE(R600::OpName::src1_rel)
  OPERAND_CASE(R600::OpName::src1_abs)
  OPERAND_CASE(R600::OpName::src1_sel)
  OPERAND_CASE(R600::OpName::pred_sel)
  default:
    llvm_unreachable("Wrong Operand");
  }
}

#undef OPERAND_CASE
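
/// Expand slot \p Slot of the DOT_4 pseudo \p MI into a single DOT4
/// instruction writing \p DstReg, copying the per-slot operands and flags.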
MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
    MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
    const {
  assert(MI->getOpcode() == R600::DOT_4 && "Not Implemented");
  unsigned Opcode;
  if (ST.getGeneration() <= AMDGPUSubtarget::R700)
    Opcode = R600::DOT4_r600;
  else
    Opcode = R600::DOT4_eg;
  MachineBasicBlock::iterator I = MI;
  MachineOperand &Src0 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(R600::OpName::src0, Slot)));
  MachineOperand &Src1 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(R600::OpName::src1, Slot)));
  MachineInstr *MIB = buildDefaultInstruction(
      MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
  static const unsigned Operands[14] = {
    R600::OpName::update_exec_mask,
    R600::OpName::update_pred,
    R600::OpName::write,
    R600::OpName::omod,
    R600::OpName::dst_rel,
    R600::OpName::clamp,
    R600::OpName::src0_neg,
    R600::OpName::src0_rel,
    R600::OpName::src0_abs,
    R600::OpName::src0_sel,
    R600::OpName::src1_neg,
    R600::OpName::src1_rel,
    R600::OpName::src1_abs,
    R600::OpName::src1_sel,
  };

  MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
      getSlotedOps(R600::OpName::pred_sel, Slot)));
  MIB->getOperand(getOperandIdx(Opcode, R600::OpName::pred_sel))
      .setReg(MO.getReg());

  for (unsigned i = 0; i < 14; i++) {
    MachineOperand &MO = MI->getOperand(
        getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
    assert(MO.isImm());
    setImmOperand(*MIB, Operands[i], MO.getImm());
  }
  MIB->getOperand(20).setImm(0);
  return MIB;
}
MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, R600::MOV, DstReg,
                                                 R600::ALU_LITERAL_X);
  setImmOperand(*MovImm, R600::OpName::literal, Imm);
  return MovImm;
}

MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                           MachineBasicBlock::iterator I,
                                           unsigned DstReg, unsigned SrcReg) const {
  return buildDefaultInstruction(*MBB, I, R600::MOV, DstReg, SrcReg);
}

int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
  return R600::getNamedOperandIdx(Opcode, Op);
}

void R600InstrInfo::setImmOperand(MachineInstr &MI, unsigned Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI.getOperand(Idx).isImm());
  MI.getOperand(Idx).setImm(Imm);
}
//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

MachineOperand &R600InstrInfo::getFlagOp(MachineInstr &MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(MI, R600::OpName::clamp);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(MI, R600::OpName::write);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(MI, R600::OpName::last);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0:
        FlagIndex = getOperandIdx(MI, R600::OpName::src0_neg);
        break;
      case 1:
        FlagIndex = getOperandIdx(MI, R600::OpName::src1_neg);
        break;
      case 2:
        FlagIndex = getOperandIdx(MI, R600::OpName::src2_neg);
        break;
      }
      break;

    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0:
        FlagIndex = getOperandIdx(MI, R600::OpName::src0_abs);
        break;
      case 1:
        FlagIndex = getOperandIdx(MI, R600::OpName::src1_abs);
        break;
      }
      break;

    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI.getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}
void R600InstrInfo::addFlag(MachineInstr &MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

void R600InstrInfo::clearFlag(MachineInstr &MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}
unsigned R600InstrInfo::getAddressSpaceForPseudoSourceKind(
    unsigned Kind) const {
  switch (Kind) {
  case PseudoSourceValue::Stack:
  case PseudoSourceValue::FixedStack:
    return AMDGPUAS::PRIVATE_ADDRESS;
  case PseudoSourceValue::ConstantPool:
  case PseudoSourceValue::GOT:
  case PseudoSourceValue::JumpTable:
  case PseudoSourceValue::GlobalValueCallEntry:
  case PseudoSourceValue::ExternalSymbolCallEntry:
  case PseudoSourceValue::TargetCustom:
    return AMDGPUAS::CONSTANT_ADDRESS;
  }

  llvm_unreachable("Invalid pseudo source kind");
}