//===-- R600InstrInfo.cpp - R600 Instruction Information -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// R600 Implementation of TargetInstrInfo.
//
//===----------------------------------------------------------------------===//

#include "R600InstrInfo.h"
#include "AMDGPUInstrInfo.h"
#include "AMDGPUSubtarget.h"
#include "R600Defines.h"
#include "R600FrameLowering.h"
#include "R600RegisterInfo.h"
#include "MCTargetDesc/AMDGPUMCTargetDesc.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/Support/ErrorHandling.h"

using namespace llvm;

#define GET_INSTRINFO_CTOR_DTOR
#include "R600GenDFAPacketizer.inc"

#define GET_INSTRINFO_CTOR_DTOR
#define GET_INSTRMAP_INFO
#define GET_INSTRINFO_NAMED_OPS
#include "R600GenInstrInfo.inc"

R600InstrInfo::R600InstrInfo(const R600Subtarget &ST)
  : R600GenInstrInfo(-1, -1), RI(), ST(ST) {}

bool R600InstrInfo::isVector(const MachineInstr &MI) const {
  return get(MI.getOpcode()).TSFlags & R600_InstFlag::VECTOR;
}

void R600InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                MachineBasicBlock::iterator MI,
                                const DebugLoc &DL, unsigned DestReg,
                                unsigned SrcReg, bool KillSrc) const {
  unsigned VectorComponents = 0;
  if ((R600::R600_Reg128RegClass.contains(DestReg) ||
       R600::R600_Reg128VerticalRegClass.contains(DestReg)) &&
      (R600::R600_Reg128RegClass.contains(SrcReg) ||
       R600::R600_Reg128VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 4;
  } else if ((R600::R600_Reg64RegClass.contains(DestReg) ||
              R600::R600_Reg64VerticalRegClass.contains(DestReg)) &&
             (R600::R600_Reg64RegClass.contains(SrcReg) ||
              R600::R600_Reg64VerticalRegClass.contains(SrcReg))) {
    VectorComponents = 2;
  }

  if (VectorComponents > 0) {
    for (unsigned I = 0; I < VectorComponents; I++) {
      unsigned SubRegIndex = AMDGPURegisterInfo::getSubRegFromChannel(I);
      buildDefaultInstruction(MBB, MI, R600::MOV,
                              RI.getSubReg(DestReg, SubRegIndex),
                              RI.getSubReg(SrcReg, SubRegIndex))
          .addReg(DestReg, RegState::Define | RegState::Implicit);
    }
  } else {
    MachineInstr *NewMI = buildDefaultInstruction(MBB, MI, R600::MOV,
                                                  DestReg, SrcReg);
    NewMI->getOperand(getOperandIdx(*NewMI, R600::OpName::src0))
        .setIsKill(KillSrc);
  }
}

/// \returns true if \p MBBI can be moved into a new basic block.
bool R600InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                        MachineBasicBlock::iterator MBBI) const {
  for (MachineInstr::const_mop_iterator I = MBBI->operands_begin(),
                                        E = MBBI->operands_end(); I != E; ++I) {
    if (I->isReg() && !TargetRegisterInfo::isVirtualRegister(I->getReg()) &&
        I->isUse() && RI.isPhysRegLiveAcrossClauses(I->getReg()))
      return false;
  }
  return true;
}

bool R600InstrInfo::isMov(unsigned Opcode) const {
  switch (Opcode) {
  default:
    return false;
  case R600::MOV_IMM_F32:
  case R600::MOV_IMM_I32:
    return true;
  }
}

bool R600InstrInfo::isReductionOp(unsigned Opcode) const {
  return false;
}

bool R600InstrInfo::isCubeOp(unsigned Opcode) const {
  switch (Opcode) {
  default: return false;
  case R600::CUBE_r600_pseudo:
  case R600::CUBE_r600_real:
  case R600::CUBE_eg_pseudo:
  case R600::CUBE_eg_real:
    return true;
  }
}

bool R600InstrInfo::isALUInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return (TargetFlags & R600_InstFlag::ALU_INST);
}

bool R600InstrInfo::hasInstrModifiers(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::OP1) |
          (TargetFlags & R600_InstFlag::OP2) |
          (TargetFlags & R600_InstFlag::OP3));
}

bool R600InstrInfo::isLDSInstr(unsigned Opcode) const {
  unsigned TargetFlags = get(Opcode).TSFlags;

  return ((TargetFlags & R600_InstFlag::LDS_1A) |
          (TargetFlags & R600_InstFlag::LDS_1A1D) |
          (TargetFlags & R600_InstFlag::LDS_1A2D));
}

bool R600InstrInfo::isLDSRetInstr(unsigned Opcode) const {
  return isLDSInstr(Opcode) && getOperandIdx(Opcode, R600::OpName::dst) != -1;
}

bool R600InstrInfo::canBeConsideredALU(const MachineInstr &MI) const {
  if (isALUInstr(MI.getOpcode()))
    return true;
  if (isVector(MI) || isCubeOp(MI.getOpcode()))
    return true;
  switch (MI.getOpcode()) {
  case R600::INTERP_PAIR_XY:
  case R600::INTERP_PAIR_ZW:
  case R600::INTERP_VEC_LOAD:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::isTransOnly(unsigned Opcode) const {
  if (ST.hasCaymanISA())
    return false;
  return (get(Opcode).getSchedClass() == R600::Sched::TransALU);
}

bool R600InstrInfo::isTransOnly(const MachineInstr &MI) const {
  return isTransOnly(MI.getOpcode());
}

bool R600InstrInfo::isVectorOnly(unsigned Opcode) const {
  return (get(Opcode).getSchedClass() == R600::Sched::VecALU);
}

bool R600InstrInfo::isVectorOnly(const MachineInstr &MI) const {
  return isVectorOnly(MI.getOpcode());
}

bool R600InstrInfo::isExport(unsigned Opcode) const {
  return (get(Opcode).TSFlags & R600_InstFlag::IS_EXPORT);
}

bool R600InstrInfo::usesVertexCache(unsigned Opcode) const {
  return ST.hasVertexCache() && IS_VTX(get(Opcode));
}

bool R600InstrInfo::usesVertexCache(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  return !AMDGPU::isCompute(MF->getFunction().getCallingConv()) &&
         usesVertexCache(MI.getOpcode());
}

bool R600InstrInfo::usesTextureCache(unsigned Opcode) const {
  return (!ST.hasVertexCache() && IS_VTX(get(Opcode))) || IS_TEX(get(Opcode));
}

bool R600InstrInfo::usesTextureCache(const MachineInstr &MI) const {
  const MachineFunction *MF = MI.getParent()->getParent();
  return (AMDGPU::isCompute(MF->getFunction().getCallingConv()) &&
          usesVertexCache(MI.getOpcode())) ||
          usesTextureCache(MI.getOpcode());
}

bool R600InstrInfo::mustBeLastInClause(unsigned Opcode) const {
  switch (Opcode) {
  case R600::GROUP_BARRIER:
    return true;
  default:
    return false;
  }
}

bool R600InstrInfo::usesAddressRegister(MachineInstr &MI) const {
  return MI.findRegisterUseOperandIdx(R600::AR_X, false, &RI) != -1;
}

bool R600InstrInfo::definesAddressRegister(MachineInstr &MI) const {
  return MI.findRegisterDefOperandIdx(R600::AR_X, false, false, &RI) != -1;
}

bool R600InstrInfo::readsLDSSrcReg(const MachineInstr &MI) const {
  if (!isALUInstr(MI.getOpcode())) {
    return false;
  }
  for (MachineInstr::const_mop_iterator I = MI.operands_begin(),
                                        E = MI.operands_end();
       I != E; ++I) {
    if (!I->isReg() || !I->isUse() ||
        TargetRegisterInfo::isVirtualRegister(I->getReg()))
      continue;

    if (R600::R600_LDS_SRC_REGRegClass.contains(I->getReg()))
      return true;
  }
  return false;
}

int R600InstrInfo::getSelIdx(unsigned Opcode, unsigned SrcIdx) const {
  static const unsigned SrcSelTable[][2] = {
    {R600::OpName::src0, R600::OpName::src0_sel},
    {R600::OpName::src1, R600::OpName::src1_sel},
    {R600::OpName::src2, R600::OpName::src2_sel},
    {R600::OpName::src0_X, R600::OpName::src0_sel_X},
    {R600::OpName::src0_Y, R600::OpName::src0_sel_Y},
    {R600::OpName::src0_Z, R600::OpName::src0_sel_Z},
    {R600::OpName::src0_W, R600::OpName::src0_sel_W},
    {R600::OpName::src1_X, R600::OpName::src1_sel_X},
    {R600::OpName::src1_Y, R600::OpName::src1_sel_Y},
    {R600::OpName::src1_Z, R600::OpName::src1_sel_Z},
    {R600::OpName::src1_W, R600::OpName::src1_sel_W}
  };

  for (const auto &Row : SrcSelTable) {
    if (getOperandIdx(Opcode, Row[0]) == (int)SrcIdx) {
      return getOperandIdx(Opcode, Row[1]);
    }
  }
  return -1;
}

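// For example, if SrcIdx is the operand index of src1 for the given opcode,
// getSelIdx returns the operand index of src1_sel (per the table above); a
// return value of -1 means the source has no associated *_sel operand.
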
SmallVector<std::pair<MachineOperand *, int64_t>, 3>
R600InstrInfo::getSrcs(MachineInstr &MI) const {
  SmallVector<std::pair<MachineOperand *, int64_t>, 3> Result;

  if (MI.getOpcode() == R600::DOT_4) {
    static const unsigned OpTable[8][2] = {
      {R600::OpName::src0_X, R600::OpName::src0_sel_X},
      {R600::OpName::src0_Y, R600::OpName::src0_sel_Y},
      {R600::OpName::src0_Z, R600::OpName::src0_sel_Z},
      {R600::OpName::src0_W, R600::OpName::src0_sel_W},
      {R600::OpName::src1_X, R600::OpName::src1_sel_X},
      {R600::OpName::src1_Y, R600::OpName::src1_sel_Y},
      {R600::OpName::src1_Z, R600::OpName::src1_sel_Z},
      {R600::OpName::src1_W, R600::OpName::src1_sel_W},
    };

    for (unsigned j = 0; j < 8; j++) {
      MachineOperand &MO =
          MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][0]));
      unsigned Reg = MO.getReg();
      if (Reg == R600::ALU_CONST) {
        MachineOperand &Sel =
            MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
        Result.push_back(std::make_pair(&MO, Sel.getImm()));
        continue;
      }
    }
    return Result;
  }

  static const unsigned OpTable[3][2] = {
    {R600::OpName::src0, R600::OpName::src0_sel},
    {R600::OpName::src1, R600::OpName::src1_sel},
    {R600::OpName::src2, R600::OpName::src2_sel},
  };

  for (unsigned j = 0; j < 3; j++) {
    int SrcIdx = getOperandIdx(MI.getOpcode(), OpTable[j][0]);
    if (SrcIdx < 0)
      break;
    MachineOperand &MO = MI.getOperand(SrcIdx);
    unsigned Reg = MO.getReg();
    if (Reg == R600::ALU_CONST) {
      MachineOperand &Sel =
          MI.getOperand(getOperandIdx(MI.getOpcode(), OpTable[j][1]));
      Result.push_back(std::make_pair(&MO, Sel.getImm()));
      continue;
    }
    if (Reg == R600::ALU_LITERAL_X) {
      MachineOperand &Operand =
          MI.getOperand(getOperandIdx(MI.getOpcode(), R600::OpName::literal));
      if (Operand.isImm()) {
        Result.push_back(std::make_pair(&MO, Operand.getImm()));
        continue;
      }
      assert(Operand.isGlobal());
    }
    Result.push_back(std::make_pair(&MO, 0));
  }
  return Result;
}

std::vector<std::pair<int, unsigned>>
R600InstrInfo::ExtractSrcs(MachineInstr &MI,
                           const DenseMap<unsigned, unsigned> &PV,
                           unsigned &ConstCount) const {
  ConstCount = 0;
  const std::pair<int, unsigned> DummyPair(-1, 0);
  std::vector<std::pair<int, unsigned>> Result;
  unsigned i = 0;
  for (const auto &Src : getSrcs(MI)) {
    ++i;
    unsigned Reg = Src.first->getReg();
    int Index = RI.getEncodingValue(Reg) & 0xff;
    if (Reg == R600::OQAP) {
      Result.push_back(std::make_pair(Index, 0U));
    }
    if (PV.find(Reg) != PV.end()) {
      // 255 is used to tell it's a PS/PV reg
      Result.push_back(std::make_pair(255, 0U));
      continue;
    }
    if (Index > 127) {
      ConstCount++;
      Result.push_back(DummyPair);
      continue;
    }
    unsigned Chan = RI.getHWRegChan(Reg);
    Result.push_back(std::make_pair(Index, Chan));
  }
  for (; i < 3; ++i)
    Result.push_back(DummyPair);
  return Result;
}

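// Reading the pairs produced above: .first is the hardware register index
// (255 as a sentinel for a PS/PV forwarded value, -1 for an unused or
// constant source) and .second is the channel. As a sketch, a source living
// in register T2 channel Z would come back as {2, 2}, assuming
// getEncodingValue/getHWRegChan split the encoding into index and channel as
// elsewhere in this file.
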
static std::vector<std::pair<int, unsigned>>
Swizzle(std::vector<std::pair<int, unsigned>> Src,
        R600InstrInfo::BankSwizzle Swz) {
  if (Src[0] == Src[1])
    Src[1].first = -1;
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210:
    break;
  case R600InstrInfo::ALU_VEC_021_SCL_122:
    std::swap(Src[1], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_102_SCL_221:
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_120_SCL_212:
    std::swap(Src[0], Src[1]);
    std::swap(Src[0], Src[2]);
    break;
  case R600InstrInfo::ALU_VEC_201:
    std::swap(Src[0], Src[2]);
    std::swap(Src[0], Src[1]);
    break;
  case R600InstrInfo::ALU_VEC_210:
    std::swap(Src[0], Src[2]);
    break;
  }
  return Src;
}

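// A minimal illustration of the permutations above: with sources (A, B, C),
// ALU_VEC_012_SCL_210 leaves the order unchanged, ALU_VEC_021_SCL_122 yields
// (A, C, B), and ALU_VEC_210 yields (C, B, A). The check at the top only
// fires when src0 and src1 are the identical operand.
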
static unsigned getTransSwizzle(R600InstrInfo::BankSwizzle Swz, unsigned Op) {
  switch (Swz) {
  case R600InstrInfo::ALU_VEC_012_SCL_210: {
    unsigned Cycles[3] = { 2, 1, 0};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_021_SCL_122: {
    unsigned Cycles[3] = { 1, 2, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_120_SCL_212: {
    unsigned Cycles[3] = { 2, 1, 2};
    return Cycles[Op];
  }
  case R600InstrInfo::ALU_VEC_102_SCL_221: {
    unsigned Cycles[3] = { 2, 2, 1};
    return Cycles[Op];
  }
  default:
    llvm_unreachable("Wrong Swizzle for Trans Slot");
  }
}

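// Example, read straight off the tables above: getTransSwizzle(
// ALU_VEC_021_SCL_122, 1) is 2, i.e. under that swizzle the trans slot reads
// its second operand on cycle 2. Swizzles without an _SCL_ variant are not
// valid in the trans slot and hit the unreachable.
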
/// \returns how many MIs (whose inputs are represented by IGSrcs) can be
/// packed in the same Instruction Group while meeting read port limitations
/// given a Swz swizzle sequence.
unsigned R600InstrInfo::isLegalUpTo(
    const std::vector<std::vector<std::pair<int, unsigned>>> &IGSrcs,
    const std::vector<R600InstrInfo::BankSwizzle> &Swz,
    const std::vector<std::pair<int, unsigned>> &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  int Vector[4][3];
  memset(Vector, -1, sizeof(Vector));
  for (unsigned i = 0, e = IGSrcs.size(); i < e; i++) {
    const std::vector<std::pair<int, unsigned>> &Srcs =
        Swizzle(IGSrcs[i], Swz[i]);
    for (unsigned j = 0; j < 3; j++) {
      const std::pair<int, unsigned> &Src = Srcs[j];
      if (Src.first < 0 || Src.first == 255)
        continue;
      if (Src.first == GET_REG_INDEX(RI.getEncodingValue(R600::OQAP))) {
        if (Swz[i] != R600InstrInfo::ALU_VEC_012_SCL_210 &&
            Swz[i] != R600InstrInfo::ALU_VEC_021_SCL_122) {
          // The value from output queue A (denoted by register OQAP) can
          // only be fetched during the first cycle.
          return 0;
        }
        // OQAP does not count towards the normal read port restrictions
        continue;
      }
      if (Vector[Src.second][j] < 0)
        Vector[Src.second][j] = Src.first;
      if (Vector[Src.second][j] != Src.first)
        return i;
    }
  }
  // Now check Trans Alu
  for (unsigned i = 0, e = TransSrcs.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransSrcs[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (Src.first == 255)
      continue;
    if (Vector[Src.second][Cycle] < 0)
      Vector[Src.second][Cycle] = Src.first;
    if (Vector[Src.second][Cycle] != Src.first)
      return IGSrcs.size() - 1;
  }
  return IGSrcs.size();
}

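// The bookkeeping above is a small table Vector[channel][cycle]: each GPR
// channel offers one read port per cycle, so two different register indices
// that land on the same channel in the same cycle conflict and the group is
// truncated at instruction i. Sketch: if instruction 0 reads T0.X on cycle 0
// and instruction 1 also needs T1.X on cycle 0, isLegalUpTo returns 1 and
// only the first instruction stays in the group.
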
/// Given a swizzle sequence SwzCandidate and an index Idx, returns the next
/// (in lexicographic terms) swizzle sequence assuming that all swizzles after
/// Idx can be skipped.
static bool
NextPossibleSolution(
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    unsigned Idx) {
  assert(Idx < SwzCandidate.size());
  int ResetIdx = Idx;
  while (ResetIdx > -1 && SwzCandidate[ResetIdx] == R600InstrInfo::ALU_VEC_210)
    ResetIdx--;
  for (unsigned i = ResetIdx + 1, e = SwzCandidate.size(); i < e; i++) {
    SwzCandidate[i] = R600InstrInfo::ALU_VEC_012_SCL_210;
  }
  if (ResetIdx == -1)
    return false;
  int NextSwizzle = SwzCandidate[ResetIdx] + 1;
  SwzCandidate[ResetIdx] = (R600InstrInfo::BankSwizzle)NextSwizzle;
  return true;
}

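// This advances the candidate like an odometer: scanning backwards from Idx,
// the first position not already at the last enumerator (ALU_VEC_210) is
// bumped by one, every later position is reset to ALU_VEC_012_SCL_210, and
// false is returned once all combinations have been exhausted.
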
/// Enumerate all possible swizzle sequences to find one that can meet all
/// read port requirements.
bool R600InstrInfo::FindSwizzleForVectorSlot(
    const std::vector<std::vector<std::pair<int, unsigned>>> &IGSrcs,
    std::vector<R600InstrInfo::BankSwizzle> &SwzCandidate,
    const std::vector<std::pair<int, unsigned>> &TransSrcs,
    R600InstrInfo::BankSwizzle TransSwz) const {
  unsigned ValidUpTo = 0;
  do {
    ValidUpTo = isLegalUpTo(IGSrcs, SwzCandidate, TransSrcs, TransSwz);
    if (ValidUpTo == IGSrcs.size())
      return true;
  } while (NextPossibleSolution(SwzCandidate, ValidUpTo));
  return false;
}

/// Instructions in the Trans slot can't read a gpr at cycle 0 if they also
/// read a const, and can't read a gpr at cycle 1 if they read 2 consts.
static bool
isConstCompatible(R600InstrInfo::BankSwizzle TransSwz,
                  const std::vector<std::pair<int, unsigned>> &TransOps,
                  unsigned ConstCount) {
  // TransALU can't read 3 constants
  if (ConstCount > 2)
    return false;
  for (unsigned i = 0, e = TransOps.size(); i < e; ++i) {
    const std::pair<int, unsigned> &Src = TransOps[i];
    unsigned Cycle = getTransSwizzle(TransSwz, i);
    if (Src.first < 0)
      continue;
    if (ConstCount > 0 && Cycle == 0)
      return false;
    if (ConstCount > 1 && Cycle == 1)
      return false;
  }
  return true;
}

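// Put differently: a trans-slot instruction may read at most two constants,
// and each constant read pushes its GPR operands onto later cycles. For
// example, with ConstCount == 1 a GPR operand that the chosen swizzle
// schedules on cycle 0 rejects the candidate, while the same operand on
// cycle 1 or 2 is fine.
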
bool
R600InstrInfo::fitsReadPortLimitations(const std::vector<MachineInstr *> &IG,
                                       const DenseMap<unsigned, unsigned> &PV,
                                       std::vector<BankSwizzle> &ValidSwizzle,
                                       bool isLastAluTrans)
    const {
  // TODO: support shared src0 - src1 operand

  std::vector<std::vector<std::pair<int, unsigned>>> IGSrcs;
  ValidSwizzle.clear();
  unsigned ConstCount;
  BankSwizzle TransBS = ALU_VEC_012_SCL_210;
  for (unsigned i = 0, e = IG.size(); i < e; ++i) {
    IGSrcs.push_back(ExtractSrcs(*IG[i], PV, ConstCount));
    unsigned Op = getOperandIdx(IG[i]->getOpcode(),
                                R600::OpName::bank_swizzle);
    ValidSwizzle.push_back((R600InstrInfo::BankSwizzle)
                           IG[i]->getOperand(Op).getImm());
  }
  std::vector<std::pair<int, unsigned>> TransOps;
  if (!isLastAluTrans)
    return FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps, TransBS);

  TransOps = std::move(IGSrcs.back());
  IGSrcs.pop_back();
  ValidSwizzle.pop_back();

  static const R600InstrInfo::BankSwizzle TransSwz[] = {
    ALU_VEC_012_SCL_210,
    ALU_VEC_021_SCL_122,
    ALU_VEC_120_SCL_212,
    ALU_VEC_102_SCL_221
  };
  for (unsigned i = 0; i < 4; i++) {
    TransBS = TransSwz[i];
    if (!isConstCompatible(TransBS, TransOps, ConstCount))
      continue;
    bool Result = FindSwizzleForVectorSlot(IGSrcs, ValidSwizzle, TransOps,
                                           TransBS);
    if (Result) {
      ValidSwizzle.push_back(TransBS);
      return true;
    }
  }

  return false;
}

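// The search is two-phase: when the last ALU is not in the trans slot the
// vector slots are solved directly with an empty TransOps list; otherwise the
// last instruction's sources are peeled off and the four trans-capable
// swizzles are tried in turn, re-running the vector search with those sources
// folded in. On success ValidSwizzle holds one swizzle per instruction in IG.
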
bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<unsigned> &Consts)
    const {
  assert(Consts.size() <= 12 && "Too many operands in instructions group");
  unsigned Pair1 = 0, Pair2 = 0;
  for (unsigned i = 0, n = Consts.size(); i < n; ++i) {
    unsigned ReadConstHalf = Consts[i] & 2;
    unsigned ReadConstIndex = Consts[i] & (~3);
    unsigned ReadHalfConst = ReadConstIndex | ReadConstHalf;
    if (!Pair1) {
      Pair1 = ReadHalfConst;
      continue;
    }
    if (Pair1 == ReadHalfConst)
      continue;
    if (!Pair2) {
      Pair2 = ReadHalfConst;
      continue;
    }
    if (Pair2 != ReadHalfConst)
      return false;
  }
  return true;
}

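// Each entry in Consts is encoded as (Index << 2) | Chan (see the caller
// below), so masking with ~3 and with 2 keys a constant by its index plus its
// XY/ZW half; an ALU group may address at most two such halves. Sketch: a
// group reading c0.x, c0.y and c0.z uses two halves of c0 and still fits,
// but additionally reading c1.x would touch a third half and be rejected.
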
bool
R600InstrInfo::fitsConstReadLimitations(const std::vector<MachineInstr *> &MIs)
    const {
  std::vector<unsigned> Consts;
  SmallSet<int64_t, 4> Literals;
  for (unsigned i = 0, n = MIs.size(); i < n; i++) {
    MachineInstr &MI = *MIs[i];
    if (!isALUInstr(MI.getOpcode()))
      continue;

    for (const auto &Src : getSrcs(MI)) {
      if (Src.first->getReg() == R600::ALU_LITERAL_X)
        Literals.insert(Src.second);
      if (Literals.size() > 4)
        return false;
      if (Src.first->getReg() == R600::ALU_CONST)
        Consts.push_back(Src.second);
      if (R600::R600_KC0RegClass.contains(Src.first->getReg()) ||
          R600::R600_KC1RegClass.contains(Src.first->getReg())) {
        unsigned Index = RI.getEncodingValue(Src.first->getReg()) & 0xff;
        unsigned Chan = RI.getHWRegChan(Src.first->getReg());
        Consts.push_back((Index << 2) | Chan);
      }
    }
  }
  return fitsConstReadLimitations(Consts);
}

DFAPacketizer *
R600InstrInfo::CreateTargetScheduleState(const TargetSubtargetInfo &STI) const {
  const InstrItineraryData *II = STI.getInstrItineraryData();
  return static_cast<const R600Subtarget &>(STI).createDFAPacketizer(II);
}

static bool
isPredicateSetter(unsigned Opcode) {
  switch (Opcode) {
  case R600::PRED_X:
    return true;
  default:
    return false;
  }
}

static MachineInstr *
findFirstPredicateSetterFrom(MachineBasicBlock &MBB,
                             MachineBasicBlock::iterator I) {
  while (I != MBB.begin()) {
    --I;
    MachineInstr &MI = *I;
    if (isPredicateSetter(MI.getOpcode()))
      return &MI;
  }
  return nullptr;
}

static bool isJump(unsigned Opcode) {
  return Opcode == R600::JUMP || Opcode == R600::JUMP_COND;
}

static bool isBranch(unsigned Opcode) {
  return Opcode == R600::BRANCH || Opcode == R600::BRANCH_COND_i32 ||
         Opcode == R600::BRANCH_COND_f32;
}

bool R600InstrInfo::analyzeBranch(MachineBasicBlock &MBB,
                                  MachineBasicBlock *&TBB,
                                  MachineBasicBlock *&FBB,
                                  SmallVectorImpl<MachineOperand> &Cond,
                                  bool AllowModify) const {
  // Most of the following comes from the ARM implementation of AnalyzeBranch.

  // If the block has no terminators, it just falls into the block after it.
  MachineBasicBlock::iterator I = MBB.getLastNonDebugInstr();
  if (I == MBB.end())
    return false;

  // R600::BRANCH* instructions are only available after isel and are not
  // handled here.
  if (isBranch(I->getOpcode()))
    return true;
  if (!isJump(I->getOpcode())) {
    return false;
  }

  // Remove successive JUMPs.
  while (I != MBB.begin() && std::prev(I)->getOpcode() == R600::JUMP) {
    MachineBasicBlock::iterator PriorI = std::prev(I);
    if (AllowModify)
      I->removeFromParent();
    I = PriorI;
  }
  MachineInstr &LastInst = *I;

  // If there is only one terminator instruction, process it.
  unsigned LastOpc = LastInst.getOpcode();
  if (I == MBB.begin() || !isJump((--I)->getOpcode())) {
    if (LastOpc == R600::JUMP) {
      TBB = LastInst.getOperand(0).getMBB();
      return false;
    } else if (LastOpc == R600::JUMP_COND) {
      MachineInstr *predSet = &*I;
      while (!isPredicateSetter(predSet->getOpcode())) {
        predSet = &*--I;
      }
      TBB = LastInst.getOperand(0).getMBB();
      Cond.push_back(predSet->getOperand(1));
      Cond.push_back(predSet->getOperand(2));
      Cond.push_back(MachineOperand::CreateReg(R600::PRED_SEL_ONE, false));
      return false;
    }
    return true; // Can't handle indirect branch.
  }

  // Get the instruction before it if it is a terminator.
  MachineInstr &SecondLastInst = *I;
  unsigned SecondLastOpc = SecondLastInst.getOpcode();

  // If the block ends with a B and a Bcc, handle it.
  if (SecondLastOpc == R600::JUMP_COND && LastOpc == R600::JUMP) {
    MachineInstr *predSet = &*I;
    while (!isPredicateSetter(predSet->getOpcode())) {
      predSet = &*--I;
    }
    TBB = SecondLastInst.getOperand(0).getMBB();
    FBB = LastInst.getOperand(0).getMBB();
    Cond.push_back(predSet->getOperand(1));
    Cond.push_back(predSet->getOperand(2));
    Cond.push_back(MachineOperand::CreateReg(R600::PRED_SEL_ONE, false));
    return false;
  }

  // Otherwise, can't handle this.
  return true;
}

static MachineBasicBlock::iterator FindLastAluClause(MachineBasicBlock &MBB) {
  for (MachineBasicBlock::reverse_iterator It = MBB.rbegin(), E = MBB.rend();
       It != E; ++It) {
    if (It->getOpcode() == R600::CF_ALU ||
        It->getOpcode() == R600::CF_ALU_PUSH_BEFORE)
      return It.getReverse();
  }
  return MBB.end();
}

unsigned R600InstrInfo::insertBranch(MachineBasicBlock &MBB,
                                     MachineBasicBlock *TBB,
                                     MachineBasicBlock *FBB,
                                     ArrayRef<MachineOperand> Cond,
                                     const DebugLoc &DL,
                                     int *BytesAdded) const {
  assert(TBB && "insertBranch must not be told to insert a fallthrough");
  assert(!BytesAdded && "code size not handled");

  if (!FBB) {
    if (Cond.empty()) {
      BuildMI(&MBB, DL, get(R600::JUMP)).addMBB(TBB);
      return 1;
    } else {
      MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
      assert(PredSet && "No previous predicate !");
      addFlag(*PredSet, 0, MO_FLAG_PUSH);
      PredSet->getOperand(2).setImm(Cond[1].getImm());

      BuildMI(&MBB, DL, get(R600::JUMP_COND))
          .addMBB(TBB)
          .addReg(R600::PREDICATE_BIT, RegState::Kill);
      MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
      if (CfAlu == MBB.end())
        return 1;
      assert (CfAlu->getOpcode() == R600::CF_ALU);
      CfAlu->setDesc(get(R600::CF_ALU_PUSH_BEFORE));
      return 1;
    }
  } else {
    MachineInstr *PredSet = findFirstPredicateSetterFrom(MBB, MBB.end());
    assert(PredSet && "No previous predicate !");
    addFlag(*PredSet, 0, MO_FLAG_PUSH);
    PredSet->getOperand(2).setImm(Cond[1].getImm());
    BuildMI(&MBB, DL, get(R600::JUMP_COND))
        .addMBB(TBB)
        .addReg(R600::PREDICATE_BIT, RegState::Kill);
    BuildMI(&MBB, DL, get(R600::JUMP)).addMBB(FBB);
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      return 2;
    assert (CfAlu->getOpcode() == R600::CF_ALU);
    CfAlu->setDesc(get(R600::CF_ALU_PUSH_BEFORE));
    return 2;
  }
}

unsigned R600InstrInfo::removeBranch(MachineBasicBlock &MBB,
                                     int *BytesRemoved) const {
  assert(!BytesRemoved && "code size not handled");

  // Note: we leave PRED* instructions there.
  // They may be needed when predicating instructions.
  MachineBasicBlock::iterator I = MBB.end();

  if (I == MBB.begin()) {
    return 0;
  }
  --I;
  switch (I->getOpcode()) {
  default:
    return 0;
  case R600::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(*predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert (CfAlu->getOpcode() == R600::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(R600::CF_ALU));
    break;
  }
  case R600::JUMP:
    I->eraseFromParent();
    break;
  }
  I = MBB.end();

  if (I == MBB.begin()) {
    return 1;
  }
  --I;
  switch (I->getOpcode()) {
    // FIXME: only one case??
  default:
    return 1;
  case R600::JUMP_COND: {
    MachineInstr *predSet = findFirstPredicateSetterFrom(MBB, I);
    clearFlag(*predSet, 0, MO_FLAG_PUSH);
    I->eraseFromParent();
    MachineBasicBlock::iterator CfAlu = FindLastAluClause(MBB);
    if (CfAlu == MBB.end())
      break;
    assert (CfAlu->getOpcode() == R600::CF_ALU_PUSH_BEFORE);
    CfAlu->setDesc(get(R600::CF_ALU));
    break;
  }
  case R600::JUMP:
    I->eraseFromParent();
    break;
  }
  return 2;
}

bool R600InstrInfo::isPredicated(const MachineInstr &MI) const {
  int idx = MI.findFirstPredOperandIdx();
  if (idx < 0)
    return false;

  unsigned Reg = MI.getOperand(idx).getReg();
  switch (Reg) {
  default: return false;
  case R600::PRED_SEL_ONE:
  case R600::PRED_SEL_ZERO:
  case R600::PREDICATE_BIT:
    return true;
  }
}

bool R600InstrInfo::isPredicable(const MachineInstr &MI) const {
  // XXX: KILL* instructions can be predicated, but they must be the last
  // instruction in a clause, so this means any instructions after them cannot
  // be predicated. Until we have proper support for instruction clauses in
  // the backend, we will mark KILL* instructions as unpredicable.

  if (MI.getOpcode() == R600::KILLGT) {
    return false;
  } else if (MI.getOpcode() == R600::CF_ALU) {
    // If the clause starts in the middle of the MBB then the MBB contains
    // more than one clause; we cannot predicate multiple clauses.
    if (MI.getParent()->begin() != MachineBasicBlock::const_iterator(MI))
      return false;
    // TODO: We don't support KC merging at the moment.
    return MI.getOperand(3).getImm() == 0 && MI.getOperand(4).getImm() == 0;
  } else if (isVector(MI)) {
    return false;
  } else {
    return TargetInstrInfo::isPredicable(MI);
  }
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &MBB,
                                   unsigned NumCycles,
                                   unsigned ExtraPredCycles,
                                   BranchProbability Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToIfCvt(MachineBasicBlock &TMBB,
                                   unsigned NumTCycles,
                                   unsigned ExtraTCycles,
                                   MachineBasicBlock &FMBB,
                                   unsigned NumFCycles,
                                   unsigned ExtraFCycles,
                                   BranchProbability Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToDupForIfCvt(MachineBasicBlock &MBB,
                                         unsigned NumCycles,
                                         BranchProbability Probability) const {
  return true;
}

bool
R600InstrInfo::isProfitableToUnpredicate(MachineBasicBlock &TMBB,
                                         MachineBasicBlock &FMBB) const {
  return false;
}

bool
R600InstrInfo::reverseBranchCondition(SmallVectorImpl<MachineOperand> &Cond) const {
  MachineOperand &MO = Cond[1];
  switch (MO.getImm()) {
  case R600::PRED_SETE_INT:
    MO.setImm(R600::PRED_SETNE_INT);
    break;
  case R600::PRED_SETNE_INT:
    MO.setImm(R600::PRED_SETE_INT);
    break;
  case R600::PRED_SETE:
    MO.setImm(R600::PRED_SETNE);
    break;
  case R600::PRED_SETNE:
    MO.setImm(R600::PRED_SETE);
    break;
  default:
    return true;
  }

  MachineOperand &MO2 = Cond[2];
  switch (MO2.getReg()) {
  case R600::PRED_SEL_ZERO:
    MO2.setReg(R600::PRED_SEL_ONE);
    break;
  case R600::PRED_SEL_ONE:
    MO2.setReg(R600::PRED_SEL_ZERO);
    break;
  default:
    return true;
  }

  return false;
}

bool R600InstrInfo::DefinesPredicate(MachineInstr &MI,
                                     std::vector<MachineOperand> &Pred) const {
  return isPredicateSetter(MI.getOpcode());
}

bool R600InstrInfo::PredicateInstruction(MachineInstr &MI,
                                         ArrayRef<MachineOperand> Pred) const {
  int PIdx = MI.findFirstPredOperandIdx();

  if (MI.getOpcode() == R600::CF_ALU) {
    MI.getOperand(8).setImm(0);
    return true;
  }

  if (MI.getOpcode() == R600::DOT_4) {
    MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_X))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_Y))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_Z))
        .setReg(Pred[2].getReg());
    MI.getOperand(getOperandIdx(MI, R600::OpName::pred_sel_W))
        .setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
    MIB.addReg(R600::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  if (PIdx != -1) {
    MachineOperand &PMO = MI.getOperand(PIdx);
    PMO.setReg(Pred[2].getReg());
    MachineInstrBuilder MIB(*MI.getParent()->getParent(), MI);
    MIB.addReg(R600::PREDICATE_BIT, RegState::Implicit);
    return true;
  }

  return false;
}

unsigned int R600InstrInfo::getPredicationCost(const MachineInstr &) const {
  return 2;
}

unsigned int R600InstrInfo::getInstrLatency(const InstrItineraryData *ItinData,
                                            const MachineInstr &,
                                            unsigned *PredCost) const {
  if (PredCost)
    *PredCost = 2;
  return 2;
}

unsigned R600InstrInfo::calculateIndirectAddress(unsigned RegIndex,
                                                 unsigned Channel) const {
  assert(Channel == 0);
  return RegIndex;
}

bool R600InstrInfo::expandPostRAPseudo(MachineInstr &MI) const {
  switch (MI.getOpcode()) {
  default: {
    MachineBasicBlock *MBB = MI.getParent();
    int OffsetOpIdx =
        R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::addr);
    // addr is a custom operand with multiple MI operands, and only the
    // first MI operand is given a name.
    int RegOpIdx = OffsetOpIdx + 1;
    int ChanOpIdx =
        R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::chan);
    if (isRegisterLoad(MI)) {
      int DstOpIdx =
          R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::dst);
      unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
      unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
      if (OffsetReg == R600::INDIRECT_BASE_ADDR) {
        buildMovInstr(MBB, MI, MI.getOperand(DstOpIdx).getReg(),
                      getIndirectAddrRegClass()->getRegister(Address));
      } else {
        buildIndirectRead(MBB, MI, MI.getOperand(DstOpIdx).getReg(), Address,
                          OffsetReg);
      }
    } else if (isRegisterStore(MI)) {
      int ValOpIdx =
          R600::getNamedOperandIdx(MI.getOpcode(), R600::OpName::val);
      unsigned RegIndex = MI.getOperand(RegOpIdx).getImm();
      unsigned Channel = MI.getOperand(ChanOpIdx).getImm();
      unsigned Address = calculateIndirectAddress(RegIndex, Channel);
      unsigned OffsetReg = MI.getOperand(OffsetOpIdx).getReg();
      if (OffsetReg == R600::INDIRECT_BASE_ADDR) {
        buildMovInstr(MBB, MI, getIndirectAddrRegClass()->getRegister(Address),
                      MI.getOperand(ValOpIdx).getReg());
      } else {
        buildIndirectWrite(MBB, MI, MI.getOperand(ValOpIdx).getReg(),
                           calculateIndirectAddress(RegIndex, Channel),
                           OffsetReg);
      }
    } else {
      return false;
    }
    break;
  }
  case R600::R600_EXTRACT_ELT_V2:
  case R600::R600_EXTRACT_ELT_V4:
    buildIndirectRead(MI.getParent(), MI, MI.getOperand(0).getReg(),
                      RI.getHWRegIndex(MI.getOperand(1).getReg()), // Address
                      MI.getOperand(2).getReg(),
                      RI.getHWRegChan(MI.getOperand(1).getReg()));
    break;
  case R600::R600_INSERT_ELT_V2:
  case R600::R600_INSERT_ELT_V4:
    buildIndirectWrite(MI.getParent(), MI, MI.getOperand(2).getReg(), // Value
                       RI.getHWRegIndex(MI.getOperand(1).getReg()),   // Address
                       MI.getOperand(3).getReg(),                     // Offset
                       RI.getHWRegChan(MI.getOperand(1).getReg()));   // Channel
    break;
  }

  MI.eraseFromParent();
  return true;
}

void R600InstrInfo::reserveIndirectRegisters(BitVector &Reserved,
                                             const MachineFunction &MF,
                                             const R600RegisterInfo &TRI) const {
  const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
  const R600FrameLowering *TFL = ST.getFrameLowering();

  unsigned StackWidth = TFL->getStackWidth(MF);
  int End = getIndirectIndexEnd(MF);

  if (End == -1)
    return;

  for (int Index = getIndirectIndexBegin(MF); Index <= End; ++Index) {
    for (unsigned Chan = 0; Chan < StackWidth; ++Chan) {
      unsigned Reg = R600::R600_TReg32RegClass.getRegister((4 * Index) + Chan);
      TRI.reserveRegisterTuples(Reserved, Reg);
    }
  }
}

const TargetRegisterClass *R600InstrInfo::getIndirectAddrRegClass() const {
  return &R600::R600_TReg32_XRegClass;
}

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  return buildIndirectWrite(MBB, I, ValueReg, Address, OffsetReg, 0);
}

MachineInstrBuilder R600InstrInfo::buildIndirectWrite(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  unsigned AddrReg;
  switch (AddrChan) {
  default: llvm_unreachable("Invalid Channel");
  case 0: AddrReg = R600::R600_AddrRegClass.getRegister(Address); break;
  case 1: AddrReg = R600::R600_Addr_YRegClass.getRegister(Address); break;
  case 2: AddrReg = R600::R600_Addr_ZRegClass.getRegister(Address); break;
  case 3: AddrReg = R600::R600_Addr_WRegClass.getRegister(Address); break;
  }
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, R600::MOVA_INT_eg,
                                               R600::AR_X, OffsetReg);
  setImmOperand(*MOVA, R600::OpName::write, 0);

  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, R600::MOV,
                                                    AddrReg, ValueReg)
                                .addReg(R600::AR_X,
                                        RegState::Implicit | RegState::Kill);
  setImmOperand(*Mov, R600::OpName::dst_rel, 1);
  return Mov;
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg) const {
  return buildIndirectRead(MBB, I, ValueReg, Address, OffsetReg, 0);
}

MachineInstrBuilder R600InstrInfo::buildIndirectRead(MachineBasicBlock *MBB,
                                       MachineBasicBlock::iterator I,
                                       unsigned ValueReg, unsigned Address,
                                       unsigned OffsetReg,
                                       unsigned AddrChan) const {
  unsigned AddrReg;
  switch (AddrChan) {
  default: llvm_unreachable("Invalid Channel");
  case 0: AddrReg = R600::R600_AddrRegClass.getRegister(Address); break;
  case 1: AddrReg = R600::R600_Addr_YRegClass.getRegister(Address); break;
  case 2: AddrReg = R600::R600_Addr_ZRegClass.getRegister(Address); break;
  case 3: AddrReg = R600::R600_Addr_WRegClass.getRegister(Address); break;
  }
  MachineInstr *MOVA = buildDefaultInstruction(*MBB, I, R600::MOVA_INT_eg,
                                               R600::AR_X, OffsetReg);
  setImmOperand(*MOVA, R600::OpName::write, 0);
  MachineInstrBuilder Mov = buildDefaultInstruction(*MBB, I, R600::MOV,
                                                    ValueReg, AddrReg)
                                .addReg(R600::AR_X,
                                        RegState::Implicit | RegState::Kill);
  setImmOperand(*Mov, R600::OpName::src0_rel, 1);

  return Mov;
}

int R600InstrInfo::getIndirectIndexBegin(const MachineFunction &MF) const {
  const MachineRegisterInfo &MRI = MF.getRegInfo();
  const MachineFrameInfo &MFI = MF.getFrameInfo();
  int Offset = -1;

  if (MFI.getNumObjects() == 0) {
    return -1;
  }

  if (MRI.livein_empty()) {
    return 0;
  }

  const TargetRegisterClass *IndirectRC = getIndirectAddrRegClass();
  for (std::pair<unsigned, unsigned> LI : MRI.liveins()) {
    unsigned Reg = LI.first;
    if (TargetRegisterInfo::isVirtualRegister(Reg) ||
        !IndirectRC->contains(Reg))
      continue;

    unsigned RegIndex;
    unsigned RegEnd;
    for (RegIndex = 0, RegEnd = IndirectRC->getNumRegs(); RegIndex != RegEnd;
         ++RegIndex) {
      if (IndirectRC->getRegister(RegIndex) == Reg)
        break;
    }
    Offset = std::max(Offset, (int)RegIndex);
  }

  return Offset + 1;
}

int R600InstrInfo::getIndirectIndexEnd(const MachineFunction &MF) const {
  int Offset = 0;
  const MachineFrameInfo &MFI = MF.getFrameInfo();

  // Variable sized objects are not supported
  if (MFI.hasVarSizedObjects()) {
    return -1;
  }

  if (MFI.getNumObjects() == 0) {
    return -1;
  }

  const R600Subtarget &ST = MF.getSubtarget<R600Subtarget>();
  const R600FrameLowering *TFL = ST.getFrameLowering();

  unsigned IgnoredFrameReg;
  Offset = TFL->getFrameIndexReference(MF, -1, IgnoredFrameReg);

  return getIndirectIndexBegin(MF) + Offset;
}

unsigned R600InstrInfo::getMaxAlusPerClause() const {
  return 115;
}

MachineInstrBuilder R600InstrInfo::buildDefaultInstruction(MachineBasicBlock &MBB,
                                                  MachineBasicBlock::iterator I,
                                                  unsigned Opcode,
                                                  unsigned DstReg,
                                                  unsigned Src0Reg,
                                                  unsigned Src1Reg) const {
  MachineInstrBuilder MIB = BuildMI(MBB, I, MBB.findDebugLoc(I), get(Opcode),
    DstReg);            // $dst

  if (Src1Reg) {
    MIB.addImm(0)       // $update_exec_mask
       .addImm(0);      // $update_predicate
  }
  MIB.addImm(1)         // $write
     .addImm(0)         // $omod
     .addImm(0)         // $dst_rel
     .addImm(0)         // $dst_clamp
     .addReg(Src0Reg)   // $src0
     .addImm(0)         // $src0_neg
     .addImm(0)         // $src0_rel
     .addImm(0)         // $src0_abs
     .addImm(-1);       // $src0_sel

  if (Src1Reg) {
    MIB.addReg(Src1Reg) // $src1
       .addImm(0)       // $src1_neg
       .addImm(0)       // $src1_rel
       .addImm(0)       // $src1_abs
       .addImm(-1);     // $src1_sel
  }

  // XXX: The r600g finalizer expects this to be 1; once we've moved the
  // scheduling to the backend, we can change the default to 0.
  MIB.addImm(1)                   // $last
     .addReg(R600::PRED_SEL_OFF)  // $pred_sel
     .addImm(0)                   // $literal
     .addImm(0);                  // $bank_swizzle

  return MIB;
}

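// Typical use, as in buildMovInstr/buildMovImm further down:
// buildDefaultInstruction(MBB, I, R600::MOV, DstReg, SrcReg) emits a MOV with
// all modifiers cleared (neg/abs/rel = 0, write = 1, last = 1) and the sel
// operands set to -1, ready for setImmOperand() to patch individual fields
// such as the literal.
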
#define OPERAND_CASE(Label) \
  case Label: { \
    static const unsigned Ops[] = \
    { \
      Label##_X, \
      Label##_Y, \
      Label##_Z, \
      Label##_W \
    }; \
    return Ops[Slot]; \
  }

static unsigned getSlotedOps(unsigned Op, unsigned Slot) {
  switch (Op) {
  OPERAND_CASE(R600::OpName::update_exec_mask)
  OPERAND_CASE(R600::OpName::update_pred)
  OPERAND_CASE(R600::OpName::write)
  OPERAND_CASE(R600::OpName::omod)
  OPERAND_CASE(R600::OpName::dst_rel)
  OPERAND_CASE(R600::OpName::clamp)
  OPERAND_CASE(R600::OpName::src0)
  OPERAND_CASE(R600::OpName::src0_neg)
  OPERAND_CASE(R600::OpName::src0_rel)
  OPERAND_CASE(R600::OpName::src0_abs)
  OPERAND_CASE(R600::OpName::src0_sel)
  OPERAND_CASE(R600::OpName::src1)
  OPERAND_CASE(R600::OpName::src1_neg)
  OPERAND_CASE(R600::OpName::src1_rel)
  OPERAND_CASE(R600::OpName::src1_abs)
  OPERAND_CASE(R600::OpName::src1_sel)
  OPERAND_CASE(R600::OpName::pred_sel)
  default:
    llvm_unreachable("Wrong Operand");
  }
}

#undef OPERAND_CASE

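// Assuming the OPERAND_CASE expansion above, getSlotedOps maps a generic
// operand name to its per-slot variant, e.g. getSlotedOps(R600::OpName::src0,
// 1) yields R600::OpName::src0_Y (slots 0..3 correspond to the X, Y, Z and W
// lanes of a DOT_4).
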
MachineInstr *R600InstrInfo::buildSlotOfVectorInstruction(
    MachineBasicBlock &MBB, MachineInstr *MI, unsigned Slot, unsigned DstReg)
    const {
  assert (MI->getOpcode() == R600::DOT_4 && "Not Implemented");
  unsigned Opcode;
  if (ST.getGeneration() <= AMDGPUSubtarget::R700)
    Opcode = R600::DOT4_r600;
  else
    Opcode = R600::DOT4_eg;
  MachineBasicBlock::iterator I = MI;
  MachineOperand &Src0 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(R600::OpName::src0, Slot)));
  MachineOperand &Src1 = MI->getOperand(
      getOperandIdx(MI->getOpcode(), getSlotedOps(R600::OpName::src1, Slot)));
  MachineInstr *MIB = buildDefaultInstruction(
      MBB, I, Opcode, DstReg, Src0.getReg(), Src1.getReg());
  static const unsigned Operands[14] = {
    R600::OpName::update_exec_mask,
    R600::OpName::update_pred,
    R600::OpName::write,
    R600::OpName::omod,
    R600::OpName::dst_rel,
    R600::OpName::clamp,
    R600::OpName::src0_neg,
    R600::OpName::src0_rel,
    R600::OpName::src0_abs,
    R600::OpName::src0_sel,
    R600::OpName::src1_neg,
    R600::OpName::src1_rel,
    R600::OpName::src1_abs,
    R600::OpName::src1_sel,
  };

  MachineOperand &MO = MI->getOperand(getOperandIdx(MI->getOpcode(),
      getSlotedOps(R600::OpName::pred_sel, Slot)));
  MIB->getOperand(getOperandIdx(Opcode, R600::OpName::pred_sel))
      .setReg(MO.getReg());

  for (unsigned i = 0; i < 14; i++) {
    MachineOperand &MO = MI->getOperand(
        getOperandIdx(MI->getOpcode(), getSlotedOps(Operands[i], Slot)));
    assert (MO.isImm());
    setImmOperand(*MIB, Operands[i], MO.getImm());
  }
  MIB->getOperand(20).setImm(0);
  return MIB;
}

MachineInstr *R600InstrInfo::buildMovImm(MachineBasicBlock &BB,
                                         MachineBasicBlock::iterator I,
                                         unsigned DstReg,
                                         uint64_t Imm) const {
  MachineInstr *MovImm = buildDefaultInstruction(BB, I, R600::MOV, DstReg,
                                                 R600::ALU_LITERAL_X);
  setImmOperand(*MovImm, R600::OpName::literal, Imm);
  return MovImm;
}

MachineInstr *R600InstrInfo::buildMovInstr(MachineBasicBlock *MBB,
                                           MachineBasicBlock::iterator I,
                                           unsigned DstReg, unsigned SrcReg) const {
  return buildDefaultInstruction(*MBB, I, R600::MOV, DstReg, SrcReg);
}

int R600InstrInfo::getOperandIdx(const MachineInstr &MI, unsigned Op) const {
  return getOperandIdx(MI.getOpcode(), Op);
}

int R600InstrInfo::getOperandIdx(unsigned Opcode, unsigned Op) const {
  return R600::getNamedOperandIdx(Opcode, Op);
}

void R600InstrInfo::setImmOperand(MachineInstr &MI, unsigned Op,
                                  int64_t Imm) const {
  int Idx = getOperandIdx(MI, Op);
  assert(Idx != -1 && "Operand not supported for this instruction.");
  assert(MI.getOperand(Idx).isImm());
  MI.getOperand(Idx).setImm(Imm);
}

//===----------------------------------------------------------------------===//
// Instruction flag getters/setters
//===----------------------------------------------------------------------===//

MachineOperand &R600InstrInfo::getFlagOp(MachineInstr &MI, unsigned SrcIdx,
                                         unsigned Flag) const {
  unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
  int FlagIndex = 0;
  if (Flag != 0) {
    // If we pass something other than the default value of Flag to this
    // function, it means we want to set a flag on an instruction
    // that uses native encoding.
    assert(HAS_NATIVE_OPERANDS(TargetFlags));
    bool IsOP3 = (TargetFlags & R600_InstFlag::OP3) == R600_InstFlag::OP3;
    switch (Flag) {
    case MO_FLAG_CLAMP:
      FlagIndex = getOperandIdx(MI, R600::OpName::clamp);
      break;
    case MO_FLAG_MASK:
      FlagIndex = getOperandIdx(MI, R600::OpName::write);
      break;
    case MO_FLAG_NOT_LAST:
    case MO_FLAG_LAST:
      FlagIndex = getOperandIdx(MI, R600::OpName::last);
      break;
    case MO_FLAG_NEG:
      switch (SrcIdx) {
      case 0:
        FlagIndex = getOperandIdx(MI, R600::OpName::src0_neg);
        break;
      case 1:
        FlagIndex = getOperandIdx(MI, R600::OpName::src1_neg);
        break;
      case 2:
        FlagIndex = getOperandIdx(MI, R600::OpName::src2_neg);
        break;
      }
      break;
    case MO_FLAG_ABS:
      assert(!IsOP3 && "Cannot set absolute value modifier for OP3 "
                       "instructions.");
      (void)IsOP3;
      switch (SrcIdx) {
      case 0:
        FlagIndex = getOperandIdx(MI, R600::OpName::src0_abs);
        break;
      case 1:
        FlagIndex = getOperandIdx(MI, R600::OpName::src1_abs);
        break;
      }
      break;
    default:
      FlagIndex = -1;
      break;
    }
    assert(FlagIndex != -1 && "Flag not supported for this instruction");
  } else {
    FlagIndex = GET_FLAG_OPERAND_IDX(TargetFlags);
    assert(FlagIndex != 0 &&
           "Instruction flags not supported for this instruction");
  }

  MachineOperand &FlagOp = MI.getOperand(FlagIndex);
  assert(FlagOp.isImm());
  return FlagOp;
}

void R600InstrInfo::addFlag(MachineInstr &MI, unsigned Operand,
                            unsigned Flag) const {
  unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
  if (Flag == 0) {
    return;
  }
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    if (Flag == MO_FLAG_NOT_LAST) {
      clearFlag(MI, Operand, MO_FLAG_LAST);
    } else if (Flag == MO_FLAG_MASK) {
      clearFlag(MI, Operand, Flag);
    } else {
      FlagOp.setImm(1);
    }
  } else {
    MachineOperand &FlagOp = getFlagOp(MI, Operand);
    FlagOp.setImm(FlagOp.getImm() | (Flag << (NUM_MO_FLAGS * Operand)));
  }
}

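// For instructions without native flag operands, every operand owns a
// NUM_MO_FLAGS-bit field inside the single flag immediate, so e.g. adding a
// flag on operand 1 ORs in (Flag << NUM_MO_FLAGS); clearFlag below masks the
// same bits back out.
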
void R600InstrInfo::clearFlag(MachineInstr &MI, unsigned Operand,
                              unsigned Flag) const {
  unsigned TargetFlags = get(MI.getOpcode()).TSFlags;
  if (HAS_NATIVE_OPERANDS(TargetFlags)) {
    MachineOperand &FlagOp = getFlagOp(MI, Operand, Flag);
    FlagOp.setImm(0);
  } else {
    MachineOperand &FlagOp = getFlagOp(MI);
    unsigned InstFlags = FlagOp.getImm();
    InstFlags &= ~(Flag << (NUM_MO_FLAGS * Operand));
    FlagOp.setImm(InstFlags);
  }
}

unsigned R600InstrInfo::getAddressSpaceForPseudoSourceKind(
    unsigned Kind) const {
  switch (Kind) {
  case PseudoSourceValue::Stack:
  case PseudoSourceValue::FixedStack:
    return AMDGPUAS::PRIVATE_ADDRESS;
  case PseudoSourceValue::ConstantPool:
  case PseudoSourceValue::GOT:
  case PseudoSourceValue::JumpTable:
  case PseudoSourceValue::GlobalValueCallEntry:
  case PseudoSourceValue::ExternalSymbolCallEntry:
  case PseudoSourceValue::TargetCustom:
    return AMDGPUAS::CONSTANT_ADDRESS;
  }

  llvm_unreachable("Invalid pseudo source kind");
}
1516 llvm_unreachable("Invalid pseudo source kind");