1 //===-- SIInstrInfo.td - SI Instruction Infos -------------*- tablegen -*--===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
9 def isCI : Predicate<"Subtarget->getGeneration() "
10 ">= AMDGPUSubtarget::SEA_ISLANDS">;
11 def isCIOnly : Predicate<"Subtarget->getGeneration() =="
12 "AMDGPUSubtarget::SEA_ISLANDS">,
13 AssemblerPredicate <"FeatureSeaIslands">;
14 def isVIOnly : Predicate<"Subtarget->getGeneration() =="
15 "AMDGPUSubtarget::VOLCANIC_ISLANDS">,
16 AssemblerPredicate <"FeatureVolcanicIslands">;
18 def DisableInst : Predicate <"false">, AssemblerPredicate<"FeatureDisable">;
20 class GCNPredicateControl : PredicateControl {
21 Predicate SIAssemblerPredicate = isSICI;
22 Predicate VIAssemblerPredicate = isVI;
25 // Except for the NONE field, this must be kept in sync with the
26 // SIEncodingFamily enum in AMDGPUInstrInfo.cpp
27 def SIEncodingFamily {
37 //===----------------------------------------------------------------------===//
39 //===----------------------------------------------------------------------===//
41 def AMDGPUclamp : SDNode<"AMDGPUISD::CLAMP", SDTFPUnaryOp>;
43 def SIsbuffer_load : SDNode<"AMDGPUISD::SBUFFER_LOAD",
44 SDTypeProfile<1, 3, [SDTCisVT<1, v4i32>, SDTCisVT<2, i32>, SDTCisVT<3, i1>]>,
45 [SDNPMayLoad, SDNPMemOperand]
48 def SIatomic_inc : SDNode<"AMDGPUISD::ATOMIC_INC", SDTAtomic2,
49 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
52 def SIatomic_dec : SDNode<"AMDGPUISD::ATOMIC_DEC", SDTAtomic2,
53 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
56 def SDTAtomic2_f32 : SDTypeProfile<1, 2, [
57 SDTCisSameAs<0,2>, SDTCisFP<0>, SDTCisPtrTy<1>
60 def SIatomic_fadd : SDNode<"AMDGPUISD::ATOMIC_LOAD_FADD", SDTAtomic2_f32,
61 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
64 def SIatomic_fmin : SDNode<"AMDGPUISD::ATOMIC_LOAD_FMIN", SDTAtomic2_f32,
65 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
68 def SIatomic_fmax : SDNode<"AMDGPUISD::ATOMIC_LOAD_FMAX", SDTAtomic2_f32,
69 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
72 def SDTtbuffer_load : SDTypeProfile<1, 8,
74 SDTCisVT<1, v4i32>, // rsrc
75 SDTCisVT<2, i32>, // vindex(VGPR)
76 SDTCisVT<3, i32>, // voffset(VGPR)
77 SDTCisVT<4, i32>, // soffset(SGPR)
78 SDTCisVT<5, i32>, // offset(imm)
79 SDTCisVT<6, i32>, // format(imm)
80 SDTCisVT<7, i32>, // cachecontrol(imm)
81 SDTCisVT<8, i1> // idxen(imm)
84 def SItbuffer_load : SDNode<"AMDGPUISD::TBUFFER_LOAD_FORMAT", SDTtbuffer_load,
85 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]>;
86 def SItbuffer_load_d16 : SDNode<"AMDGPUISD::TBUFFER_LOAD_FORMAT_D16",
88 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]>;
90 def SDTtbuffer_store : SDTypeProfile<0, 9,
92 SDTCisVT<1, v4i32>, // rsrc
93 SDTCisVT<2, i32>, // vindex(VGPR)
94 SDTCisVT<3, i32>, // voffset(VGPR)
95 SDTCisVT<4, i32>, // soffset(SGPR)
96 SDTCisVT<5, i32>, // offset(imm)
97 SDTCisVT<6, i32>, // format(imm)
98 SDTCisVT<7, i32>, // cachecontrol(imm)
99 SDTCisVT<8, i1> // idxen(imm)
102 def SItbuffer_store : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT", SDTtbuffer_store,
103 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
104 def SItbuffer_store_x3 : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT_X3",
106 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
107 def SItbuffer_store_d16 : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT_D16",
109 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
111 def SDTBufferLoad : SDTypeProfile<1, 7,
113 SDTCisVT<1, v4i32>, // rsrc
114 SDTCisVT<2, i32>, // vindex(VGPR)
115 SDTCisVT<3, i32>, // voffset(VGPR)
116 SDTCisVT<4, i32>, // soffset(SGPR)
117 SDTCisVT<5, i32>, // offset(imm)
118 SDTCisVT<6, i32>, // cachepolicy(imm)
119 SDTCisVT<7, i1>]>; // idxen(imm)
121 def SIbuffer_load : SDNode <"AMDGPUISD::BUFFER_LOAD", SDTBufferLoad,
122 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
123 def SIbuffer_load_format : SDNode <"AMDGPUISD::BUFFER_LOAD_FORMAT", SDTBufferLoad,
124 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
125 def SIbuffer_load_format_d16 : SDNode <"AMDGPUISD::BUFFER_LOAD_FORMAT_D16",
127 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
129 def SDTBufferStore : SDTypeProfile<0, 8,
131 SDTCisVT<1, v4i32>, // rsrc
132 SDTCisVT<2, i32>, // vindex(VGPR)
133 SDTCisVT<3, i32>, // voffset(VGPR)
134 SDTCisVT<4, i32>, // soffset(SGPR)
135 SDTCisVT<5, i32>, // offset(imm)
136 SDTCisVT<6, i32>, // cachepolicy(imm)
137 SDTCisVT<7, i1>]>; // idxen(imm)
139 def SIbuffer_store : SDNode <"AMDGPUISD::BUFFER_STORE", SDTBufferStore,
140 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
141 def SIbuffer_store_format : SDNode <"AMDGPUISD::BUFFER_STORE_FORMAT",
143 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
144 def SIbuffer_store_format_d16 : SDNode <"AMDGPUISD::BUFFER_STORE_FORMAT_D16",
146 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
148 class SDBufferAtomic<string opcode> : SDNode <opcode,
150 [SDTCisVT<0, i32>, // dst
151 SDTCisVT<1, i32>, // vdata
152 SDTCisVT<2, v4i32>, // rsrc
153 SDTCisVT<3, i32>, // vindex(VGPR)
154 SDTCisVT<4, i32>, // voffset(VGPR)
155 SDTCisVT<5, i32>, // soffset(SGPR)
156 SDTCisVT<6, i32>, // offset(imm)
157 SDTCisVT<7, i32>, // cachepolicy(imm)
158 SDTCisVT<8, i1>]>, // idxen(imm)
159 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad, SDNPMayStore]
162 def SIbuffer_atomic_swap : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SWAP">;
163 def SIbuffer_atomic_add : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_ADD">;
164 def SIbuffer_atomic_sub : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SUB">;
165 def SIbuffer_atomic_smin : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SMIN">;
166 def SIbuffer_atomic_umin : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_UMIN">;
167 def SIbuffer_atomic_smax : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SMAX">;
168 def SIbuffer_atomic_umax : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_UMAX">;
169 def SIbuffer_atomic_and : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_AND">;
170 def SIbuffer_atomic_or : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_OR">;
171 def SIbuffer_atomic_xor : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_XOR">;
173 def SIbuffer_atomic_cmpswap : SDNode <"AMDGPUISD::BUFFER_ATOMIC_CMPSWAP",
175 [SDTCisVT<0, i32>, // dst
176 SDTCisVT<1, i32>, // src
177 SDTCisVT<2, i32>, // cmp
178 SDTCisVT<3, v4i32>, // rsrc
179 SDTCisVT<4, i32>, // vindex(VGPR)
180 SDTCisVT<5, i32>, // voffset(VGPR)
181 SDTCisVT<6, i32>, // soffset(SGPR)
182 SDTCisVT<7, i32>, // offset(imm)
183 SDTCisVT<8, i32>, // cachepolicy(imm)
184 SDTCisVT<9, i1>]>, // idxen(imm)
185 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad, SDNPMayStore]
188 def SIpc_add_rel_offset : SDNode<"AMDGPUISD::PC_ADD_REL_OFFSET",
189 SDTypeProfile<1, 2, [SDTCisVT<0, iPTR>, SDTCisSameAs<0,1>, SDTCisSameAs<0,2>]>
192 //===----------------------------------------------------------------------===//
194 //===----------------------------------------------------------------------===//
196 // Helpers for classifying source value types (float, integer, packed).
197 // XXX - do f16 instructions?
198 class isFloatType<ValueType SrcVT> {
200 !if(!eq(SrcVT.Value, f16.Value), 1,
201 !if(!eq(SrcVT.Value, f32.Value), 1,
202 !if(!eq(SrcVT.Value, f64.Value), 1,
203 !if(!eq(SrcVT.Value, v2f16.Value), 1,
207 class isIntType<ValueType SrcVT> {
209 !if(!eq(SrcVT.Value, i16.Value), 1,
210 !if(!eq(SrcVT.Value, i32.Value), 1,
211 !if(!eq(SrcVT.Value, i64.Value), 1,
215 class isPackedType<ValueType SrcVT> {
217 !if(!eq(SrcVT.Value, v2i16.Value), 1,
218 !if(!eq(SrcVT.Value, v2f16.Value), 1, 0)
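// For reference, a few sample evaluations of the classification helpers above,
// read directly off the !if chains:
//   isFloatType<f32>.ret    == 1        isFloatType<i32>.ret    == 0
//   isIntType<i64>.ret      == 1        isPackedType<v2f16>.ret == 1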
222 //===----------------------------------------------------------------------===//
223 // PatFrags for global memory operations
224 //===----------------------------------------------------------------------===//
226 defm atomic_inc_global : global_binary_atomic_op<SIatomic_inc>;
227 defm atomic_dec_global : global_binary_atomic_op<SIatomic_dec>;
229 def atomic_inc_local : local_binary_atomic_op<SIatomic_inc>;
230 def atomic_dec_local : local_binary_atomic_op<SIatomic_dec>;
231 def atomic_load_fadd_local : local_binary_atomic_op<SIatomic_fadd>;
232 def atomic_load_fmin_local : local_binary_atomic_op<SIatomic_fmin>;
233 def atomic_load_fmax_local : local_binary_atomic_op<SIatomic_fmax>;
235 //===----------------------------------------------------------------------===//
236 // SDNodes and PatFrags for loads/stores with a glue input.
237 // These are the SDNode and PatFrag variants for local loads and stores,
238 // used to enable s_mov_b32 m0, -1 to be glued to the memory instructions.
240 // These mirror the regular load/store PatFrags and rely on special
241 // processing during Select() to add the glued copy.
243 //===----------------------------------------------------------------------===//
245 def AMDGPUld_glue : SDNode <"ISD::LOAD", SDTLoad,
246 [SDNPHasChain, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
249 def AMDGPUatomic_ld_glue : SDNode <"ISD::ATOMIC_LOAD", SDTAtomicLoad,
250 [SDNPHasChain, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
253 def unindexedload_glue : PatFrag <(ops node:$ptr), (AMDGPUld_glue node:$ptr), [{
254 return cast<LoadSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
257 def load_glue : PatFrag <(ops node:$ptr), (unindexedload_glue node:$ptr), [{
258 return cast<LoadSDNode>(N)->getExtensionType() == ISD::NON_EXTLOAD;
261 def atomic_load_32_glue : PatFrag<(ops node:$ptr),
262 (AMDGPUatomic_ld_glue node:$ptr)> {
267 def atomic_load_64_glue : PatFrag<(ops node:$ptr),
268 (AMDGPUatomic_ld_glue node:$ptr)> {
273 def extload_glue : PatFrag<(ops node:$ptr), (load_glue node:$ptr), [{
274 return cast<LoadSDNode>(N)->getExtensionType() == ISD::EXTLOAD;
277 def sextload_glue : PatFrag<(ops node:$ptr), (unindexedload_glue node:$ptr), [{
278 return cast<LoadSDNode>(N)->getExtensionType() == ISD::SEXTLOAD;
281 def zextload_glue : PatFrag<(ops node:$ptr), (unindexedload_glue node:$ptr), [{
282 return cast<LoadSDNode>(N)->getExtensionType() == ISD::ZEXTLOAD;
285 def az_extload_glue : AZExtLoadBase <unindexedload_glue>;
287 def az_extloadi8_glue : PatFrag<(ops node:$ptr), (az_extload_glue node:$ptr), [{
288 return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
291 def az_extloadi16_glue : PatFrag<(ops node:$ptr), (az_extload_glue node:$ptr), [{
292 return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
295 def sextloadi8_glue : PatFrag<(ops node:$ptr), (sextload_glue node:$ptr), [{
296 return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i8;
299 def sextloadi16_glue : PatFrag<(ops node:$ptr), (sextload_glue node:$ptr), [{
300 return cast<LoadSDNode>(N)->getMemoryVT() == MVT::i16;
303 def load_glue_align8 : Aligned8Bytes <
304 (ops node:$ptr), (load_glue node:$ptr)
306 def load_glue_align16 : Aligned16Bytes <
307 (ops node:$ptr), (load_glue node:$ptr)
311 def load_local_m0 : LoadFrag<load_glue>, LocalAddress;
312 def sextloadi8_local_m0 : LoadFrag<sextloadi8_glue>, LocalAddress;
313 def sextloadi16_local_m0 : LoadFrag<sextloadi16_glue>, LocalAddress;
314 def az_extloadi8_local_m0 : LoadFrag<az_extloadi8_glue>, LocalAddress;
315 def az_extloadi16_local_m0 : LoadFrag<az_extloadi16_glue>, LocalAddress;
316 def load_align8_local_m0 : LoadFrag <load_glue_align8>, LocalAddress;
317 def load_align16_local_m0 : LoadFrag <load_glue_align16>, LocalAddress;
318 def atomic_load_32_local_m0 : LoadFrag<atomic_load_32_glue>, LocalAddress;
319 def atomic_load_64_local_m0 : LoadFrag<atomic_load_64_glue>, LocalAddress;
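// A minimal sketch of the intent behind these *_local_m0 fragments (assuming
// the usual DS selection flow): DS load patterns are written against them,
// e.g. matching a dag like (i32 (load_local_m0 i32:$ptr)), so that the glued
// "s_mov_b32 m0, -1" copy added during Select() stays attached to the
// selected DS instruction.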
322 def AMDGPUst_glue : SDNode <"ISD::STORE", SDTStore,
323 [SDNPHasChain, SDNPMayStore, SDNPMemOperand, SDNPInGlue]
326 def AMDGPUatomic_st_glue : SDNode <"ISD::ATOMIC_STORE", SDTAtomicStore,
327 [SDNPHasChain, SDNPMayStore, SDNPMemOperand, SDNPInGlue]
330 def atomic_store_glue : PatFrag<(ops node:$ptr, node:$val),
331 (AMDGPUatomic_st_glue node:$ptr, node:$val)> {
334 def unindexedstore_glue : PatFrag<(ops node:$val, node:$ptr),
335 (AMDGPUst_glue node:$val, node:$ptr), [{
336 return cast<StoreSDNode>(N)->getAddressingMode() == ISD::UNINDEXED;
339 def store_glue : PatFrag<(ops node:$val, node:$ptr),
340 (unindexedstore_glue node:$val, node:$ptr), [{
341 return !cast<StoreSDNode>(N)->isTruncatingStore();
344 def truncstore_glue : PatFrag<(ops node:$val, node:$ptr),
345 (unindexedstore_glue node:$val, node:$ptr), [{
346 return cast<StoreSDNode>(N)->isTruncatingStore();
349 def truncstorei8_glue : PatFrag<(ops node:$val, node:$ptr),
350 (truncstore_glue node:$val, node:$ptr), [{
351 return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i8;
354 def truncstorei16_glue : PatFrag<(ops node:$val, node:$ptr),
355 (truncstore_glue node:$val, node:$ptr), [{
356 return cast<StoreSDNode>(N)->getMemoryVT() == MVT::i16;
359 def store_glue_align8 : Aligned8Bytes <
360 (ops node:$value, node:$ptr), (store_glue node:$value, node:$ptr)
363 def store_glue_align16 : Aligned16Bytes <
364 (ops node:$value, node:$ptr), (store_glue node:$value, node:$ptr)
367 def store_local_m0 : StoreFrag<store_glue>, LocalAddress;
368 def truncstorei8_local_m0 : StoreFrag<truncstorei8_glue>, LocalAddress;
369 def truncstorei16_local_m0 : StoreFrag<truncstorei16_glue>, LocalAddress;
370 def atomic_store_local_m0 : StoreFrag<AMDGPUatomic_st_glue>, LocalAddress;
372 def store_align8_local_m0 : StoreFrag<store_glue_align8>, LocalAddress;
373 def store_align16_local_m0 : StoreFrag<store_glue_align16>, LocalAddress;
375 def si_setcc_uniform : PatFrag <
376 (ops node:$lhs, node:$rhs, node:$cond),
377 (setcc node:$lhs, node:$rhs, node:$cond), [{
378 for (SDNode *Use : N->uses()) {
379 if (Use->isMachineOpcode() || Use->getOpcode() != ISD::CopyToReg)
382 unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
383 if (Reg != AMDGPU::SCC)
389 def lshr_rev : PatFrag <
390 (ops node:$src1, node:$src0),
394 def ashr_rev : PatFrag <
395 (ops node:$src1, node:$src0),
399 def lshl_rev : PatFrag <
400 (ops node:$src1, node:$src0),
404 multiclass SIAtomicM0Glue2 <string op_name, bit is_amdgpu = 0,
405 SDTypeProfile tc = SDTAtomic2> {
408 !if(is_amdgpu, "AMDGPUISD", "ISD")#"::ATOMIC_"#op_name, tc,
409 [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
412 def _local_m0 : local_binary_atomic_op <!cast<SDNode>(NAME#"_glue")>;
415 defm atomic_load_add : SIAtomicM0Glue2 <"LOAD_ADD">;
416 defm atomic_load_sub : SIAtomicM0Glue2 <"LOAD_SUB">;
417 defm atomic_inc : SIAtomicM0Glue2 <"INC", 1>;
418 defm atomic_dec : SIAtomicM0Glue2 <"DEC", 1>;
419 defm atomic_load_and : SIAtomicM0Glue2 <"LOAD_AND">;
420 defm atomic_load_min : SIAtomicM0Glue2 <"LOAD_MIN">;
421 defm atomic_load_max : SIAtomicM0Glue2 <"LOAD_MAX">;
422 defm atomic_load_or : SIAtomicM0Glue2 <"LOAD_OR">;
423 defm atomic_load_xor : SIAtomicM0Glue2 <"LOAD_XOR">;
424 defm atomic_load_umin : SIAtomicM0Glue2 <"LOAD_UMIN">;
425 defm atomic_load_umax : SIAtomicM0Glue2 <"LOAD_UMAX">;
426 defm atomic_swap : SIAtomicM0Glue2 <"SWAP">;
427 defm atomic_load_fadd : SIAtomicM0Glue2 <"LOAD_FADD", 1, SDTAtomic2_f32>;
428 defm atomic_load_fmin : SIAtomicM0Glue2 <"LOAD_FMIN", 1, SDTAtomic2_f32>;
429 defm atomic_load_fmax : SIAtomicM0Glue2 <"LOAD_FMAX", 1, SDTAtomic2_f32>;
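// As an example of what the multiclass above expands to:
// 'defm atomic_load_add : SIAtomicM0Glue2 <"LOAD_ADD">' produces
// atomic_load_add_glue (an SDNode for ISD::ATOMIC_LOAD_ADD carrying the glue
// input) plus atomic_load_add_local_m0, the local-address PatFrag built on it.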
431 def atomic_cmp_swap_glue : SDNode <"ISD::ATOMIC_CMP_SWAP", SDTAtomic3,
432 [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
435 def atomic_cmp_swap_local_m0 : AtomicCmpSwapLocal<atomic_cmp_swap_glue>;
438 def as_i1imm : SDNodeXForm<imm, [{
439 return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i1);
442 def as_i8imm : SDNodeXForm<imm, [{
443 return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i8);
446 def as_i16imm : SDNodeXForm<imm, [{
447 return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i16);
450 def as_i32imm: SDNodeXForm<imm, [{
451 return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i32);
454 def as_i64imm: SDNodeXForm<imm, [{
455 return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i64);
458 def cond_as_i32imm: SDNodeXForm<cond, [{
459 return CurDAG->getTargetConstant(N->get(), SDLoc(N), MVT::i32);
462 // Copied from the AArch64 backend:
463 def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
464 return CurDAG->getTargetConstant(
465 N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
468 def frameindex_to_targetframeindex : SDNodeXForm<frameindex, [{
469 auto FI = cast<FrameIndexSDNode>(N);
470 return CurDAG->getTargetFrameIndex(FI->getIndex(), MVT::i32);
473 // Copied from the AArch64 backend:
474 def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
475 return CurDAG->getTargetConstant(
476 N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
479 class bitextract_imm<int bitnum> : SDNodeXForm<imm, [{
480 uint64_t Imm = N->getZExtValue();
481 unsigned Bit = (Imm >> }] # bitnum # [{ ) & 1;
482 return CurDAG->getTargetConstant(Bit, SDLoc(N), MVT::i1);
485 def SIMM16bit : PatLeaf <(imm),
486 [{return isInt<16>(N->getSExtValue());}]
489 class InlineImm <ValueType vt> : PatLeaf <(vt imm), [{
490 return isInlineImmediate(N);
493 class InlineFPImm <ValueType vt> : PatLeaf <(vt fpimm), [{
494 return isInlineImmediate(N);
497 class VGPRImm <dag frag> : PatLeaf<frag, [{
501 def NegateImm : SDNodeXForm<imm, [{
502 return CurDAG->getConstant(-N->getSExtValue(), SDLoc(N), MVT::i32);
505 // TODO: When FP inline imm values work?
506 def NegSubInlineConst32 : ImmLeaf<i32, [{
507 return Imm < -16 && Imm >= -64;
510 def NegSubInlineConst16 : ImmLeaf<i16, [{
511 return Imm < -16 && Imm >= -64;
514 def ShiftAmt32Imm : PatLeaf <(imm), [{
515 return N->getZExtValue() < 32;
518 //===----------------------------------------------------------------------===//
520 //===----------------------------------------------------------------------===//
522 def SoppBrTarget : AsmOperandClass {
523 let Name = "SoppBrTarget";
524 let ParserMethod = "parseSOppBrTarget";
527 def sopp_brtarget : Operand<OtherVT> {
528 let EncoderMethod = "getSOPPBrEncoding";
529 let DecoderMethod = "decodeSoppBrTarget";
530 let OperandType = "OPERAND_PCREL";
531 let ParserMatchClass = SoppBrTarget;
534 def si_ga : Operand<iPTR>;
536 def InterpSlotMatchClass : AsmOperandClass {
537 let Name = "InterpSlot";
538 let PredicateMethod = "isInterpSlot";
539 let ParserMethod = "parseInterpSlot";
540 let RenderMethod = "addImmOperands";
543 def InterpSlot : Operand<i32> {
544 let PrintMethod = "printInterpSlot";
545 let ParserMatchClass = InterpSlotMatchClass;
546 let OperandType = "OPERAND_IMMEDIATE";
549 def AttrMatchClass : AsmOperandClass {
551 let PredicateMethod = "isInterpAttr";
552 let ParserMethod = "parseInterpAttr";
553 let RenderMethod = "addImmOperands";
556 // It appears to be necessary to create a separate operand for this to
557 // be able to parse attr<num> with no space.
558 def Attr : Operand<i32> {
559 let PrintMethod = "printInterpAttr";
560 let ParserMatchClass = AttrMatchClass;
561 let OperandType = "OPERAND_IMMEDIATE";
564 def AttrChanMatchClass : AsmOperandClass {
565 let Name = "AttrChan";
566 let PredicateMethod = "isAttrChan";
567 let RenderMethod = "addImmOperands";
570 def AttrChan : Operand<i32> {
571 let PrintMethod = "printInterpAttrChan";
572 let ParserMatchClass = AttrChanMatchClass;
573 let OperandType = "OPERAND_IMMEDIATE";
576 def SendMsgMatchClass : AsmOperandClass {
577 let Name = "SendMsg";
578 let PredicateMethod = "isSendMsg";
579 let ParserMethod = "parseSendMsgOp";
580 let RenderMethod = "addImmOperands";
583 def SwizzleMatchClass : AsmOperandClass {
584 let Name = "Swizzle";
585 let PredicateMethod = "isSwizzle";
586 let ParserMethod = "parseSwizzleOp";
587 let RenderMethod = "addImmOperands";
591 def ExpTgtMatchClass : AsmOperandClass {
593 let PredicateMethod = "isExpTgt";
594 let ParserMethod = "parseExpTgt";
595 let RenderMethod = "printExpTgt";
598 def SendMsgImm : Operand<i32> {
599 let PrintMethod = "printSendMsg";
600 let ParserMatchClass = SendMsgMatchClass;
603 def SwizzleImm : Operand<i16> {
604 let PrintMethod = "printSwizzle";
605 let ParserMatchClass = SwizzleMatchClass;
608 def SWaitMatchClass : AsmOperandClass {
609 let Name = "SWaitCnt";
610 let RenderMethod = "addImmOperands";
611 let ParserMethod = "parseSWaitCntOps";
614 def VReg32OrOffClass : AsmOperandClass {
615 let Name = "VReg32OrOff";
616 let ParserMethod = "parseVReg32OrOff";
619 def WAIT_FLAG : Operand <i32> {
620 let ParserMatchClass = SWaitMatchClass;
621 let PrintMethod = "printWaitFlag";
624 include "SIInstrFormats.td"
625 include "VIInstrFormats.td"
627 //===----------------------------------------------------------------------===//
628 // ExpSrc*: special cases for exp source operands, which are printed as
629 // "off" depending on the en operand.
630 //===----------------------------------------------------------------------===//
632 def ExpSrc0 : RegisterOperand<VGPR_32> {
633 let PrintMethod = "printExpSrc0";
634 let ParserMatchClass = VReg32OrOffClass;
637 def ExpSrc1 : RegisterOperand<VGPR_32> {
638 let PrintMethod = "printExpSrc1";
639 let ParserMatchClass = VReg32OrOffClass;
642 def ExpSrc2 : RegisterOperand<VGPR_32> {
643 let PrintMethod = "printExpSrc2";
644 let ParserMatchClass = VReg32OrOffClass;
647 def ExpSrc3 : RegisterOperand<VGPR_32> {
648 let PrintMethod = "printExpSrc3";
649 let ParserMatchClass = VReg32OrOffClass;
652 class SDWASrc<ValueType vt> : RegisterOperand<VS_32> {
653 let OperandNamespace = "AMDGPU";
654 string Type = !if(isFloatType<vt>.ret, "FP", "INT");
655 let OperandType = "OPERAND_REG_INLINE_C_"#Type#vt.Size;
656 let DecoderMethod = "decodeSDWASrc"#vt.Size;
657 let EncoderMethod = "getSDWASrcEncoding";
660 def SDWASrc_i32 : SDWASrc<i32>;
661 def SDWASrc_i16 : SDWASrc<i16>;
662 def SDWASrc_f32 : SDWASrc<f32>;
663 def SDWASrc_f16 : SDWASrc<f16>;
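// For instance, SDWASrc_f32 resolves to OperandType OPERAND_REG_INLINE_C_FP32
// with DecoderMethod decodeSDWASrc32, while SDWASrc_i16 resolves to
// OPERAND_REG_INLINE_C_INT16 with DecoderMethod decodeSDWASrc16.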
665 def SDWAVopcDst : VOPDstOperand<SReg_64> {
666 let OperandNamespace = "AMDGPU";
667 let OperandType = "OPERAND_SDWA_VOPC_DST";
668 let EncoderMethod = "getSDWAVopcDstEncoding";
669 let DecoderMethod = "decodeSDWAVopcDst";
672 class NamedMatchClass<string CName, bit Optional = 1> : AsmOperandClass {
673 let Name = "Imm"#CName;
674 let PredicateMethod = "is"#CName;
675 let ParserMethod = !if(Optional, "parseOptionalOperand", "parse"#CName);
676 let RenderMethod = "addImmOperands";
677 let IsOptional = Optional;
678 let DefaultMethod = !if(Optional, "default"#CName, ?);
681 class NamedOperandBit<string Name, AsmOperandClass MatchClass> : Operand<i1> {
682 let PrintMethod = "print"#Name;
683 let ParserMatchClass = MatchClass;
686 class NamedOperandU8<string Name, AsmOperandClass MatchClass> : Operand<i8> {
687 let PrintMethod = "print"#Name;
688 let ParserMatchClass = MatchClass;
691 class NamedOperandU12<string Name, AsmOperandClass MatchClass> : Operand<i16> {
692 let PrintMethod = "print"#Name;
693 let ParserMatchClass = MatchClass;
696 class NamedOperandU16<string Name, AsmOperandClass MatchClass> : Operand<i16> {
697 let PrintMethod = "print"#Name;
698 let ParserMatchClass = MatchClass;
701 class NamedOperandS13<string Name, AsmOperandClass MatchClass> : Operand<i16> {
702 let PrintMethod = "print"#Name;
703 let ParserMatchClass = MatchClass;
706 class NamedOperandU32<string Name, AsmOperandClass MatchClass> : Operand<i32> {
707 let PrintMethod = "print"#Name;
708 let ParserMatchClass = MatchClass;
711 class NamedOperandU32Default0<string Name, AsmOperandClass MatchClass> :
712 OperandWithDefaultOps<i32, (ops (i32 0))> {
713 let PrintMethod = "print"#Name;
714 let ParserMatchClass = MatchClass;
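// How these pieces compose, using one of the defs below as an example:
//   def offen : NamedOperandBit<"Offen", NamedMatchClass<"Offen">>;
// gives an optional immediate operand whose match class is named "ImmOffen",
// printed by printOffen, parsed by parseOptionalOperand (since Optional
// defaults to 1) and checked by the isOffen predicate.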
717 let OperandType = "OPERAND_IMMEDIATE" in {
719 def offen : NamedOperandBit<"Offen", NamedMatchClass<"Offen">>;
720 def idxen : NamedOperandBit<"Idxen", NamedMatchClass<"Idxen">>;
721 def addr64 : NamedOperandBit<"Addr64", NamedMatchClass<"Addr64">>;
723 def offset_u12 : NamedOperandU12<"Offset", NamedMatchClass<"OffsetU12">>;
724 def offset_s13 : NamedOperandS13<"OffsetS13", NamedMatchClass<"OffsetS13">>;
725 def offset : NamedOperandU16<"Offset", NamedMatchClass<"Offset">>;
726 def offset0 : NamedOperandU8<"Offset0", NamedMatchClass<"Offset0">>;
727 def offset1 : NamedOperandU8<"Offset1", NamedMatchClass<"Offset1">>;
729 def gds : NamedOperandBit<"GDS", NamedMatchClass<"GDS">>;
731 def omod : NamedOperandU32<"OModSI", NamedMatchClass<"OModSI">>;
732 def clampmod : NamedOperandBit<"ClampSI", NamedMatchClass<"ClampSI">>;
733 def highmod : NamedOperandBit<"High", NamedMatchClass<"High">>;
735 def GLC : NamedOperandBit<"GLC", NamedMatchClass<"GLC">>;
736 def SLC : NamedOperandBit<"SLC", NamedMatchClass<"SLC">>;
737 def TFE : NamedOperandBit<"TFE", NamedMatchClass<"TFE">>;
738 def UNorm : NamedOperandBit<"UNorm", NamedMatchClass<"UNorm">>;
739 def DA : NamedOperandBit<"DA", NamedMatchClass<"DA">>;
740 def R128A16 : NamedOperandBit<"R128A16", NamedMatchClass<"R128A16">>;
741 def D16 : NamedOperandBit<"D16", NamedMatchClass<"D16">>;
742 def LWE : NamedOperandBit<"LWE", NamedMatchClass<"LWE">>;
743 def exp_compr : NamedOperandBit<"ExpCompr", NamedMatchClass<"ExpCompr">>;
744 def exp_vm : NamedOperandBit<"ExpVM", NamedMatchClass<"ExpVM">>;
746 def FORMAT : NamedOperandU8<"FORMAT", NamedMatchClass<"FORMAT">>;
748 def DMask : NamedOperandU16<"DMask", NamedMatchClass<"DMask">>;
750 def dpp_ctrl : NamedOperandU32<"DPPCtrl", NamedMatchClass<"DPPCtrl", 0>>;
751 def row_mask : NamedOperandU32<"RowMask", NamedMatchClass<"RowMask">>;
752 def bank_mask : NamedOperandU32<"BankMask", NamedMatchClass<"BankMask">>;
753 def bound_ctrl : NamedOperandBit<"BoundCtrl", NamedMatchClass<"BoundCtrl">>;
755 def dst_sel : NamedOperandU32<"SDWADstSel", NamedMatchClass<"SDWADstSel">>;
756 def src0_sel : NamedOperandU32<"SDWASrc0Sel", NamedMatchClass<"SDWASrc0Sel">>;
757 def src1_sel : NamedOperandU32<"SDWASrc1Sel", NamedMatchClass<"SDWASrc1Sel">>;
758 def dst_unused : NamedOperandU32<"SDWADstUnused", NamedMatchClass<"SDWADstUnused">>;
760 def op_sel : NamedOperandU32Default0<"OpSel", NamedMatchClass<"OpSel">>;
761 def op_sel_hi : NamedOperandU32Default0<"OpSelHi", NamedMatchClass<"OpSelHi">>;
762 def neg_lo : NamedOperandU32Default0<"NegLo", NamedMatchClass<"NegLo">>;
763 def neg_hi : NamedOperandU32Default0<"NegHi", NamedMatchClass<"NegHi">>;
765 def hwreg : NamedOperandU16<"Hwreg", NamedMatchClass<"Hwreg", 0>>;
767 def exp_tgt : NamedOperandU8<"ExpTgt", NamedMatchClass<"ExpTgt", 0>> {
771 } // End OperandType = "OPERAND_IMMEDIATE"
773 class KImmMatchClass<int size> : AsmOperandClass {
774 let Name = "KImmFP"#size;
775 let PredicateMethod = "isKImmFP"#size;
776 let ParserMethod = "parseImm";
777 let RenderMethod = "addKImmFP"#size#"Operands";
780 class kimmOperand<ValueType vt> : Operand<vt> {
781 let OperandNamespace = "AMDGPU";
782 let OperandType = "OPERAND_KIMM"#vt.Size;
783 let PrintMethod = "printU"#vt.Size#"ImmOperand";
784 let ParserMatchClass = !cast<AsmOperandClass>("KImmFP"#vt.Size#"MatchClass");
787 // 32-bit VALU immediate operand that uses the constant bus.
788 def KImmFP32MatchClass : KImmMatchClass<32>;
789 def f32kimm : kimmOperand<i32>;
791 // 32-bit VALU immediate operand with a 16-bit value that uses the constant bus.
793 def KImmFP16MatchClass : KImmMatchClass<16>;
794 def f16kimm : kimmOperand<i16>;
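// A hedged usage note: f32kimm resolves to OperandType OPERAND_KIMM32 and is
// matched through KImmFP32MatchClass above; these operands are intended for
// the VOP2 encodings that carry an extra 32-bit literal, in the style of
// v_madmk_f32 / v_madak_f32.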
797 def VOPDstS64 : VOPDstOperand <SReg_64>;
799 class FPInputModsMatchClass <int opSize> : AsmOperandClass {
800 let Name = "RegOrImmWithFP"#opSize#"InputMods";
801 let ParserMethod = "parseRegOrImmWithFPInputMods";
802 let PredicateMethod = "isRegOrImmWithFP"#opSize#"InputMods";
805 def FP16InputModsMatchClass : FPInputModsMatchClass<16>;
806 def FP32InputModsMatchClass : FPInputModsMatchClass<32>;
807 def FP64InputModsMatchClass : FPInputModsMatchClass<64>;
809 class InputMods <AsmOperandClass matchClass> : Operand <i32> {
810 let OperandNamespace = "AMDGPU";
811 let OperandType = "OPERAND_INPUT_MODS";
812 let ParserMatchClass = matchClass;
815 class FPInputMods <FPInputModsMatchClass matchClass> : InputMods <matchClass> {
816 let PrintMethod = "printOperandAndFPInputMods";
819 def FP16InputMods : FPInputMods<FP16InputModsMatchClass>;
820 def FP32InputMods : FPInputMods<FP32InputModsMatchClass>;
821 def FP64InputMods : FPInputMods<FP64InputModsMatchClass>;
823 class IntInputModsMatchClass <int opSize> : AsmOperandClass {
824 let Name = "RegOrImmWithInt"#opSize#"InputMods";
825 let ParserMethod = "parseRegOrImmWithIntInputMods";
826 let PredicateMethod = "isRegOrImmWithInt"#opSize#"InputMods";
828 def Int32InputModsMatchClass : IntInputModsMatchClass<32>;
829 def Int64InputModsMatchClass : IntInputModsMatchClass<64>;
831 class IntInputMods <IntInputModsMatchClass matchClass> : InputMods <matchClass> {
832 let PrintMethod = "printOperandAndIntInputMods";
834 def Int32InputMods : IntInputMods<Int32InputModsMatchClass>;
835 def Int64InputMods : IntInputMods<Int64InputModsMatchClass>;
837 class OpSelModsMatchClass : AsmOperandClass {
838 let Name = "OpSelMods";
839 let ParserMethod = "parseRegOrImm";
840 let PredicateMethod = "isRegOrImm";
843 def IntOpSelModsMatchClass : OpSelModsMatchClass;
844 def IntOpSelMods : InputMods<IntOpSelModsMatchClass>;
846 class FPSDWAInputModsMatchClass <int opSize> : AsmOperandClass {
847 let Name = "SDWAWithFP"#opSize#"InputMods";
848 let ParserMethod = "parseRegOrImmWithFPInputMods";
849 let PredicateMethod = "isSDWAFP"#opSize#"Operand";
852 def FP16SDWAInputModsMatchClass : FPSDWAInputModsMatchClass<16>;
853 def FP32SDWAInputModsMatchClass : FPSDWAInputModsMatchClass<32>;
855 class FPSDWAInputMods <FPSDWAInputModsMatchClass matchClass> :
856 InputMods <matchClass> {
857 let PrintMethod = "printOperandAndFPInputMods";
860 def FP16SDWAInputMods : FPSDWAInputMods<FP16SDWAInputModsMatchClass>;
861 def FP32SDWAInputMods : FPSDWAInputMods<FP32SDWAInputModsMatchClass>;
863 def FPVRegInputModsMatchClass : AsmOperandClass {
864 let Name = "VRegWithFPInputMods";
865 let ParserMethod = "parseRegWithFPInputMods";
866 let PredicateMethod = "isVReg";
869 def FPVRegInputMods : InputMods <FPVRegInputModsMatchClass> {
870 let PrintMethod = "printOperandAndFPInputMods";
873 class IntSDWAInputModsMatchClass <int opSize> : AsmOperandClass {
874 let Name = "SDWAWithInt"#opSize#"InputMods";
875 let ParserMethod = "parseRegOrImmWithIntInputMods";
876 let PredicateMethod = "isSDWAInt"#opSize#"Operand";
879 def Int16SDWAInputModsMatchClass : IntSDWAInputModsMatchClass<16>;
880 def Int32SDWAInputModsMatchClass : IntSDWAInputModsMatchClass<32>;
882 class IntSDWAInputMods <IntSDWAInputModsMatchClass matchClass> :
883 InputMods <matchClass> {
884 let PrintMethod = "printOperandAndIntInputMods";
887 def Int16SDWAInputMods : IntSDWAInputMods<Int16SDWAInputModsMatchClass>;
888 def Int32SDWAInputMods : IntSDWAInputMods<Int32SDWAInputModsMatchClass>;
890 def IntVRegInputModsMatchClass : AsmOperandClass {
891 let Name = "VRegWithIntInputMods";
892 let ParserMethod = "parseRegWithIntInputMods";
893 let PredicateMethod = "isVReg";
896 def IntVRegInputMods : InputMods <IntVRegInputModsMatchClass> {
897 let PrintMethod = "printOperandAndIntInputMods";
900 class PackedFPInputModsMatchClass <int opSize> : AsmOperandClass {
901 let Name = "PackedFP"#opSize#"InputMods";
902 let ParserMethod = "parseRegOrImm";
903 let PredicateMethod = "isRegOrImm";
904 // let PredicateMethod = "isPackedFP"#opSize#"InputMods";
907 class PackedIntInputModsMatchClass <int opSize> : AsmOperandClass {
908 let Name = "PackedInt"#opSize#"InputMods";
909 let ParserMethod = "parseRegOrImm";
910 let PredicateMethod = "isRegOrImm";
911 // let PredicateMethod = "isPackedInt"#opSize#"InputMods";
914 def PackedF16InputModsMatchClass : PackedFPInputModsMatchClass<16>;
915 def PackedI16InputModsMatchClass : PackedIntInputModsMatchClass<16>;
917 class PackedFPInputMods <PackedFPInputModsMatchClass matchClass> : InputMods <matchClass> {
918 // let PrintMethod = "printPackedFPInputMods";
921 class PackedIntInputMods <PackedIntInputModsMatchClass matchClass> : InputMods <matchClass> {
922 //let PrintMethod = "printPackedIntInputMods";
925 def PackedF16InputMods : PackedFPInputMods<PackedF16InputModsMatchClass>;
926 def PackedI16InputMods : PackedIntInputMods<PackedI16InputModsMatchClass>;
928 //===----------------------------------------------------------------------===//
930 //===----------------------------------------------------------------------===//
932 def DS1Addr1Offset : ComplexPattern<i32, 2, "SelectDS1Addr1Offset">;
933 def DS64Bit4ByteAligned : ComplexPattern<i32, 3, "SelectDS64Bit4ByteAligned">;
935 def MOVRELOffset : ComplexPattern<i32, 2, "SelectMOVRELOffset">;
937 def VOP3Mods0 : ComplexPattern<untyped, 4, "SelectVOP3Mods0">;
938 def VOP3Mods0Clamp : ComplexPattern<untyped, 3, "SelectVOP3Mods0Clamp">;
939 def VOP3Mods0Clamp0OMod : ComplexPattern<untyped, 4, "SelectVOP3Mods0Clamp0OMod">;
940 def VOP3Mods : ComplexPattern<untyped, 2, "SelectVOP3Mods">;
941 def VOP3NoMods : ComplexPattern<untyped, 1, "SelectVOP3NoMods">;
942 // VOP3Mods, but the input source is known to never be NaN.
943 def VOP3Mods_nnan : ComplexPattern<fAny, 2, "SelectVOP3Mods_NNaN">;
945 def VOP3OMods : ComplexPattern<untyped, 3, "SelectVOP3OMods">;
947 def VOP3PMods : ComplexPattern<untyped, 2, "SelectVOP3PMods">;
948 def VOP3PMods0 : ComplexPattern<untyped, 3, "SelectVOP3PMods0">;
950 def VOP3OpSel : ComplexPattern<untyped, 2, "SelectVOP3OpSel">;
951 def VOP3OpSel0 : ComplexPattern<untyped, 3, "SelectVOP3OpSel0">;
953 def VOP3OpSelMods : ComplexPattern<untyped, 2, "SelectVOP3OpSelMods">;
954 def VOP3OpSelMods0 : ComplexPattern<untyped, 3, "SelectVOP3OpSelMods0">;
956 def VOP3PMadMixMods : ComplexPattern<untyped, 2, "SelectVOP3PMadMixMods">;
959 def Hi16Elt : ComplexPattern<untyped, 1, "SelectHi16Elt">;
961 //===----------------------------------------------------------------------===//
962 // SI assembler operands
963 //===----------------------------------------------------------------------===//
971 // This should be kept in sync with SISrcMods enum
995 int LLVM_DEBUG_TRAP = 3;
998 //===----------------------------------------------------------------------===//
1000 // SI Instruction multiclass helpers.
1002 // Instructions with _32 take 32-bit operands.
1003 // Instructions with _64 take 64-bit operands.
1005 // VOP_* instructions can use either a 32-bit or 64-bit encoding. The 32-bit
1006 // encoding is the standard encoding, but instructions that make use of
1007 // any of the instruction modifiers must use the 64-bit encoding.
1009 // Instructions with _e32 use the 32-bit encoding.
1010 // Instructions with _e64 use the 64-bit encoding.
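// For example (illustrative assembly): "v_add_f32 v0, v1, v2" fits the 32-bit
// v_add_f32_e32 encoding, while a form needing source or output modifiers,
// such as "v_add_f32_e64 v0, -v1, |v2| clamp", must use the 64-bit encoding.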
1012 //===----------------------------------------------------------------------===//
1014 class SIMCInstr <string pseudo, int subtarget> {
1015 string PseudoInstr = pseudo;
1016 int Subtarget = subtarget;
1019 //===----------------------------------------------------------------------===//
1021 //===----------------------------------------------------------------------===//
1023 class EXP_Helper<bit done, SDPatternOperator node = null_frag> : EXPCommon<
1026 ExpSrc0:$src0, ExpSrc1:$src1, ExpSrc2:$src2, ExpSrc3:$src3,
1027 exp_vm:$vm, exp_compr:$compr, i8imm:$en),
1028 "exp$tgt $src0, $src1, $src2, $src3"#!if(done, " done", "")#"$compr$vm",
1029 [(node (i8 timm:$tgt), (i8 timm:$en),
1030 f32:$src0, f32:$src1, f32:$src2, f32:$src3,
1031 (i1 timm:$compr), (i1 timm:$vm))]> {
1032 let AsmMatchConverter = "cvtExp";
1035 // Split EXP instruction into EXP and EXP_DONE so we can set
1036 // mayLoad for done=1.
1037 multiclass EXP_m<bit done, SDPatternOperator node> {
1038 let mayLoad = done, DisableWQM = 1 in {
1039 let isPseudo = 1, isCodeGenOnly = 1 in {
1040 def "" : EXP_Helper<done, node>,
1041 SIMCInstr <"exp"#!if(done, "_done", ""), SIEncodingFamily.NONE>;
1044 let done = done in {
1045 def _si : EXP_Helper<done>,
1046 SIMCInstr <"exp"#!if(done, "_done", ""), SIEncodingFamily.SI>,
1048 let AssemblerPredicates = [isSICI];
1049 let DecoderNamespace = "SICI";
1050 let DisableDecoder = DisableSIDecoder;
1053 def _vi : EXP_Helper<done>,
1054 SIMCInstr <"exp"#!if(done, "_done", ""), SIEncodingFamily.VI>,
1056 let AssemblerPredicates = [isVI];
1057 let DecoderNamespace = "VI";
1058 let DisableDecoder = DisableVIDecoder;
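// A hedged note on usage: this multiclass is instantiated twice, once with
// done = 0 for the plain export and once with done = 1 for the "done" form
// (roughly "defm EXP : EXP_m<0, ...>" and "defm EXP_DONE : EXP_m<1, ...>",
// with the selection nodes elided here).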
1064 //===----------------------------------------------------------------------===//
1065 // Vector ALU classes
1066 //===----------------------------------------------------------------------===//
1068 class getNumSrcArgs<ValueType Src0, ValueType Src1, ValueType Src2> {
1070 !if (!eq(Src0.Value, untyped.Value), 0,
1071 !if (!eq(Src1.Value, untyped.Value), 1, // VOP1
1072 !if (!eq(Src2.Value, untyped.Value), 2, // VOP2
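// For reference, sample results of the helper above:
//   getNumSrcArgs<f32, untyped, untyped>.ret == 1 (VOP1)
//   getNumSrcArgs<f32, f32, untyped>.ret     == 2 (VOP2)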
1076 // Returns the register class to use for the destination of VOP[123C]
1077 // instructions for the given VT.
1078 class getVALUDstForVT<ValueType VT> {
1079 RegisterOperand ret = !if(!eq(VT.Size, 32), VOPDstOperand<VGPR_32>,
1080 !if(!eq(VT.Size, 128), VOPDstOperand<VReg_128>,
1081 !if(!eq(VT.Size, 64), VOPDstOperand<VReg_64>,
1082 !if(!eq(VT.Size, 16), VOPDstOperand<VGPR_32>,
1083 VOPDstOperand<SReg_64>)))); // else VT == i1
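// Sample results, read directly off the !if chain above:
//   getVALUDstForVT<f32>.ret == VOPDstOperand<VGPR_32>
//   getVALUDstForVT<i64>.ret == VOPDstOperand<VReg_64>
//   getVALUDstForVT<i1>.ret  == VOPDstOperand<SReg_64>   (condition/carry dst)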
1086 // Returns the register class to use for the destination of VOP[12C]
1087 // instructions with SDWA extension
1088 class getSDWADstForVT<ValueType VT> {
1089 RegisterOperand ret = !if(!eq(VT.Size, 1),
1090 SDWAVopcDst, // VOPC
1091 VOPDstOperand<VGPR_32>); // VOP1/2 32-bit dst
1094 // Returns the register class to use for source 0 of VOP[12C]
1095 // instructions for the given VT.
1096 class getVOPSrc0ForVT<ValueType VT> {
1097 bit isFP = !if(!eq(VT.Value, f16.Value), 1,
1098 !if(!eq(VT.Value, v2f16.Value), 1,
1099 !if(!eq(VT.Value, f32.Value), 1,
1100 !if(!eq(VT.Value, f64.Value), 1,
1103 RegisterOperand ret =
1105 !if(!eq(VT.Size, 64),
1107 !if(!eq(VT.Value, f16.Value),
1109 !if(!eq(VT.Value, v2f16.Value),
1115 !if(!eq(VT.Size, 64),
1117 !if(!eq(VT.Value, i16.Value),
1119 !if(!eq(VT.Value, v2i16.Value),
1128 // Returns the vreg register class to use for a source operand of the given VT.
1129 class getVregSrcForVT<ValueType VT> {
1130 RegisterClass ret = !if(!eq(VT.Size, 128), VReg_128,
1131 !if(!eq(VT.Size, 64), VReg_64, VGPR_32));
1134 class getSDWASrcForVT <ValueType VT> {
1135 bit isFP = !if(!eq(VT.Value, f16.Value), 1,
1136 !if(!eq(VT.Value, f32.Value), 1,
1138 RegisterOperand retFlt = !if(!eq(VT.Size, 16), SDWASrc_f16, SDWASrc_f32);
1139 RegisterOperand retInt = !if(!eq(VT.Size, 16), SDWASrc_i16, SDWASrc_i32);
1140 RegisterOperand ret = !if(isFP, retFlt, retInt);
1143 // Returns the register class to use for sources of VOP3 instructions for the given VT.
1145 class getVOP3SrcForVT<ValueType VT> {
1146 bit isFP = !if(!eq(VT.Value, f16.Value), 1,
1147 !if(!eq(VT.Value, v2f16.Value), 1,
1148 !if(!eq(VT.Value, f32.Value), 1,
1149 !if(!eq(VT.Value, f64.Value), 1,
1151 RegisterOperand ret =
1152 !if(!eq(VT.Size, 128),
1154 !if(!eq(VT.Size, 64),
1158 !if(!eq(VT.Value, i1.Value),
1161 !if(!eq(VT.Value, f16.Value),
1163 !if(!eq(VT.Value, v2f16.Value),
1168 !if(!eq(VT.Value, i16.Value),
1170 !if(!eq(VT.Value, v2i16.Value),
1181 // Float or packed int
1182 class isModifierType<ValueType SrcVT> {
1184 !if(!eq(SrcVT.Value, f16.Value), 1,
1185 !if(!eq(SrcVT.Value, f32.Value), 1,
1186 !if(!eq(SrcVT.Value, f64.Value), 1,
1187 !if(!eq(SrcVT.Value, v2f16.Value), 1,
1188 !if(!eq(SrcVT.Value, v2i16.Value), 1,
1192 // Returns the type of the input modifiers operand for the specified input operand.
1193 class getSrcMod <ValueType VT> {
1194 bit isFP = !if(!eq(VT.Value, f16.Value), 1,
1195 !if(!eq(VT.Value, f32.Value), 1,
1196 !if(!eq(VT.Value, f64.Value), 1,
1198 bit isPacked = isPackedType<VT>.ret;
1199 Operand ret = !if(!eq(VT.Size, 64),
1200 !if(isFP, FP64InputMods, Int64InputMods),
1202 !if(!eq(VT.Value, f16.Value),
1210 class getOpSelMod <ValueType VT> {
1211 Operand ret = !if(!eq(VT.Value, f16.Value), FP16InputMods, IntOpSelMods);
1214 // Returns the type of the input modifiers operand for the specified DPP input operand.
1215 class getSrcModExt <ValueType VT> {
1216 bit isFP = !if(!eq(VT.Value, f16.Value), 1,
1217 !if(!eq(VT.Value, f32.Value), 1,
1218 !if(!eq(VT.Value, f64.Value), 1,
1220 Operand ret = !if(isFP, FPVRegInputMods, IntVRegInputMods);
1223 // Returns the type of the input modifiers operand for the specified SDWA input operand.
1224 class getSrcModSDWA <ValueType VT> {
1225 Operand ret = !if(!eq(VT.Value, f16.Value), FP16SDWAInputMods,
1226 !if(!eq(VT.Value, f32.Value), FP32SDWAInputMods,
1227 !if(!eq(VT.Value, i16.Value), Int16SDWAInputMods,
1228 Int32SDWAInputMods)));
1231 // Returns the input arguments for VOP[12C] instructions for the given SrcVT.
1232 class getIns32 <RegisterOperand Src0RC, RegisterClass Src1RC, int NumSrcArgs> {
1233 dag ret = !if(!eq(NumSrcArgs, 1), (ins Src0RC:$src0), // VOP1
1234 !if(!eq(NumSrcArgs, 2), (ins Src0RC:$src0, Src1RC:$src1), // VOP2
1238 // Returns the input arguments for VOP3 instructions for the given SrcVT.
1239 class getIns64 <RegisterOperand Src0RC, RegisterOperand Src1RC,
1240 RegisterOperand Src2RC, int NumSrcArgs,
1241 bit HasIntClamp, bit HasModifiers, bit HasOMod,
1242 Operand Src0Mod, Operand Src1Mod, Operand Src2Mod> {
1245 !if (!eq(NumSrcArgs, 0),
1246 // VOP1 without input operands (V_NOP, V_CLREXCP)
1249 !if (!eq(NumSrcArgs, 1),
1250 !if (!eq(HasModifiers, 1),
1251 // VOP1 with modifiers
1252 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1253 clampmod:$clamp, omod:$omod)
1255 // VOP1 without modifiers
1256 !if (!eq(HasIntClamp, 1),
1257 (ins Src0RC:$src0, clampmod:$clamp),
1260 !if (!eq(NumSrcArgs, 2),
1261 !if (!eq(HasModifiers, 1),
1262 // VOP 2 with modifiers
1263 !if( !eq(HasOMod, 1),
1264 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1265 Src1Mod:$src1_modifiers, Src1RC:$src1,
1266 clampmod:$clamp, omod:$omod),
1267 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1268 Src1Mod:$src1_modifiers, Src1RC:$src1,
1271 // VOP2 without modifiers
1272 !if (!eq(HasIntClamp, 1),
1273 (ins Src0RC:$src0, Src1RC:$src1, clampmod:$clamp),
1274 (ins Src0RC:$src0, Src1RC:$src1))
1277 /* NumSrcArgs == 3 */,
1278 !if (!eq(HasModifiers, 1),
1279 // VOP3 with modifiers
1280 !if (!eq(HasOMod, 1),
1281 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1282 Src1Mod:$src1_modifiers, Src1RC:$src1,
1283 Src2Mod:$src2_modifiers, Src2RC:$src2,
1284 clampmod:$clamp, omod:$omod),
1285 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1286 Src1Mod:$src1_modifiers, Src1RC:$src1,
1287 Src2Mod:$src2_modifiers, Src2RC:$src2,
1290 // VOP3 without modifiers
1291 !if (!eq(HasIntClamp, 1),
1292 (ins Src0RC:$src0, Src1RC:$src1, Src2RC:$src2, clampmod:$clamp),
1293 (ins Src0RC:$src0, Src1RC:$src1, Src2RC:$src2))
1297 /// XXX - src1 may only allow VGPRs?
1299 // The modifiers (except clamp) are dummy operands for the benefit of
1300 // printing and parsing; the printer looks at the srcN_modifiers operands
1301 // to decide what to actually print.
1302 class getInsVOP3P <RegisterOperand Src0RC, RegisterOperand Src1RC,
1303 RegisterOperand Src2RC, int NumSrcArgs,
1305 Operand Src0Mod, Operand Src1Mod, Operand Src2Mod> {
1306 dag ret = !if (!eq(NumSrcArgs, 2),
1308 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1309 Src1Mod:$src1_modifiers, Src1RC:$src1,
1311 op_sel:$op_sel, op_sel_hi:$op_sel_hi,
1312 neg_lo:$neg_lo, neg_hi:$neg_hi),
1313 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1314 Src1Mod:$src1_modifiers, Src1RC:$src1,
1315 op_sel:$op_sel, op_sel_hi:$op_sel_hi,
1316 neg_lo:$neg_lo, neg_hi:$neg_hi)),
1317 // else NumSrcArgs == 3
1319 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1320 Src1Mod:$src1_modifiers, Src1RC:$src1,
1321 Src2Mod:$src2_modifiers, Src2RC:$src2,
1323 op_sel:$op_sel, op_sel_hi:$op_sel_hi,
1324 neg_lo:$neg_lo, neg_hi:$neg_hi),
1325 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1326 Src1Mod:$src1_modifiers, Src1RC:$src1,
1327 Src2Mod:$src2_modifiers, Src2RC:$src2,
1328 op_sel:$op_sel, op_sel_hi:$op_sel_hi,
1329 neg_lo:$neg_lo, neg_hi:$neg_hi))
1333 class getInsVOP3OpSel <RegisterOperand Src0RC,
1334 RegisterOperand Src1RC,
1335 RegisterOperand Src2RC,
1341 dag ret = !if (!eq(NumSrcArgs, 2),
1343 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1344 Src1Mod:$src1_modifiers, Src1RC:$src1,
1347 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1348 Src1Mod:$src1_modifiers, Src1RC:$src1,
1350 // else NumSrcArgs == 3
1352 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1353 Src1Mod:$src1_modifiers, Src1RC:$src1,
1354 Src2Mod:$src2_modifiers, Src2RC:$src2,
1357 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1358 Src1Mod:$src1_modifiers, Src1RC:$src1,
1359 Src2Mod:$src2_modifiers, Src2RC:$src2,
1364 class getInsDPP <RegisterOperand DstRC, RegisterClass Src0RC, RegisterClass Src1RC,
1365 int NumSrcArgs, bit HasModifiers,
1366 Operand Src0Mod, Operand Src1Mod> {
1368 dag ret = !if (!eq(NumSrcArgs, 0),
1369 // VOP1 without input operands (V_NOP)
1370 (ins dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
1371 bank_mask:$bank_mask, bound_ctrl:$bound_ctrl),
1372 !if (!eq(NumSrcArgs, 1),
1373 !if (!eq(HasModifiers, 1),
1374 // VOP1_DPP with modifiers
1375 (ins DstRC:$old, Src0Mod:$src0_modifiers,
1376 Src0RC:$src0, dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
1377 bank_mask:$bank_mask, bound_ctrl:$bound_ctrl)
1379 // VOP1_DPP without modifiers
1380 (ins DstRC:$old, Src0RC:$src0,
1381 dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
1382 bank_mask:$bank_mask, bound_ctrl:$bound_ctrl)
1384 /* NumSrcArgs == 2 */,
1385 !if (!eq(HasModifiers, 1),
1386 // VOP2_DPP with modifiers
1388 Src0Mod:$src0_modifiers, Src0RC:$src0,
1389 Src1Mod:$src1_modifiers, Src1RC:$src1,
1390 dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
1391 bank_mask:$bank_mask, bound_ctrl:$bound_ctrl)
1393 // VOP2_DPP without modifiers
1395 Src0RC:$src0, Src1RC:$src1, dpp_ctrl:$dpp_ctrl,
1396 row_mask:$row_mask, bank_mask:$bank_mask,
1397 bound_ctrl:$bound_ctrl)
1404 class getInsSDWA <RegisterOperand Src0RC, RegisterOperand Src1RC, int NumSrcArgs,
1405 bit HasSDWAOMod, Operand Src0Mod, Operand Src1Mod,
1408 dag ret = !if(!eq(NumSrcArgs, 0),
1409 // VOP1 without input operands (V_NOP)
1411 !if(!eq(NumSrcArgs, 1),
1413 !if(!eq(HasSDWAOMod, 0),
1414 // VOP1_SDWA without omod
1415 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1417 dst_sel:$dst_sel, dst_unused:$dst_unused,
1418 src0_sel:$src0_sel),
1419 // VOP1_SDWA with omod
1420 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1421 clampmod:$clamp, omod:$omod,
1422 dst_sel:$dst_sel, dst_unused:$dst_unused,
1423 src0_sel:$src0_sel)),
1424 !if(!eq(NumSrcArgs, 2),
1425 !if(!eq(DstVT.Size, 1),
1427 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1428 Src1Mod:$src1_modifiers, Src1RC:$src1,
1429 clampmod:$clamp, src0_sel:$src0_sel, src1_sel:$src1_sel),
1431 !if(!eq(HasSDWAOMod, 0),
1432 // VOP2_SDWA without omod
1433 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1434 Src1Mod:$src1_modifiers, Src1RC:$src1,
1436 dst_sel:$dst_sel, dst_unused:$dst_unused,
1437 src0_sel:$src0_sel, src1_sel:$src1_sel),
1438 // VOP2_SDWA with omod
1439 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1440 Src1Mod:$src1_modifiers, Src1RC:$src1,
1441 clampmod:$clamp, omod:$omod,
1442 dst_sel:$dst_sel, dst_unused:$dst_unused,
1443 src0_sel:$src0_sel, src1_sel:$src1_sel))),
1444 (ins)/* endif */)));
1447 // Outs for DPP and SDWA
1448 class getOutsExt <bit HasDst, ValueType DstVT, RegisterOperand DstRCExt> {
1449 dag ret = !if(HasDst,
1450 !if(!eq(DstVT.Size, 1),
1451 (outs), // no dst for VOPC; we use the "vcc" token as dst in SDWA VOPC instructions
1452 (outs DstRCExt:$vdst)),
1457 class getOutsSDWA <bit HasDst, ValueType DstVT, RegisterOperand DstRCSDWA> {
1458 dag ret = !if(HasDst,
1459 !if(!eq(DstVT.Size, 1),
1460 (outs DstRCSDWA:$sdst),
1461 (outs DstRCSDWA:$vdst)),
1465 // Returns the assembly string for the inputs and outputs of a VOP[12C]
1466 // instruction. This does not add the _e32 suffix, so it can be reused by getAsm64.
1468 class getAsm32 <bit HasDst, int NumSrcArgs, ValueType DstVT = i32> {
1469 string dst = !if(!eq(DstVT.Size, 1), "$sdst", "$vdst"); // use $sdst for VOPC
1470 string src0 = ", $src0";
1471 string src1 = ", $src1";
1472 string src2 = ", $src2";
1473 string ret = !if(HasDst, dst, "") #
1474 !if(!eq(NumSrcArgs, 1), src0, "") #
1475 !if(!eq(NumSrcArgs, 2), src0#src1, "") #
1476 !if(!eq(NumSrcArgs, 3), src0#src1#src2, "");
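// For reference: getAsm32<1, 2>.ret is "$vdst, $src0, $src1", and the VOPC
// variant getAsm32<1, 2, i1>.ret is "$sdst, $src0, $src1".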
1479 // Returns the assembly string for the inputs and outputs of a VOP3 instruction.
1481 class getAsm64 <bit HasDst, int NumSrcArgs, bit HasIntClamp, bit HasModifiers,
1482 bit HasOMod, ValueType DstVT = i32> {
1483 string dst = !if(!eq(DstVT.Size, 1), "$sdst", "$vdst"); // use $sdst for VOPC
1484 string src0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,");
1485 string src1 = !if(!eq(NumSrcArgs, 1), "",
1486 !if(!eq(NumSrcArgs, 2), " $src1_modifiers",
1487 " $src1_modifiers,"));
1488 string src2 = !if(!eq(NumSrcArgs, 3), " $src2_modifiers", "");
1489 string iclamp = !if(HasIntClamp, "$clamp", "");
1491 !if(!eq(HasModifiers, 0),
1492 getAsm32<HasDst, NumSrcArgs, DstVT>.ret # iclamp,
1493 dst#", "#src0#src1#src2#"$clamp"#!if(HasOMod, "$omod", ""));
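// For reference: getAsm64<1, 2, 0, 1, 1>.ret (two sources, FP modifiers, omod)
// is "$vdst, $src0_modifiers, $src1_modifiers$clamp$omod".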
1496 // Returns the assembly string for the inputs and outputs of a VOP3P instruction.
1498 class getAsmVOP3P <bit HasDst, int NumSrcArgs, bit HasModifiers,
1499 bit HasClamp, ValueType DstVT = i32> {
1500 string dst = " $vdst";
1501 string src0 = !if(!eq(NumSrcArgs, 1), "$src0", "$src0,");
1502 string src1 = !if(!eq(NumSrcArgs, 1), "",
1503 !if(!eq(NumSrcArgs, 2), " $src1",
1505 string src2 = !if(!eq(NumSrcArgs, 3), " $src2", "");
1507 string mods = !if(HasModifiers, "$neg_lo$neg_hi", "");
1508 string clamp = !if(HasClamp, "$clamp", "");
1510 // Each modifier is printed as an array of bits for each operand, so
1511 // all operands are printed as part of src0_modifiers.
1512 string ret = dst#", "#src0#src1#src2#"$op_sel$op_sel_hi"#mods#clamp;
1515 class getAsmVOP3OpSel <int NumSrcArgs,
1520 string dst = " $vdst";
1522 string isrc0 = !if(!eq(NumSrcArgs, 1), "$src0", "$src0,");
1523 string isrc1 = !if(!eq(NumSrcArgs, 1), "",
1524 !if(!eq(NumSrcArgs, 2), " $src1",
1526 string isrc2 = !if(!eq(NumSrcArgs, 3), " $src2", "");
1528 string fsrc0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,");
1529 string fsrc1 = !if(!eq(NumSrcArgs, 1), "",
1530 !if(!eq(NumSrcArgs, 2), " $src1_modifiers",
1531 " $src1_modifiers,"));
1532 string fsrc2 = !if(!eq(NumSrcArgs, 3), " $src2_modifiers", "");
1534 string src0 = !if(Src0HasMods, fsrc0, isrc0);
1535 string src1 = !if(Src1HasMods, fsrc1, isrc1);
1536 string src2 = !if(Src2HasMods, fsrc2, isrc2);
1538 string clamp = !if(HasClamp, "$clamp", "");
1540 string ret = dst#", "#src0#src1#src2#"$op_sel"#clamp;
1543 class getAsmDPP <bit HasDst, int NumSrcArgs, bit HasModifiers, ValueType DstVT = i32> {
1544 string dst = !if(HasDst,
1545 !if(!eq(DstVT.Size, 1),
1548 ""); // use $sdst for VOPC
1549 string src0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,");
1550 string src1 = !if(!eq(NumSrcArgs, 1), "",
1551 !if(!eq(NumSrcArgs, 2), " $src1_modifiers",
1552 " $src1_modifiers,"));
1553 string args = !if(!eq(HasModifiers, 0),
1554 getAsm32<0, NumSrcArgs, DstVT>.ret,
1556 string ret = dst#args#" $dpp_ctrl$row_mask$bank_mask$bound_ctrl";
1559 class getAsmSDWA <bit HasDst, int NumSrcArgs, ValueType DstVT = i32> {
1560 string dst = !if(HasDst,
1561 !if(!eq(DstVT.Size, 1),
1562 " vcc", // use vcc token as dst for VOPC instructioins
1565 string src0 = "$src0_modifiers";
1566 string src1 = "$src1_modifiers";
1567 string args = !if(!eq(NumSrcArgs, 0),
1569 !if(!eq(NumSrcArgs, 1),
1571 ", "#src0#", "#src1#"$clamp"
1574 string sdwa = !if(!eq(NumSrcArgs, 0),
1576 !if(!eq(NumSrcArgs, 1),
1577 " $dst_sel $dst_unused $src0_sel",
1578 !if(!eq(DstVT.Size, 1),
1579 " $src0_sel $src1_sel", // No dst_sel and dst_unused for VOPC
1580 " $dst_sel $dst_unused $src0_sel $src1_sel"
1584 string ret = dst#args#sdwa;
1587 class getAsmSDWA9 <bit HasDst, bit HasOMod, int NumSrcArgs,
1588 ValueType DstVT = i32> {
1589 string dst = !if(HasDst,
1590 !if(!eq(DstVT.Size, 1),
1594 string src0 = "$src0_modifiers";
1595 string src1 = "$src1_modifiers";
1596 string out_mods = !if(!eq(HasOMod, 0), "$clamp", "$clamp$omod");
1597 string args = !if(!eq(NumSrcArgs, 0), "",
1598 !if(!eq(NumSrcArgs, 1),
1603 string sdwa = !if(!eq(NumSrcArgs, 0), "",
1604 !if(!eq(NumSrcArgs, 1),
1605 out_mods#" $dst_sel $dst_unused $src0_sel",
1606 !if(!eq(DstVT.Size, 1),
1607 " $src0_sel $src1_sel", // No dst_sel, dst_unused and output modifiers for VOPC
1608 out_mods#" $dst_sel $dst_unused $src0_sel $src1_sel"
1612 string ret = dst#args#sdwa;
1616 // Checks whether an instruction supports the DPP and SDWA extensions.
1617 class getHasExt <int NumSrcArgs, ValueType DstVT = i32, ValueType Src0VT = i32,
1618 ValueType Src1VT = i32> {
1619 bit ret = !if(!eq(NumSrcArgs, 3),
1620 0, // NumSrcArgs == 3 - No DPP or SDWA for VOP3
1621 !if(!eq(DstVT.Size, 64),
1622 0, // 64-bit dst - No DPP or SDWA for 64-bit operands
1623 !if(!eq(Src0VT.Size, 64),
1625 !if(!eq(Src1VT.Size, 64),
1634 class BitOr<bit a, bit b> {
1635 bit ret = !if(a, 1, !if(b, 1, 0));
1638 class BitAnd<bit a, bit b> {
1639 bit ret = !if(a, !if(b, 1, 0), 0);
1647 class VOPProfile <list<ValueType> _ArgVT> {
1649 field list<ValueType> ArgVT = _ArgVT;
1651 field ValueType DstVT = ArgVT[0];
1652 field ValueType Src0VT = ArgVT[1];
1653 field ValueType Src1VT = ArgVT[2];
1654 field ValueType Src2VT = ArgVT[3];
1655 field RegisterOperand DstRC = getVALUDstForVT<DstVT>.ret;
1656 field RegisterOperand DstRCDPP = getVALUDstForVT<DstVT>.ret;
1657 field RegisterOperand DstRCSDWA = getSDWADstForVT<DstVT>.ret;
1658 field RegisterOperand Src0RC32 = getVOPSrc0ForVT<Src0VT>.ret;
1659 field RegisterClass Src1RC32 = getVregSrcForVT<Src1VT>.ret;
1660 field RegisterOperand Src0RC64 = getVOP3SrcForVT<Src0VT>.ret;
1661 field RegisterOperand Src1RC64 = getVOP3SrcForVT<Src1VT>.ret;
1662 field RegisterOperand Src2RC64 = getVOP3SrcForVT<Src2VT>.ret;
1663 field RegisterClass Src0DPP = getVregSrcForVT<Src0VT>.ret;
1664 field RegisterClass Src1DPP = getVregSrcForVT<Src1VT>.ret;
1665 field RegisterOperand Src0SDWA = getSDWASrcForVT<Src0VT>.ret;
1666 field RegisterOperand Src1SDWA = getSDWASrcForVT<Src0VT>.ret;
1667 field Operand Src0Mod = getSrcMod<Src0VT>.ret;
1668 field Operand Src1Mod = getSrcMod<Src1VT>.ret;
1669 field Operand Src2Mod = getSrcMod<Src2VT>.ret;
1670 field Operand Src0ModDPP = getSrcModExt<Src0VT>.ret;
1671 field Operand Src1ModDPP = getSrcModExt<Src1VT>.ret;
1672 field Operand Src0ModSDWA = getSrcModSDWA<Src0VT>.ret;
1673 field Operand Src1ModSDWA = getSrcModSDWA<Src1VT>.ret;
1676 field bit HasDst = !if(!eq(DstVT.Value, untyped.Value), 0, 1);
1677 field bit HasDst32 = HasDst;
1678 field bit EmitDst = HasDst; // force dst encoding, see v_movreld_b32 special case
1679 field int NumSrcArgs = getNumSrcArgs<Src0VT, Src1VT, Src2VT>.ret;
1680 field bit HasSrc0 = !if(!eq(Src0VT.Value, untyped.Value), 0, 1);
1681 field bit HasSrc1 = !if(!eq(Src1VT.Value, untyped.Value), 0, 1);
1682 field bit HasSrc2 = !if(!eq(Src2VT.Value, untyped.Value), 0, 1);
1684 // TODO: Modifiers logic is somewhat ad hoc here, to be refined later
1685 field bit HasModifiers = isModifierType<Src0VT>.ret;
1687 field bit HasSrc0FloatMods = isFloatType<Src0VT>.ret;
1688 field bit HasSrc1FloatMods = isFloatType<Src1VT>.ret;
1689 field bit HasSrc2FloatMods = isFloatType<Src2VT>.ret;
1691 field bit HasSrc0IntMods = isIntType<Src0VT>.ret;
1692 field bit HasSrc1IntMods = isIntType<Src1VT>.ret;
1693 field bit HasSrc2IntMods = isIntType<Src2VT>.ret;
1695 field bit HasSrc0Mods = HasModifiers;
1696 field bit HasSrc1Mods = !if(HasModifiers, BitOr<HasSrc1FloatMods, HasSrc1IntMods>.ret, 0);
1697 field bit HasSrc2Mods = !if(HasModifiers, BitOr<HasSrc2FloatMods, HasSrc2IntMods>.ret, 0);
1699 field bit HasClamp = HasModifiers;
1700 field bit HasSDWAClamp = EmitDst;
1701 field bit HasFPClamp = BitAnd<isFloatType<DstVT>.ret, HasClamp>.ret;
1702 field bit HasIntClamp = !if(isFloatType<DstVT>.ret, 0, HasClamp);
1703 field bit HasClampLo = HasClamp;
1704 field bit HasClampHi = BitAnd<isPackedType<DstVT>.ret, HasClamp>.ret;
1705 field bit HasHigh = 0;
1707 field bit IsPacked = isPackedType<Src0VT>.ret;
1708 field bit HasOpSel = IsPacked;
1709 field bit HasOMod = !if(HasOpSel, 0, isFloatType<DstVT>.ret);
1710 field bit HasSDWAOMod = isFloatType<DstVT>.ret;
1712 field bit HasExt = getHasExt<NumSrcArgs, DstVT, Src0VT, Src1VT>.ret;
1713 field bit HasExtDPP = HasExt;
1714 field bit HasExtSDWA = HasExt;
1715 field bit HasExtSDWA9 = HasExt;
1716 field int NeedPatGen = PatGenMode.NoPattern;
1718 field Operand Src0PackedMod = !if(HasSrc0FloatMods, PackedF16InputMods, PackedI16InputMods);
1719 field Operand Src1PackedMod = !if(HasSrc1FloatMods, PackedF16InputMods, PackedI16InputMods);
1720 field Operand Src2PackedMod = !if(HasSrc2FloatMods, PackedF16InputMods, PackedI16InputMods);
1722 field dag Outs = !if(HasDst,(outs DstRC:$vdst),(outs));
1724 // VOP3b instructions are a special case with a second explicit
1725 // output. This is manually overridden for them.
1726 field dag Outs32 = Outs;
1727 field dag Outs64 = Outs;
1728 field dag OutsDPP = getOutsExt<HasDst, DstVT, DstRCDPP>.ret;
1729 field dag OutsSDWA = getOutsSDWA<HasDst, DstVT, DstRCSDWA>.ret;

  field dag Ins32 = getIns32<Src0RC32, Src1RC32, NumSrcArgs>.ret;
  field dag Ins64 = getIns64<Src0RC64, Src1RC64, Src2RC64, NumSrcArgs,
                             HasIntClamp, HasModifiers, HasOMod, Src0Mod, Src1Mod,
                             Src2Mod>.ret;
  field dag InsVOP3P = getInsVOP3P<Src0RC64, Src1RC64, Src2RC64,
                                   NumSrcArgs, HasClamp,
                                   Src0PackedMod, Src1PackedMod, Src2PackedMod>.ret;
  field dag InsVOP3OpSel = getInsVOP3OpSel<Src0RC64, Src1RC64, Src2RC64,
                                           NumSrcArgs,
                                           HasClamp,
                                           getOpSelMod<Src0VT>.ret,
                                           getOpSelMod<Src1VT>.ret,
                                           getOpSelMod<Src2VT>.ret>.ret;
  field dag InsDPP = getInsDPP<DstRCDPP, Src0DPP, Src1DPP, NumSrcArgs,
                               HasModifiers, Src0ModDPP, Src1ModDPP>.ret;
  field dag InsSDWA = getInsSDWA<Src0SDWA, Src1SDWA, NumSrcArgs,
                                 HasSDWAOMod, Src0ModSDWA, Src1ModSDWA,
                                 DstVT>.ret;

  field string Asm32 = getAsm32<HasDst, NumSrcArgs, DstVT>.ret;
  field string Asm64 = getAsm64<HasDst, NumSrcArgs, HasIntClamp, HasModifiers, HasOMod, DstVT>.ret;
  field string AsmVOP3P = getAsmVOP3P<HasDst, NumSrcArgs, HasModifiers, HasClamp, DstVT>.ret;
  field string AsmVOP3OpSel = getAsmVOP3OpSel<NumSrcArgs,
                                              HasClamp,
                                              HasSrc0FloatMods,
                                              HasSrc1FloatMods,
                                              HasSrc2FloatMods>.ret;
  field string AsmDPP = getAsmDPP<HasDst, NumSrcArgs, HasModifiers, DstVT>.ret;
  field string AsmSDWA = getAsmSDWA<HasDst, NumSrcArgs, DstVT>.ret;
  field string AsmSDWA9 = getAsmSDWA9<HasDst, HasSDWAOMod, NumSrcArgs, DstVT>.ret;
}
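
// Illustrative sketch only (the def name below is hypothetical and not used
// by any instruction): a VOP3b-style profile overrides Outs64 to expose the
// second explicit SGPR output mentioned above, roughly:
//
//   def VOP_EXAMPLE_I32_I32_I32_VOP3b : VOPProfile <[i32, i32, i32, untyped]> {
//     let Outs64 = (outs DstRC:$vdst, SReg_64:$sdst);
//   }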

class VOP_NO_EXT <VOPProfile p> : VOPProfile <p.ArgVT> {
  let HasExt = 0;
  let HasExtDPP = 0;
  let HasExtSDWA = 0;
  let HasExtSDWA9 = 0;
}

class VOP_PAT_GEN <VOPProfile p, int mode=PatGenMode.Pattern> : VOPProfile <p.ArgVT> {
  let NeedPatGen = mode;
}

def VOP_F16_F16 : VOPProfile <[f16, f16, untyped, untyped]>;
def VOP_F16_I16 : VOPProfile <[f16, i16, untyped, untyped]>;
def VOP_I16_F16 : VOPProfile <[i16, f16, untyped, untyped]>;

def VOP_F16_F16_F16 : VOPProfile <[f16, f16, f16, untyped]>;
def VOP_F16_F16_I16 : VOPProfile <[f16, f16, i16, untyped]>;
def VOP_F16_F16_I32 : VOPProfile <[f16, f16, i32, untyped]>;
def VOP_I16_I16_I16 : VOPProfile <[i16, i16, i16, untyped]>;

def VOP_I16_I16_I16_I16 : VOPProfile <[i16, i16, i16, i16, untyped]>;
def VOP_F16_F16_F16_F16 : VOPProfile <[f16, f16, f16, f16, untyped]>;

def VOP_I32_I16_I16_I32 : VOPProfile <[i32, i16, i16, i32, untyped]>;

def VOP_V2F16_V2F16_V2F16 : VOPProfile <[v2f16, v2f16, v2f16, untyped]>;
def VOP_V2I16_V2I16_V2I16 : VOPProfile <[v2i16, v2i16, v2i16, untyped]>;
def VOP_B32_F16_F16 : VOPProfile <[i32, f16, f16, untyped]>;

def VOP_V2F16_V2F16_V2F16_V2F16 : VOPProfile <[v2f16, v2f16, v2f16, v2f16]>;
def VOP_V2I16_V2I16_V2I16_V2I16 : VOPProfile <[v2i16, v2i16, v2i16, v2i16]>;
def VOP_V2I16_F32_F32 : VOPProfile <[v2i16, f32, f32, untyped]>;
def VOP_V2I16_I32_I32 : VOPProfile <[v2i16, i32, i32, untyped]>;

def VOP_F32_V2F16_V2F16_V2F16 : VOPProfile <[f32, v2f16, v2f16, v2f16]>;

def VOP_NONE : VOPProfile <[untyped, untyped, untyped, untyped]>;

def VOP_F32_F32 : VOPProfile <[f32, f32, untyped, untyped]>;
def VOP_F32_F64 : VOPProfile <[f32, f64, untyped, untyped]>;
def VOP_F32_I32 : VOPProfile <[f32, i32, untyped, untyped]>;
def VOP_F64_F32 : VOPProfile <[f64, f32, untyped, untyped]>;
def VOP_F64_F64 : VOPProfile <[f64, f64, untyped, untyped]>;
def VOP_F64_I32 : VOPProfile <[f64, i32, untyped, untyped]>;
def VOP_I32_F32 : VOPProfile <[i32, f32, untyped, untyped]>;
def VOP_I32_F64 : VOPProfile <[i32, f64, untyped, untyped]>;
def VOP_I32_I32 : VOPProfile <[i32, i32, untyped, untyped]>;
def VOP_F16_F32 : VOPProfile <[f16, f32, untyped, untyped]>;
def VOP_F32_F16 : VOPProfile <[f32, f16, untyped, untyped]>;

def VOP_F32_F32_F16 : VOPProfile <[f32, f32, f16, untyped]>;
def VOP_F32_F32_F32 : VOPProfile <[f32, f32, f32, untyped]>;
def VOP_F32_F32_I32 : VOPProfile <[f32, f32, i32, untyped]>;
def VOP_F64_F64_F64 : VOPProfile <[f64, f64, f64, untyped]>;
def VOP_F64_F64_I32 : VOPProfile <[f64, f64, i32, untyped]>;
def VOP_I32_F32_F32 : VOPProfile <[i32, f32, f32, untyped]>;
def VOP_I32_F32_I32 : VOPProfile <[i32, f32, i32, untyped]>;
def VOP_I32_I32_I32 : VOPProfile <[i32, i32, i32, untyped]>;
def VOP_V2F16_F32_F32 : VOPProfile <[v2f16, f32, f32, untyped]>;
def VOP_F32_F16_F16_F16 : VOPProfile <[f32, f16, f16, f16]>;

def VOP_I64_I64_I32 : VOPProfile <[i64, i64, i32, untyped]>;
def VOP_I64_I32_I64 : VOPProfile <[i64, i32, i64, untyped]>;
def VOP_I64_I64_I64 : VOPProfile <[i64, i64, i64, untyped]>;

def VOP_F16_F32_F16_F32 : VOPProfile <[f16, f32, f16, f32]>;
def VOP_F32_F32_F16_F16 : VOPProfile <[f32, f32, f16, f16]>;
def VOP_F32_F32_F32_F32 : VOPProfile <[f32, f32, f32, f32]>;
def VOP_F64_F64_F64_F64 : VOPProfile <[f64, f64, f64, f64]>;
def VOP_I32_I32_I32_I32 : VOPProfile <[i32, i32, i32, i32]>;
def VOP_I64_I32_I32_I64 : VOPProfile <[i64, i32, i32, i64]>;
def VOP_I32_F32_I32_I32 : VOPProfile <[i32, f32, i32, i32]>;
def VOP_I64_I64_I32_I64 : VOPProfile <[i64, i64, i32, i64]>;
def VOP_V4I32_I64_I32_V4I32 : VOPProfile <[v4i32, i64, i32, v4i32]>;

def VOP_F32_V2F16_V2F16_F32 : VOPProfile <[f32, v2f16, v2f16, f32]>;
def VOP_I32_V2I16_V2I16_I32 : VOPProfile <[i32, v2i16, v2i16, i32]>;

class Commutable_REV <string revOp, bit isOrig> {
  string RevOp = revOp;
  bit IsOrig = isOrig;
}
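
// Illustrative use (opcode names hypothetical): an original opcode and its
// operand-reversed form are tied together by sharing the same RevOp string,
// e.g.
//
//   def V_FOO_e32    : ..., Commutable_REV<"v_foo", 1>;
//   def V_FOOREV_e32 : ..., Commutable_REV<"v_foo", 0>;
//
// which is what lets the getCommuteOrig/getCommuteRev mappings below
// translate between the two forms.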

class AtomicNoRet <string noRetOp, bit isRet> {
  string NoRetOp = noRetOp;
  bit IsRet = isRet;
}

//===----------------------------------------------------------------------===//
// Interpolation opcodes
//===----------------------------------------------------------------------===//

class VINTRPDstOperand <RegisterClass rc> : RegisterOperand <rc, "printVINTRPDst">;

class VINTRP_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
  VINTRPCommon <outs, ins, "", pattern>,
  SIMCInstr<opName, SIEncodingFamily.NONE> {
  let isPseudo = 1;
  let isCodeGenOnly = 1;
}

class VINTRP_Real_si <bits <2> op, string opName, dag outs, dag ins,
                      string asm> :
  VINTRPCommon <outs, ins, asm, []>,
  VINTRPe <op>,
  SIMCInstr<opName, SIEncodingFamily.SI> {
  let AssemblerPredicate = SIAssemblerPredicate;
  let DecoderNamespace = "SICI";
  let DisableDecoder = DisableSIDecoder;
}

class VINTRP_Real_vi <bits <2> op, string opName, dag outs, dag ins,
                      string asm> :
  VINTRPCommon <outs, ins, asm, []>,
  VINTRPe_vi <op>,
  SIMCInstr<opName, SIEncodingFamily.VI> {
  let AssemblerPredicate = VIAssemblerPredicate;
  let DecoderNamespace = "VI";
  let DisableDecoder = DisableVIDecoder;
}

multiclass VINTRP_m <bits <2> op, dag outs, dag ins, string asm,
                     list<dag> pattern = []> {
  def "" : VINTRP_Pseudo <NAME, outs, ins, pattern>;

  def _si : VINTRP_Real_si <op, NAME, outs, ins, asm>;

  def _vi : VINTRP_Real_vi <op, NAME, outs, ins, asm>;
}
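
// Illustrative use (operand lists abbreviated; see SIInstructions.td for the
// real definitions): the interpolation opcodes are instantiated through this
// multiclass, along the lines of
//
//   defm V_INTERP_P1_F32 : VINTRP_m <0x0, (outs VGPR_32:$vdst),
//     (ins VGPR_32:$vsrc, Attr:$attr, AttrChan:$attrchan),
//     "v_interp_p1_f32 $vdst, $vsrc, $attrchan, $attr">;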

//===----------------------------------------------------------------------===//
// Vector instruction mappings
//===----------------------------------------------------------------------===//

// Maps an opcode in e32 form to its e64 equivalent
def getVOPe64 : InstrMapping {
  let FilterClass = "VOP";
  let RowFields = ["OpName"];
  let ColFields = ["Size", "VOP3"];
  let KeyCol = ["4", "0"];
  let ValueCols = [["8", "1"]];
}

// Maps an opcode in e64 form to its e32 equivalent
def getVOPe32 : InstrMapping {
  let FilterClass = "VOP";
  let RowFields = ["OpName"];
  let ColFields = ["Size", "VOP3"];
  let KeyCol = ["8", "1"];
  let ValueCols = [["4", "0"]];
}
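
// Worked example: for a row keyed by OpName, the 32-bit encoding (Size 4,
// VOP3 = 0) maps to the 64-bit one (Size 8, VOP3 = 1) and back, e.g.
// V_ADD_F32_e32 <-> V_ADD_F32_e64. The generated tables are queried from C++
// as AMDGPU::getVOPe64(Opcode) / AMDGPU::getVOPe32(Opcode), which return the
// mapped opcode, or -1 if there is none.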

// Maps ordinary instructions to their SDWA counterparts
def getSDWAOp : InstrMapping {
  let FilterClass = "VOP";
  let RowFields = ["OpName"];
  let ColFields = ["AsmVariantName"];
  let KeyCol = ["Default"];
  let ValueCols = [["SDWA"]];
}

// Maps SDWA instructions to their ordinary counterparts
def getBasicFromSDWAOp : InstrMapping {
  let FilterClass = "VOP";
  let RowFields = ["OpName"];
  let ColFields = ["AsmVariantName"];
  let KeyCol = ["SDWA"];
  let ValueCols = [["Default"]];
}

// Maps a commuted opcode to its original version
def getCommuteOrig : InstrMapping {
  let FilterClass = "Commutable_REV";
  let RowFields = ["RevOp"];
  let ColFields = ["IsOrig"];
  let KeyCol = ["0"];
  let ValueCols = [["1"]];
}

// Maps an original opcode to its commuted version
def getCommuteRev : InstrMapping {
  let FilterClass = "Commutable_REV";
  let RowFields = ["RevOp"];
  let ColFields = ["IsOrig"];
  let KeyCol = ["1"];
  let ValueCols = [["0"]];
}

def getMCOpcodeGen : InstrMapping {
  let FilterClass = "SIMCInstr";
  let RowFields = ["PseudoInstr"];
  let ColFields = ["Subtarget"];
  let KeyCol = [!cast<string>(SIEncodingFamily.NONE)];
  let ValueCols = [[!cast<string>(SIEncodingFamily.SI)],
                   [!cast<string>(SIEncodingFamily.VI)],
                   [!cast<string>(SIEncodingFamily.SDWA)],
                   [!cast<string>(SIEncodingFamily.SDWA9)],
                   // GFX80 encoding is added to work around a multiple matching
                   // issue for buffer instructions with unpacked d16 data. This
                   // does not actually change the encoding, and thus may be
                   // removed in the future.
                   [!cast<string>(SIEncodingFamily.GFX80)],
                   [!cast<string>(SIEncodingFamily.GFX9)]];
}
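
// Worked example: SIInstrInfo::pseudoToMCOpcode queries the table generated
// from this mapping (via AMDGPU::getMCOpcodeGen) to translate a pseudo such
// as S_MOV_B32 into the real encoding for the selected subtarget, i.e. the
// SIEncodingFamily.SI or SIEncodingFamily.VI column; a result of -1 means
// the instruction does not exist in that encoding family.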

// Get equivalent SOPK instruction.
def getSOPKOp : InstrMapping {
  let FilterClass = "SOPKInstTable";
  let RowFields = ["BaseCmpOp"];
  let ColFields = ["IsSOPK"];
  let KeyCol = ["0"];
  let ValueCols = [["1"]];
}

def getAddr64Inst : InstrMapping {
  let FilterClass = "MUBUFAddr64Table";
  let RowFields = ["OpName"];
  let ColFields = ["IsAddr64"];
  let KeyCol = ["0"];
  let ValueCols = [["1"]];
}

def getIfAddr64Inst : InstrMapping {
  let FilterClass = "MUBUFAddr64Table";
  let RowFields = ["OpName"];
  let ColFields = ["IsAddr64"];
  let KeyCol = ["1"];
  let ValueCols = [["1"]];
}

def getMUBUFNoLdsInst : InstrMapping {
  let FilterClass = "MUBUFLdsTable";
  let RowFields = ["OpName"];
  let ColFields = ["IsLds"];
  let KeyCol = ["1"];
  let ValueCols = [["0"]];
}

// Maps an atomic opcode to its version with a return value.
def getAtomicRetOp : InstrMapping {
  let FilterClass = "AtomicNoRet";
  let RowFields = ["NoRetOp"];
  let ColFields = ["IsRet"];
  let KeyCol = ["0"];
  let ValueCols = [["1"]];
}

// Maps an atomic opcode to its returnless version.
def getAtomicNoRetOp : InstrMapping {
  let FilterClass = "AtomicNoRet";
  let RowFields = ["NoRetOp"];
  let ColFields = ["IsRet"];
  let KeyCol = ["1"];
  let ValueCols = [["0"]];
}
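
// Illustrative use (opcode names hypothetical): the two forms of an atomic
// share the NoRetOp string through the AtomicNoRet class defined earlier,
// e.g.
//
//   def BUFFER_FOO_ATOMIC     : ..., AtomicNoRet<"buffer_foo_atomic", 0>;
//   def BUFFER_FOO_ATOMIC_RTN : ..., AtomicNoRet<"buffer_foo_atomic", 1>;
//
// so getAtomicRetOp/getAtomicNoRetOp can switch between the variant that
// returns the pre-op value and the one that does not.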

include "SIInstructions.td"

include "DSInstructions.td"
include "MIMGInstructions.td"