1 //===-- SIInstrInfo.td - SI Instruction Infos -------------*- tablegen -*--===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 def isWave32 : Predicate<"Subtarget->getWavefrontSize() == 32">,
10 AssemblerPredicate <"FeatureWavefrontSize32">;
11 def isWave64 : Predicate<"Subtarget->getWavefrontSize() == 64">,
12 AssemblerPredicate <"FeatureWavefrontSize64">;
14 def DisableInst : Predicate <"false">, AssemblerPredicate<"FeatureDisable">;
16 class GCNPredicateControl : PredicateControl {
17 Predicate SIAssemblerPredicate = isGFX6GFX7;
18 Predicate VIAssemblerPredicate = isGFX8GFX9;
21 // Execpt for the NONE field, this must be kept in sync with the
22 // SIEncodingFamily enum in AMDGPUInstrInfo.cpp
23 def SIEncodingFamily {
35 //===----------------------------------------------------------------------===//
37 //===----------------------------------------------------------------------===//
39 def AMDGPUclamp : SDNode<"AMDGPUISD::CLAMP", SDTFPUnaryOp>;
41 def SIsbuffer_load : SDNode<"AMDGPUISD::SBUFFER_LOAD",
42 SDTypeProfile<1, 4, [SDTCisVT<1, v4i32>, SDTCisVT<2, i32>, SDTCisVT<3, i1>,
44 [SDNPMayLoad, SDNPMemOperand]
47 def SIds_ordered_count : SDNode<"AMDGPUISD::DS_ORDERED_COUNT",
48 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, SDTCisVT<2, i16>]>,
49 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain, SDNPInGlue]
52 def SIatomic_inc : SDNode<"AMDGPUISD::ATOMIC_INC", SDTAtomic2,
53 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
56 def SIatomic_dec : SDNode<"AMDGPUISD::ATOMIC_DEC", SDTAtomic2,
57 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
60 def SDTAtomic2_f32 : SDTypeProfile<1, 2, [
61 SDTCisSameAs<0,2>, SDTCisFP<0>, SDTCisPtrTy<1>
64 def SIatomic_fmin : SDNode<"AMDGPUISD::ATOMIC_LOAD_FMIN", SDTAtomic2_f32,
65 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
68 def SIatomic_fmax : SDNode<"AMDGPUISD::ATOMIC_LOAD_FMAX", SDTAtomic2_f32,
69 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
72 // load_d16_{lo|hi} ptr, tied_input
73 def SIload_d16 : SDTypeProfile<1, 2, [
79 def SDTtbuffer_load : SDTypeProfile<1, 8,
81 SDTCisVT<1, v4i32>, // rsrc
82 SDTCisVT<2, i32>, // vindex(VGPR)
83 SDTCisVT<3, i32>, // voffset(VGPR)
84 SDTCisVT<4, i32>, // soffset(SGPR)
85 SDTCisVT<5, i32>, // offset(imm)
86 SDTCisVT<6, i32>, // format(imm)
87 SDTCisVT<7, i32>, // cachecontrol(imm)
88 SDTCisVT<8, i1> // idxen(imm)
91 def SItbuffer_load : SDNode<"AMDGPUISD::TBUFFER_LOAD_FORMAT", SDTtbuffer_load,
92 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]>;
93 def SItbuffer_load_d16 : SDNode<"AMDGPUISD::TBUFFER_LOAD_FORMAT_D16",
95 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]>;
97 def SDTtbuffer_store : SDTypeProfile<0, 9,
99 SDTCisVT<1, v4i32>, // rsrc
100 SDTCisVT<2, i32>, // vindex(VGPR)
101 SDTCisVT<3, i32>, // voffset(VGPR)
102 SDTCisVT<4, i32>, // soffset(SGPR)
103 SDTCisVT<5, i32>, // offset(imm)
104 SDTCisVT<6, i32>, // format(imm)
105 SDTCisVT<7, i32>, // cachecontrol(imm)
106 SDTCisVT<8, i1> // idxen(imm)
109 def SItbuffer_store : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT", SDTtbuffer_store,
110 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
111 def SItbuffer_store_d16 : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT_D16",
113 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
115 def SDTBufferLoad : SDTypeProfile<1, 7,
117 SDTCisVT<1, v4i32>, // rsrc
118 SDTCisVT<2, i32>, // vindex(VGPR)
119 SDTCisVT<3, i32>, // voffset(VGPR)
120 SDTCisVT<4, i32>, // soffset(SGPR)
121 SDTCisVT<5, i32>, // offset(imm)
122 SDTCisVT<6, i32>, // cachepolicy(imm)
123 SDTCisVT<7, i1>]>; // idxen(imm)
125 def SIbuffer_load : SDNode <"AMDGPUISD::BUFFER_LOAD", SDTBufferLoad,
126 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
127 def SIbuffer_load_ubyte : SDNode <"AMDGPUISD::BUFFER_LOAD_UBYTE", SDTBufferLoad,
128 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
129 def SIbuffer_load_ushort : SDNode <"AMDGPUISD::BUFFER_LOAD_USHORT", SDTBufferLoad,
130 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
131 def SIbuffer_load_byte : SDNode <"AMDGPUISD::BUFFER_LOAD_BYTE", SDTBufferLoad,
132 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
133 def SIbuffer_load_short: SDNode <"AMDGPUISD::BUFFER_LOAD_SHORT", SDTBufferLoad,
134 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
135 def SIbuffer_load_format : SDNode <"AMDGPUISD::BUFFER_LOAD_FORMAT", SDTBufferLoad,
136 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
137 def SIbuffer_load_format_d16 : SDNode <"AMDGPUISD::BUFFER_LOAD_FORMAT_D16",
139 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
141 def SDTBufferStore : SDTypeProfile<0, 8,
143 SDTCisVT<1, v4i32>, // rsrc
144 SDTCisVT<2, i32>, // vindex(VGPR)
145 SDTCisVT<3, i32>, // voffset(VGPR)
146 SDTCisVT<4, i32>, // soffset(SGPR)
147 SDTCisVT<5, i32>, // offset(imm)
148 SDTCisVT<6, i32>, // cachepolicy(imm)
149 SDTCisVT<7, i1>]>; // idxen(imm)
151 def SIbuffer_store : SDNode <"AMDGPUISD::BUFFER_STORE", SDTBufferStore,
152 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
153 def SIbuffer_store_byte: SDNode <"AMDGPUISD::BUFFER_STORE_BYTE",
155 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
156 def SIbuffer_store_short : SDNode <"AMDGPUISD::BUFFER_STORE_SHORT",
158 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
159 def SIbuffer_store_format : SDNode <"AMDGPUISD::BUFFER_STORE_FORMAT",
161 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
162 def SIbuffer_store_format_d16 : SDNode <"AMDGPUISD::BUFFER_STORE_FORMAT_D16",
164 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
166 class SDBufferAtomic<string opcode> : SDNode <opcode,
168 [SDTCisVT<2, v4i32>, // rsrc
169 SDTCisVT<3, i32>, // vindex(VGPR)
170 SDTCisVT<4, i32>, // voffset(VGPR)
171 SDTCisVT<5, i32>, // soffset(SGPR)
172 SDTCisVT<6, i32>, // offset(imm)
173 SDTCisVT<7, i32>, // cachepolicy(imm)
174 SDTCisVT<8, i1>]>, // idxen(imm)
175 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad, SDNPMayStore]
178 class SDBufferAtomicNoRtn<string opcode, ValueType ty> : SDNode <opcode,
180 [SDTCisVT<0, ty>, // vdata
181 SDTCisVT<1, v4i32>, // rsrc
182 SDTCisVT<2, i32>, // vindex(VGPR)
183 SDTCisVT<3, i32>, // voffset(VGPR)
184 SDTCisVT<4, i32>, // soffset(SGPR)
185 SDTCisVT<5, i32>, // offset(imm)
186 SDTCisVT<6, i32>, // cachepolicy(imm)
187 SDTCisVT<7, i1>]>, // idxen(imm)
188 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad, SDNPMayStore]
191 def SIbuffer_atomic_swap : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SWAP">;
192 def SIbuffer_atomic_add : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_ADD">;
193 def SIbuffer_atomic_sub : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SUB">;
194 def SIbuffer_atomic_smin : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SMIN">;
195 def SIbuffer_atomic_umin : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_UMIN">;
196 def SIbuffer_atomic_smax : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SMAX">;
197 def SIbuffer_atomic_umax : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_UMAX">;
198 def SIbuffer_atomic_and : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_AND">;
199 def SIbuffer_atomic_or : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_OR">;
200 def SIbuffer_atomic_xor : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_XOR">;
201 def SIbuffer_atomic_fadd : SDBufferAtomicNoRtn <"AMDGPUISD::BUFFER_ATOMIC_FADD", f32>;
202 def SIbuffer_atomic_pk_fadd : SDBufferAtomicNoRtn <"AMDGPUISD::BUFFER_ATOMIC_PK_FADD", v2f16>;
204 def SIbuffer_atomic_cmpswap : SDNode <"AMDGPUISD::BUFFER_ATOMIC_CMPSWAP",
206 [SDTCisVT<0, i32>, // dst
207 SDTCisVT<1, i32>, // src
208 SDTCisVT<2, i32>, // cmp
209 SDTCisVT<3, v4i32>, // rsrc
210 SDTCisVT<4, i32>, // vindex(VGPR)
211 SDTCisVT<5, i32>, // voffset(VGPR)
212 SDTCisVT<6, i32>, // soffset(SGPR)
213 SDTCisVT<7, i32>, // offset(imm)
214 SDTCisVT<8, i32>, // cachepolicy(imm)
215 SDTCisVT<9, i1>]>, // idxen(imm)
216 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad, SDNPMayStore]
219 class SDGlobalAtomicNoRtn<string opcode, ValueType ty> : SDNode <opcode,
221 [SDTCisPtrTy<0>, // vaddr
222 SDTCisVT<1, ty>]>, // vdata
223 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad, SDNPMayStore]
226 def SIglobal_atomic_fadd : SDGlobalAtomicNoRtn <"AMDGPUISD::ATOMIC_FADD", f32>;
227 def SIglobal_atomic_pk_fadd : SDGlobalAtomicNoRtn <"AMDGPUISD::ATOMIC_PK_FADD", v2f16>;
229 def SIpc_add_rel_offset : SDNode<"AMDGPUISD::PC_ADD_REL_OFFSET",
230 SDTypeProfile<1, 2, [SDTCisVT<0, iPTR>, SDTCisSameAs<0,1>, SDTCisSameAs<0,2>]>
233 def SIlds : SDNode<"AMDGPUISD::LDS",
234 SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisSameAs<0,1>]>
237 def SIload_d16_lo : SDNode<"AMDGPUISD::LOAD_D16_LO",
239 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]
242 def SIload_d16_lo_u8 : SDNode<"AMDGPUISD::LOAD_D16_LO_U8",
244 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]
247 def SIload_d16_lo_i8 : SDNode<"AMDGPUISD::LOAD_D16_LO_I8",
249 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]
252 def SIload_d16_hi : SDNode<"AMDGPUISD::LOAD_D16_HI",
254 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]
257 def SIload_d16_hi_u8 : SDNode<"AMDGPUISD::LOAD_D16_HI_U8",
259 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]
262 def SIload_d16_hi_i8 : SDNode<"AMDGPUISD::LOAD_D16_HI_I8",
264 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]
267 //===----------------------------------------------------------------------===//
269 //===----------------------------------------------------------------------===//
271 // Returns 1 if the source arguments have modifiers, 0 if they do not.
272 // XXX - do f16 instructions?
273 class isFloatType<ValueType SrcVT> {
275 !if(!eq(SrcVT.Value, f16.Value), 1,
276 !if(!eq(SrcVT.Value, f32.Value), 1,
277 !if(!eq(SrcVT.Value, f64.Value), 1,
278 !if(!eq(SrcVT.Value, v2f16.Value), 1,
279 !if(!eq(SrcVT.Value, v4f16.Value), 1,
283 class isIntType<ValueType SrcVT> {
285 !if(!eq(SrcVT.Value, i16.Value), 1,
286 !if(!eq(SrcVT.Value, i32.Value), 1,
287 !if(!eq(SrcVT.Value, i64.Value), 1,
291 class isPackedType<ValueType SrcVT> {
293 !if(!eq(SrcVT.Value, v2i16.Value), 1,
294 !if(!eq(SrcVT.Value, v2f16.Value), 1,
295 !if(!eq(SrcVT.Value, v4f16.Value), 1, 0)
299 //===----------------------------------------------------------------------===//
300 // PatFrags for global memory operations
301 //===----------------------------------------------------------------------===//
303 defm atomic_inc_global : global_binary_atomic_op<SIatomic_inc>;
304 defm atomic_dec_global : global_binary_atomic_op<SIatomic_dec>;
306 def atomic_inc_local : local_binary_atomic_op<SIatomic_inc>;
307 def atomic_dec_local : local_binary_atomic_op<SIatomic_dec>;
308 def atomic_load_fadd_local : local_binary_atomic_op<atomic_load_fadd>;
309 def atomic_load_fmin_local : local_binary_atomic_op<SIatomic_fmin>;
310 def atomic_load_fmax_local : local_binary_atomic_op<SIatomic_fmax>;
312 //===----------------------------------------------------------------------===//
313 // SDNodes PatFrags for loads/stores with a glue input.
314 // This is for SDNodes and PatFrag for local loads and stores to
315 // enable s_mov_b32 m0, -1 to be glued to the memory instructions.
317 // These mirror the regular load/store PatFrags and rely on special
318 // processing during Select() to add the glued copy.
320 //===----------------------------------------------------------------------===//
322 def AMDGPUld_glue : SDNode <"ISD::LOAD", SDTLoad,
323 [SDNPHasChain, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
326 def AMDGPUatomic_ld_glue : SDNode <"ISD::ATOMIC_LOAD", SDTAtomicLoad,
327 [SDNPHasChain, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
330 def unindexedload_glue : PatFrag <(ops node:$ptr), (AMDGPUld_glue node:$ptr)> {
334 def load_glue : PatFrag <(ops node:$ptr), (unindexedload_glue node:$ptr)> {
335 let IsNonExtLoad = 1;
338 def atomic_load_32_glue : PatFrag<(ops node:$ptr),
339 (AMDGPUatomic_ld_glue node:$ptr)> {
344 def atomic_load_64_glue : PatFrag<(ops node:$ptr),
345 (AMDGPUatomic_ld_glue node:$ptr)> {
350 def extload_glue : PatFrag<(ops node:$ptr), (load_glue node:$ptr)> {
352 let IsAnyExtLoad = 1;
355 def sextload_glue : PatFrag<(ops node:$ptr), (unindexedload_glue node:$ptr)> {
357 let IsSignExtLoad = 1;
360 def zextload_glue : PatFrag<(ops node:$ptr), (unindexedload_glue node:$ptr)> {
362 let IsZeroExtLoad = 1;
365 def extloadi8_glue : PatFrag<(ops node:$ptr), (extload_glue node:$ptr)> {
370 def zextloadi8_glue : PatFrag<(ops node:$ptr), (zextload_glue node:$ptr)> {
375 def extloadi16_glue : PatFrag<(ops node:$ptr), (extload_glue node:$ptr)> {
380 def zextloadi16_glue : PatFrag<(ops node:$ptr), (zextload_glue node:$ptr)> {
385 def sextloadi8_glue : PatFrag<(ops node:$ptr), (sextload_glue node:$ptr)> {
390 def sextloadi16_glue : PatFrag<(ops node:$ptr), (sextload_glue node:$ptr)> {
395 def load_glue_align8 : Aligned8Bytes <
396 (ops node:$ptr), (load_glue node:$ptr)
398 def load_glue_align16 : Aligned16Bytes <
399 (ops node:$ptr), (load_glue node:$ptr)
403 def load_local_m0 : LoadFrag<load_glue>, LocalAddress;
404 def sextloadi8_local_m0 : LoadFrag<sextloadi8_glue>, LocalAddress;
405 def sextloadi16_local_m0 : LoadFrag<sextloadi16_glue>, LocalAddress;
406 def extloadi8_local_m0 : LoadFrag<extloadi8_glue>, LocalAddress;
407 def zextloadi8_local_m0 : LoadFrag<zextloadi8_glue>, LocalAddress;
408 def extloadi16_local_m0 : LoadFrag<extloadi16_glue>, LocalAddress;
409 def zextloadi16_local_m0 : LoadFrag<zextloadi16_glue>, LocalAddress;
410 def load_align8_local_m0 : LoadFrag <load_glue_align8>, LocalAddress;
411 def load_align16_local_m0 : LoadFrag <load_glue_align16>, LocalAddress;
412 def atomic_load_32_local_m0 : LoadFrag<atomic_load_32_glue>, LocalAddress;
413 def atomic_load_64_local_m0 : LoadFrag<atomic_load_64_glue>, LocalAddress;
416 def AMDGPUst_glue : SDNode <"ISD::STORE", SDTStore,
417 [SDNPHasChain, SDNPMayStore, SDNPMemOperand, SDNPInGlue]
420 def AMDGPUatomic_st_glue : SDNode <"ISD::ATOMIC_STORE", SDTAtomicStore,
421 [SDNPHasChain, SDNPMayStore, SDNPMemOperand, SDNPInGlue]
424 def atomic_store_glue : PatFrag<(ops node:$ptr, node:$val),
425 (AMDGPUatomic_st_glue node:$ptr, node:$val)> {
428 def unindexedstore_glue : PatFrag<(ops node:$val, node:$ptr),
429 (AMDGPUst_glue node:$val, node:$ptr)> {
434 def store_glue : PatFrag<(ops node:$val, node:$ptr),
435 (unindexedstore_glue node:$val, node:$ptr)> {
437 let IsTruncStore = 0;
440 def truncstore_glue : PatFrag<(ops node:$val, node:$ptr),
441 (unindexedstore_glue node:$val, node:$ptr)> {
443 let IsTruncStore = 1;
446 def truncstorei8_glue : PatFrag<(ops node:$val, node:$ptr),
447 (truncstore_glue node:$val, node:$ptr)> {
452 def truncstorei16_glue : PatFrag<(ops node:$val, node:$ptr),
453 (truncstore_glue node:$val, node:$ptr)> {
458 def store_glue_align8 : Aligned8Bytes <
459 (ops node:$value, node:$ptr), (store_glue node:$value, node:$ptr)
462 def store_glue_align16 : Aligned16Bytes <
463 (ops node:$value, node:$ptr), (store_glue node:$value, node:$ptr)
466 def store_local_m0 : StoreFrag<store_glue>, LocalAddress;
467 def truncstorei8_local_m0 : StoreFrag<truncstorei8_glue>, LocalAddress;
468 def truncstorei16_local_m0 : StoreFrag<truncstorei16_glue>, LocalAddress;
469 def atomic_store_local_m0 : StoreFrag<AMDGPUatomic_st_glue>, LocalAddress;
471 def store_align8_local_m0 : StoreFrag<store_glue_align8>, LocalAddress;
472 def store_align16_local_m0 : StoreFrag<store_glue_align16>, LocalAddress;
474 def si_setcc_uniform : PatFrag <
475 (ops node:$lhs, node:$rhs, node:$cond),
476 (setcc node:$lhs, node:$rhs, node:$cond), [{
477 for (SDNode *Use : N->uses()) {
478 if (Use->isMachineOpcode() || Use->getOpcode() != ISD::CopyToReg)
481 unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
482 if (Reg != AMDGPU::SCC)
488 //===----------------------------------------------------------------------===//
489 // SDNodes PatFrags for d16 loads
490 //===----------------------------------------------------------------------===//
492 class LoadD16Frag <SDPatternOperator op> : PatFrag<(ops node:$ptr, node:$tied_in), (op node:$ptr, node:$tied_in)>;
493 class LocalLoadD16 <SDPatternOperator op> : LoadD16Frag <op>, LocalAddress;
494 class GlobalLoadD16 <SDPatternOperator op> : LoadD16Frag <op>, GlobalLoadAddress;
495 class PrivateLoadD16 <SDPatternOperator op> : LoadD16Frag <op>, PrivateAddress;
496 class FlatLoadD16 <SDPatternOperator op> : LoadD16Frag <op>, FlatLoadAddress;
498 def load_d16_hi_local : LocalLoadD16 <SIload_d16_hi>;
499 def az_extloadi8_d16_hi_local : LocalLoadD16 <SIload_d16_hi_u8>;
500 def sextloadi8_d16_hi_local : LocalLoadD16 <SIload_d16_hi_i8>;
502 def load_d16_hi_global : GlobalLoadD16 <SIload_d16_hi>;
503 def az_extloadi8_d16_hi_global : GlobalLoadD16 <SIload_d16_hi_u8>;
504 def sextloadi8_d16_hi_global : GlobalLoadD16 <SIload_d16_hi_i8>;
506 def load_d16_hi_private : PrivateLoadD16 <SIload_d16_hi>;
507 def az_extloadi8_d16_hi_private : PrivateLoadD16 <SIload_d16_hi_u8>;
508 def sextloadi8_d16_hi_private : PrivateLoadD16 <SIload_d16_hi_i8>;
510 def load_d16_hi_flat : FlatLoadD16 <SIload_d16_hi>;
511 def az_extloadi8_d16_hi_flat : FlatLoadD16 <SIload_d16_hi_u8>;
512 def sextloadi8_d16_hi_flat : FlatLoadD16 <SIload_d16_hi_i8>;
515 def load_d16_lo_local : LocalLoadD16 <SIload_d16_lo>;
516 def az_extloadi8_d16_lo_local : LocalLoadD16 <SIload_d16_lo_u8>;
517 def sextloadi8_d16_lo_local : LocalLoadD16 <SIload_d16_lo_i8>;
519 def load_d16_lo_global : GlobalLoadD16 <SIload_d16_lo>;
520 def az_extloadi8_d16_lo_global : GlobalLoadD16 <SIload_d16_lo_u8>;
521 def sextloadi8_d16_lo_global : GlobalLoadD16 <SIload_d16_lo_i8>;
523 def load_d16_lo_private : PrivateLoadD16 <SIload_d16_lo>;
524 def az_extloadi8_d16_lo_private : PrivateLoadD16 <SIload_d16_lo_u8>;
525 def sextloadi8_d16_lo_private : PrivateLoadD16 <SIload_d16_lo_i8>;
527 def load_d16_lo_flat : FlatLoadD16 <SIload_d16_lo>;
528 def az_extloadi8_d16_lo_flat : FlatLoadD16 <SIload_d16_lo_u8>;
529 def sextloadi8_d16_lo_flat : FlatLoadD16 <SIload_d16_lo_i8>;
533 def lshr_rev : PatFrag <
534 (ops node:$src1, node:$src0),
538 def ashr_rev : PatFrag <
539 (ops node:$src1, node:$src0),
543 def lshl_rev : PatFrag <
544 (ops node:$src1, node:$src0),
548 multiclass SIAtomicM0Glue2 <string op_name, bit is_amdgpu = 0,
549 SDTypeProfile tc = SDTAtomic2> {
552 !if(is_amdgpu, "AMDGPUISD", "ISD")#"::ATOMIC_"#op_name, tc,
553 [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
556 def _local_m0 : local_binary_atomic_op <!cast<SDNode>(NAME#"_glue")>;
557 def _region_m0 : region_binary_atomic_op <!cast<SDNode>(NAME#"_glue")>;
560 defm atomic_load_add : SIAtomicM0Glue2 <"LOAD_ADD">;
561 defm atomic_load_sub : SIAtomicM0Glue2 <"LOAD_SUB">;
562 defm atomic_inc : SIAtomicM0Glue2 <"INC", 1>;
563 defm atomic_dec : SIAtomicM0Glue2 <"DEC", 1>;
564 defm atomic_load_and : SIAtomicM0Glue2 <"LOAD_AND">;
565 defm atomic_load_min : SIAtomicM0Glue2 <"LOAD_MIN">;
566 defm atomic_load_max : SIAtomicM0Glue2 <"LOAD_MAX">;
567 defm atomic_load_or : SIAtomicM0Glue2 <"LOAD_OR">;
568 defm atomic_load_xor : SIAtomicM0Glue2 <"LOAD_XOR">;
569 defm atomic_load_umin : SIAtomicM0Glue2 <"LOAD_UMIN">;
570 defm atomic_load_umax : SIAtomicM0Glue2 <"LOAD_UMAX">;
571 defm atomic_swap : SIAtomicM0Glue2 <"SWAP">;
572 defm atomic_load_fadd : SIAtomicM0Glue2 <"LOAD_FADD", 0, SDTAtomic2_f32>;
573 defm atomic_load_fmin : SIAtomicM0Glue2 <"LOAD_FMIN", 1, SDTAtomic2_f32>;
574 defm atomic_load_fmax : SIAtomicM0Glue2 <"LOAD_FMAX", 1, SDTAtomic2_f32>;
576 def atomic_cmp_swap_glue : SDNode <"ISD::ATOMIC_CMP_SWAP", SDTAtomic3,
577 [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
580 def atomic_cmp_swap_local_m0 : AtomicCmpSwapLocal<atomic_cmp_swap_glue>;
581 def atomic_cmp_swap_region_m0 : AtomicCmpSwapRegion<atomic_cmp_swap_glue>;
584 def as_i1imm : SDNodeXForm<imm, [{
585 return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i1);
588 def as_i8imm : SDNodeXForm<imm, [{
589 return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i8);
592 def as_i16imm : SDNodeXForm<imm, [{
593 return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i16);
596 def as_i32imm: SDNodeXForm<imm, [{
597 return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i32);
600 def as_i64imm: SDNodeXForm<imm, [{
601 return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i64);
604 def cond_as_i32imm: SDNodeXForm<cond, [{
605 return CurDAG->getTargetConstant(N->get(), SDLoc(N), MVT::i32);
608 // Copied from the AArch64 backend:
609 def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
610 return CurDAG->getTargetConstant(
611 N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
614 def frameindex_to_targetframeindex : SDNodeXForm<frameindex, [{
615 auto FI = cast<FrameIndexSDNode>(N);
616 return CurDAG->getTargetFrameIndex(FI->getIndex(), MVT::i32);
619 // Copied from the AArch64 backend:
620 def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
621 return CurDAG->getTargetConstant(
622 N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
625 class bitextract_imm<int bitnum> : SDNodeXForm<imm, [{
626 uint64_t Imm = N->getZExtValue();
627 unsigned Bit = (Imm >> }] # bitnum # [{ ) & 1;
628 return CurDAG->getTargetConstant(Bit, SDLoc(N), MVT::i1);
631 def SIMM16bit : ImmLeaf <i32,
632 [{return isInt<16>(Imm);}]
635 def UIMM16bit : ImmLeaf <i32,
636 [{return isUInt<16>(Imm); }]
639 class InlineImm <ValueType vt> : PatLeaf <(vt imm), [{
640 return isInlineImmediate(N);
643 class InlineFPImm <ValueType vt> : PatLeaf <(vt fpimm), [{
644 return isInlineImmediate(N);
647 class VGPRImm <dag frag> : PatLeaf<frag, [{
651 def NegateImm : SDNodeXForm<imm, [{
652 return CurDAG->getConstant(-N->getSExtValue(), SDLoc(N), MVT::i32);
655 // TODO: When FP inline imm values work?
656 def NegSubInlineConst32 : ImmLeaf<i32, [{
657 return Imm < -16 && Imm >= -64;
660 def NegSubInlineConst16 : ImmLeaf<i16, [{
661 return Imm < -16 && Imm >= -64;
664 def ShiftAmt32Imm : PatLeaf <(imm), [{
665 return N->getZExtValue() < 32;
668 def getNegV2I16Imm : SDNodeXForm<build_vector, [{
669 return SDValue(packNegConstantV2I16(N, *CurDAG), 0);
672 def NegSubInlineConstV216 : PatLeaf<(build_vector), [{
673 assert(N->getNumOperands() == 2);
674 assert(N->getOperand(0).getValueType().getSizeInBits() == 16);
675 SDValue Src0 = N->getOperand(0);
676 SDValue Src1 = N->getOperand(1);
678 return isNegInlineImmediate(Src0.getNode());
680 return (isNullConstantOrUndef(Src0) && isNegInlineImmediate(Src1.getNode())) ||
681 (isNullConstantOrUndef(Src1) && isNegInlineImmediate(Src0.getNode()));
684 //===----------------------------------------------------------------------===//
686 //===----------------------------------------------------------------------===//
688 def SoppBrTarget : AsmOperandClass {
689 let Name = "SoppBrTarget";
690 let ParserMethod = "parseSOppBrTarget";
693 def sopp_brtarget : Operand<OtherVT> {
694 let EncoderMethod = "getSOPPBrEncoding";
695 let DecoderMethod = "decodeSoppBrTarget";
696 let OperandType = "OPERAND_PCREL";
697 let ParserMatchClass = SoppBrTarget;
700 def si_ga : Operand<iPTR>;
702 def InterpSlotMatchClass : AsmOperandClass {
703 let Name = "InterpSlot";
704 let PredicateMethod = "isInterpSlot";
705 let ParserMethod = "parseInterpSlot";
706 let RenderMethod = "addImmOperands";
709 def InterpSlot : Operand<i32> {
710 let PrintMethod = "printInterpSlot";
711 let ParserMatchClass = InterpSlotMatchClass;
712 let OperandType = "OPERAND_IMMEDIATE";
715 def AttrMatchClass : AsmOperandClass {
717 let PredicateMethod = "isInterpAttr";
718 let ParserMethod = "parseInterpAttr";
719 let RenderMethod = "addImmOperands";
722 // It appears to be necessary to create a separate operand for this to
723 // be able to parse attr<num> with no space.
724 def Attr : Operand<i32> {
725 let PrintMethod = "printInterpAttr";
726 let ParserMatchClass = AttrMatchClass;
727 let OperandType = "OPERAND_IMMEDIATE";
730 def AttrChanMatchClass : AsmOperandClass {
731 let Name = "AttrChan";
732 let PredicateMethod = "isAttrChan";
733 let RenderMethod = "addImmOperands";
736 def AttrChan : Operand<i32> {
737 let PrintMethod = "printInterpAttrChan";
738 let ParserMatchClass = AttrChanMatchClass;
739 let OperandType = "OPERAND_IMMEDIATE";
742 def SendMsgMatchClass : AsmOperandClass {
743 let Name = "SendMsg";
744 let PredicateMethod = "isSendMsg";
745 let ParserMethod = "parseSendMsgOp";
746 let RenderMethod = "addImmOperands";
749 def SwizzleMatchClass : AsmOperandClass {
750 let Name = "Swizzle";
751 let PredicateMethod = "isSwizzle";
752 let ParserMethod = "parseSwizzleOp";
753 let RenderMethod = "addImmOperands";
757 def EndpgmMatchClass : AsmOperandClass {
758 let Name = "EndpgmImm";
759 let PredicateMethod = "isEndpgm";
760 let ParserMethod = "parseEndpgmOp";
761 let RenderMethod = "addImmOperands";
765 def ExpTgtMatchClass : AsmOperandClass {
767 let PredicateMethod = "isExpTgt";
768 let ParserMethod = "parseExpTgt";
769 let RenderMethod = "printExpTgt";
772 def SendMsgImm : Operand<i32> {
773 let PrintMethod = "printSendMsg";
774 let ParserMatchClass = SendMsgMatchClass;
777 def SwizzleImm : Operand<i16> {
778 let PrintMethod = "printSwizzle";
779 let ParserMatchClass = SwizzleMatchClass;
782 def EndpgmImm : Operand<i16> {
783 let PrintMethod = "printEndpgm";
784 let ParserMatchClass = EndpgmMatchClass;
787 def SWaitMatchClass : AsmOperandClass {
788 let Name = "SWaitCnt";
789 let RenderMethod = "addImmOperands";
790 let ParserMethod = "parseSWaitCntOps";
793 def VReg32OrOffClass : AsmOperandClass {
794 let Name = "VReg32OrOff";
795 let ParserMethod = "parseVReg32OrOff";
798 def WAIT_FLAG : Operand <i32> {
799 let ParserMatchClass = SWaitMatchClass;
800 let PrintMethod = "printWaitFlag";
801 let OperandType = "OPERAND_IMMEDIATE";
804 include "SIInstrFormats.td"
805 include "VIInstrFormats.td"
807 def BoolReg : AsmOperandClass {
808 let Name = "BoolReg";
809 let ParserMethod = "parseBoolReg";
810 let RenderMethod = "addRegOperands";
813 class BoolRC : RegisterOperand<SReg_1> {
814 let ParserMatchClass = BoolReg;
815 let DecoderMethod = "decodeBoolReg";
818 def SSrc_i1 : RegisterOperand<SReg_1_XEXEC> {
819 let ParserMatchClass = BoolReg;
820 let DecoderMethod = "decodeBoolReg";
823 def VOPDstS64orS32 : BoolRC {
824 let PrintMethod = "printVOPDst";
827 // SCSrc_i1 is the operand for pseudo instructions only.
828 // Boolean immeadiates shall not be exposed to codegen instructions.
829 def SCSrc_i1 : RegisterOperand<SReg_1_XEXEC> {
830 let OperandNamespace = "AMDGPU";
831 let OperandType = "OPERAND_REG_IMM_INT32";
832 let ParserMatchClass = BoolReg;
833 let DecoderMethod = "decodeBoolReg";
836 // ===----------------------------------------------------------------------===//
837 // ExpSrc* Special cases for exp src operands which are printed as
838 // "off" depending on en operand.
839 // ===----------------------------------------------------------------------===//
841 def ExpSrc0 : RegisterOperand<VGPR_32> {
842 let PrintMethod = "printExpSrc0";
843 let ParserMatchClass = VReg32OrOffClass;
846 def ExpSrc1 : RegisterOperand<VGPR_32> {
847 let PrintMethod = "printExpSrc1";
848 let ParserMatchClass = VReg32OrOffClass;
851 def ExpSrc2 : RegisterOperand<VGPR_32> {
852 let PrintMethod = "printExpSrc2";
853 let ParserMatchClass = VReg32OrOffClass;
856 def ExpSrc3 : RegisterOperand<VGPR_32> {
857 let PrintMethod = "printExpSrc3";
858 let ParserMatchClass = VReg32OrOffClass;
861 class SDWASrc<ValueType vt> : RegisterOperand<VS_32> {
862 let OperandNamespace = "AMDGPU";
863 string Type = !if(isFloatType<vt>.ret, "FP", "INT");
864 let OperandType = "OPERAND_REG_INLINE_C_"#Type#vt.Size;
865 let DecoderMethod = "decodeSDWASrc"#vt.Size;
866 let EncoderMethod = "getSDWASrcEncoding";
869 def SDWASrc_i32 : SDWASrc<i32>;
870 def SDWASrc_i16 : SDWASrc<i16>;
871 def SDWASrc_f32 : SDWASrc<f32>;
872 def SDWASrc_f16 : SDWASrc<f16>;
874 def SDWAVopcDst : BoolRC {
875 let OperandNamespace = "AMDGPU";
876 let OperandType = "OPERAND_SDWA_VOPC_DST";
877 let EncoderMethod = "getSDWAVopcDstEncoding";
878 let DecoderMethod = "decodeSDWAVopcDst";
879 let PrintMethod = "printVOPDst";
882 class NamedMatchClass<string CName, bit Optional = 1> : AsmOperandClass {
883 let Name = "Imm"#CName;
884 let PredicateMethod = "is"#CName;
885 let ParserMethod = !if(Optional, "parseOptionalOperand", "parse"#CName);
886 let RenderMethod = "addImmOperands";
887 let IsOptional = Optional;
888 let DefaultMethod = !if(Optional, "default"#CName, ?);
891 class NamedOperandBit<string Name, AsmOperandClass MatchClass> : Operand<i1> {
892 let PrintMethod = "print"#Name;
893 let ParserMatchClass = MatchClass;
896 class NamedOperandU8<string Name, AsmOperandClass MatchClass> : Operand<i8> {
897 let PrintMethod = "print"#Name;
898 let ParserMatchClass = MatchClass;
901 class NamedOperandU16<string Name, AsmOperandClass MatchClass> : Operand<i16> {
902 let PrintMethod = "print"#Name;
903 let ParserMatchClass = MatchClass;
906 class NamedOperandU32<string Name, AsmOperandClass MatchClass> : Operand<i32> {
907 let PrintMethod = "print"#Name;
908 let ParserMatchClass = MatchClass;
911 class NamedOperandU32Default0<string Name, AsmOperandClass MatchClass> :
912 OperandWithDefaultOps<i32, (ops (i32 0))> {
913 let PrintMethod = "print"#Name;
914 let ParserMatchClass = MatchClass;
917 let OperandType = "OPERAND_IMMEDIATE" in {
919 def offen : NamedOperandBit<"Offen", NamedMatchClass<"Offen">>;
920 def idxen : NamedOperandBit<"Idxen", NamedMatchClass<"Idxen">>;
921 def addr64 : NamedOperandBit<"Addr64", NamedMatchClass<"Addr64">>;
923 def flat_offset : NamedOperandU16<"FlatOffset", NamedMatchClass<"FlatOffset">>;
924 def offset : NamedOperandU16<"Offset", NamedMatchClass<"Offset">>;
925 def offset0 : NamedOperandU8<"Offset0", NamedMatchClass<"Offset0">>;
926 def offset1 : NamedOperandU8<"Offset1", NamedMatchClass<"Offset1">>;
928 def gds : NamedOperandBit<"GDS", NamedMatchClass<"GDS">>;
930 def omod : NamedOperandU32<"OModSI", NamedMatchClass<"OModSI">>;
931 def clampmod : NamedOperandBit<"ClampSI", NamedMatchClass<"ClampSI">>;
932 def highmod : NamedOperandBit<"High", NamedMatchClass<"High">>;
934 def DLC : NamedOperandBit<"DLC", NamedMatchClass<"DLC">>;
935 def GLC : NamedOperandBit<"GLC", NamedMatchClass<"GLC">>;
936 def SLC : NamedOperandBit<"SLC", NamedMatchClass<"SLC">>;
937 def TFE : NamedOperandBit<"TFE", NamedMatchClass<"TFE">>;
938 def UNorm : NamedOperandBit<"UNorm", NamedMatchClass<"UNorm">>;
939 def DA : NamedOperandBit<"DA", NamedMatchClass<"DA">>;
940 def R128A16 : NamedOperandBit<"R128A16", NamedMatchClass<"R128A16">>;
941 def D16 : NamedOperandBit<"D16", NamedMatchClass<"D16">>;
942 def LWE : NamedOperandBit<"LWE", NamedMatchClass<"LWE">>;
943 def exp_compr : NamedOperandBit<"ExpCompr", NamedMatchClass<"ExpCompr">>;
944 def exp_vm : NamedOperandBit<"ExpVM", NamedMatchClass<"ExpVM">>;
946 def FORMAT : NamedOperandU8<"FORMAT", NamedMatchClass<"FORMAT">>;
948 def DMask : NamedOperandU16<"DMask", NamedMatchClass<"DMask">>;
949 def Dim : NamedOperandU8<"Dim", NamedMatchClass<"Dim", 0>>;
951 def dpp8 : NamedOperandU32<"DPP8", NamedMatchClass<"DPP8", 0>>;
953 def dpp_ctrl : NamedOperandU32<"DPPCtrl", NamedMatchClass<"DPPCtrl", 0>>;
954 def row_mask : NamedOperandU32<"RowMask", NamedMatchClass<"RowMask">>;
955 def bank_mask : NamedOperandU32<"BankMask", NamedMatchClass<"BankMask">>;
956 def bound_ctrl : NamedOperandBit<"BoundCtrl", NamedMatchClass<"BoundCtrl">>;
957 def FI : NamedOperandU32<"FI", NamedMatchClass<"FI">>;
959 def dst_sel : NamedOperandU32<"SDWADstSel", NamedMatchClass<"SDWADstSel">>;
960 def src0_sel : NamedOperandU32<"SDWASrc0Sel", NamedMatchClass<"SDWASrc0Sel">>;
961 def src1_sel : NamedOperandU32<"SDWASrc1Sel", NamedMatchClass<"SDWASrc1Sel">>;
962 def dst_unused : NamedOperandU32<"SDWADstUnused", NamedMatchClass<"SDWADstUnused">>;
964 def op_sel : NamedOperandU32Default0<"OpSel", NamedMatchClass<"OpSel">>;
965 def op_sel_hi : NamedOperandU32Default0<"OpSelHi", NamedMatchClass<"OpSelHi">>;
966 def neg_lo : NamedOperandU32Default0<"NegLo", NamedMatchClass<"NegLo">>;
967 def neg_hi : NamedOperandU32Default0<"NegHi", NamedMatchClass<"NegHi">>;
969 def blgp : NamedOperandU32<"BLGP", NamedMatchClass<"BLGP">>;
970 def cbsz : NamedOperandU32<"CBSZ", NamedMatchClass<"CBSZ">>;
971 def abid : NamedOperandU32<"ABID", NamedMatchClass<"ABID">>;
973 def hwreg : NamedOperandU16<"Hwreg", NamedMatchClass<"Hwreg", 0>>;
975 def exp_tgt : NamedOperandU8<"ExpTgt", NamedMatchClass<"ExpTgt", 0>> {
979 } // End OperandType = "OPERAND_IMMEDIATE"
981 class KImmMatchClass<int size> : AsmOperandClass {
982 let Name = "KImmFP"#size;
983 let PredicateMethod = "isKImmFP"#size;
984 let ParserMethod = "parseImm";
985 let RenderMethod = "addKImmFP"#size#"Operands";
988 class kimmOperand<ValueType vt> : Operand<vt> {
989 let OperandNamespace = "AMDGPU";
990 let OperandType = "OPERAND_KIMM"#vt.Size;
991 let PrintMethod = "printU"#vt.Size#"ImmOperand";
992 let ParserMatchClass = !cast<AsmOperandClass>("KImmFP"#vt.Size#"MatchClass");
995 // 32-bit VALU immediate operand that uses the constant bus.
996 def KImmFP32MatchClass : KImmMatchClass<32>;
997 def f32kimm : kimmOperand<i32>;
999 // 32-bit VALU immediate operand with a 16-bit value that uses the
1001 def KImmFP16MatchClass : KImmMatchClass<16>;
1002 def f16kimm : kimmOperand<i16>;
1004 class FPInputModsMatchClass <int opSize> : AsmOperandClass {
1005 let Name = "RegOrImmWithFP"#opSize#"InputMods";
1006 let ParserMethod = "parseRegOrImmWithFPInputMods";
1007 let PredicateMethod = "isRegOrImmWithFP"#opSize#"InputMods";
1010 def FP16InputModsMatchClass : FPInputModsMatchClass<16>;
1011 def FP32InputModsMatchClass : FPInputModsMatchClass<32>;
1012 def FP64InputModsMatchClass : FPInputModsMatchClass<64>;
1014 class InputMods <AsmOperandClass matchClass> : Operand <i32> {
1015 let OperandNamespace = "AMDGPU";
1016 let OperandType = "OPERAND_INPUT_MODS";
1017 let ParserMatchClass = matchClass;
1020 class FPInputMods <FPInputModsMatchClass matchClass> : InputMods <matchClass> {
1021 let PrintMethod = "printOperandAndFPInputMods";
1024 def FP16InputMods : FPInputMods<FP16InputModsMatchClass>;
1025 def FP32InputMods : FPInputMods<FP32InputModsMatchClass>;
1026 def FP64InputMods : FPInputMods<FP64InputModsMatchClass>;
1028 class IntInputModsMatchClass <int opSize> : AsmOperandClass {
1029 let Name = "RegOrImmWithInt"#opSize#"InputMods";
1030 let ParserMethod = "parseRegOrImmWithIntInputMods";
1031 let PredicateMethod = "isRegOrImmWithInt"#opSize#"InputMods";
1033 def Int32InputModsMatchClass : IntInputModsMatchClass<32>;
1034 def Int64InputModsMatchClass : IntInputModsMatchClass<64>;
1036 class IntInputMods <IntInputModsMatchClass matchClass> : InputMods <matchClass> {
1037 let PrintMethod = "printOperandAndIntInputMods";
1039 def Int32InputMods : IntInputMods<Int32InputModsMatchClass>;
1040 def Int64InputMods : IntInputMods<Int64InputModsMatchClass>;
1042 class OpSelModsMatchClass : AsmOperandClass {
1043 let Name = "OpSelMods";
1044 let ParserMethod = "parseRegOrImm";
1045 let PredicateMethod = "isRegOrImm";
1048 def IntOpSelModsMatchClass : OpSelModsMatchClass;
1049 def IntOpSelMods : InputMods<IntOpSelModsMatchClass>;
1051 class FPSDWAInputModsMatchClass <int opSize> : AsmOperandClass {
1052 let Name = "SDWAWithFP"#opSize#"InputMods";
1053 let ParserMethod = "parseRegOrImmWithFPInputMods";
1054 let PredicateMethod = "isSDWAFP"#opSize#"Operand";
1057 def FP16SDWAInputModsMatchClass : FPSDWAInputModsMatchClass<16>;
1058 def FP32SDWAInputModsMatchClass : FPSDWAInputModsMatchClass<32>;
1060 class FPSDWAInputMods <FPSDWAInputModsMatchClass matchClass> :
1061 InputMods <matchClass> {
1062 let PrintMethod = "printOperandAndFPInputMods";
1065 def FP16SDWAInputMods : FPSDWAInputMods<FP16SDWAInputModsMatchClass>;
1066 def FP32SDWAInputMods : FPSDWAInputMods<FP32SDWAInputModsMatchClass>;
1068 def FPVRegInputModsMatchClass : AsmOperandClass {
1069 let Name = "VRegWithFPInputMods";
1070 let ParserMethod = "parseRegWithFPInputMods";
1071 let PredicateMethod = "isVReg32";
1074 def FPVRegInputMods : InputMods <FPVRegInputModsMatchClass> {
1075 let PrintMethod = "printOperandAndFPInputMods";
1078 class IntSDWAInputModsMatchClass <int opSize> : AsmOperandClass {
1079 let Name = "SDWAWithInt"#opSize#"InputMods";
1080 let ParserMethod = "parseRegOrImmWithIntInputMods";
1081 let PredicateMethod = "isSDWAInt"#opSize#"Operand";
1084 def Int16SDWAInputModsMatchClass : IntSDWAInputModsMatchClass<16>;
1085 def Int32SDWAInputModsMatchClass : IntSDWAInputModsMatchClass<32>;
1087 class IntSDWAInputMods <IntSDWAInputModsMatchClass matchClass> :
1088 InputMods <matchClass> {
1089 let PrintMethod = "printOperandAndIntInputMods";
1092 def Int16SDWAInputMods : IntSDWAInputMods<Int16SDWAInputModsMatchClass>;
1093 def Int32SDWAInputMods : IntSDWAInputMods<Int32SDWAInputModsMatchClass>;
1095 def IntVRegInputModsMatchClass : AsmOperandClass {
1096 let Name = "VRegWithIntInputMods";
1097 let ParserMethod = "parseRegWithIntInputMods";
1098 let PredicateMethod = "isVReg32";
1101 def IntVRegInputMods : InputMods <IntVRegInputModsMatchClass> {
1102 let PrintMethod = "printOperandAndIntInputMods";
1105 class PackedFPInputModsMatchClass <int opSize> : AsmOperandClass {
1106 let Name = "PackedFP"#opSize#"InputMods";
1107 let ParserMethod = "parseRegOrImm";
1108 let PredicateMethod = "isRegOrImm";
1109 // let PredicateMethod = "isPackedFP"#opSize#"InputMods";
1112 class PackedIntInputModsMatchClass <int opSize> : AsmOperandClass {
1113 let Name = "PackedInt"#opSize#"InputMods";
1114 let ParserMethod = "parseRegOrImm";
1115 let PredicateMethod = "isRegOrImm";
1116 // let PredicateMethod = "isPackedInt"#opSize#"InputMods";
1119 def PackedF16InputModsMatchClass : PackedFPInputModsMatchClass<16>;
1120 def PackedI16InputModsMatchClass : PackedIntInputModsMatchClass<16>;
1122 class PackedFPInputMods <PackedFPInputModsMatchClass matchClass> : InputMods <matchClass> {
1123 // let PrintMethod = "printPackedFPInputMods";
1126 class PackedIntInputMods <PackedIntInputModsMatchClass matchClass> : InputMods <matchClass> {
1127 //let PrintMethod = "printPackedIntInputMods";
1130 def PackedF16InputMods : PackedFPInputMods<PackedF16InputModsMatchClass>;
1131 def PackedI16InputMods : PackedIntInputMods<PackedI16InputModsMatchClass>;
1133 //===----------------------------------------------------------------------===//
1135 //===----------------------------------------------------------------------===//
1137 def DS1Addr1Offset : ComplexPattern<i32, 2, "SelectDS1Addr1Offset">;
1138 def DS64Bit4ByteAligned : ComplexPattern<i32, 3, "SelectDS64Bit4ByteAligned">;
1140 def MOVRELOffset : ComplexPattern<i32, 2, "SelectMOVRELOffset">;
1142 def VOP3Mods0 : ComplexPattern<untyped, 4, "SelectVOP3Mods0">;
1143 def VOP3Mods0Clamp : ComplexPattern<untyped, 3, "SelectVOP3Mods0Clamp">;
1144 def VOP3Mods0Clamp0OMod : ComplexPattern<untyped, 4, "SelectVOP3Mods0Clamp0OMod">;
1145 def VOP3Mods : ComplexPattern<untyped, 2, "SelectVOP3Mods">;
1146 def VOP3NoMods : ComplexPattern<untyped, 1, "SelectVOP3NoMods">;
1147 // VOP3Mods, but the input source is known to never be NaN.
1148 def VOP3Mods_nnan : ComplexPattern<fAny, 2, "SelectVOP3Mods_NNaN">;
1149 // VOP3Mods, but only allowed for f32 operands.
1150 def VOP3Mods_f32 : ComplexPattern<fAny, 2, "SelectVOP3Mods_f32">;
1152 def VOP3OMods : ComplexPattern<untyped, 3, "SelectVOP3OMods">;
1154 def VOP3PMods : ComplexPattern<untyped, 2, "SelectVOP3PMods">;
1155 def VOP3PMods0 : ComplexPattern<untyped, 3, "SelectVOP3PMods0">;
1157 def VOP3OpSel : ComplexPattern<untyped, 2, "SelectVOP3OpSel">;
1158 def VOP3OpSel0 : ComplexPattern<untyped, 3, "SelectVOP3OpSel0">;
1160 def VOP3OpSelMods : ComplexPattern<untyped, 2, "SelectVOP3OpSelMods">;
1161 def VOP3OpSelMods0 : ComplexPattern<untyped, 3, "SelectVOP3OpSelMods0">;
1163 def VOP3PMadMixMods : ComplexPattern<untyped, 2, "SelectVOP3PMadMixMods">;
1166 def Hi16Elt : ComplexPattern<untyped, 1, "SelectHi16Elt">;
1168 //===----------------------------------------------------------------------===//
1169 // SI assembler operands
1170 //===----------------------------------------------------------------------===//
1175 int FLAT_SCR = 0x68;
1178 // This should be kept in sync with SISrcMods enum
1202 int LLVM_DEBUG_TRAP = 3;
1218 int FLAT_SCR_LO = 20;
1219 int FLAT_SCR_HI = 21;
1220 int XNACK_MASK = 22;
1221 int POPS_PACKER = 25;
1224 class getHwRegImm<int Reg, int Offset = 0, int Size = 32> {
1226 !or(!shl(Offset, 6),
1227 !shl(!add(Size, -1), 11)));
1230 //===----------------------------------------------------------------------===//
1232 // SI Instruction multiclass helpers.
1234 // Instructions with _32 take 32-bit operands.
1235 // Instructions with _64 take 64-bit operands.
1237 // VOP_* instructions can use either a 32-bit or 64-bit encoding. The 32-bit
1238 // encoding is the standard encoding, but instruction that make use of
1239 // any of the instruction modifiers must use the 64-bit encoding.
1241 // Instructions with _e32 use the 32-bit encoding.
1242 // Instructions with _e64 use the 64-bit encoding.
1244 //===----------------------------------------------------------------------===//
1246 class SIMCInstr <string pseudo, int subtarget> {
1247 string PseudoInstr = pseudo;
1248 int Subtarget = subtarget;
1251 //===----------------------------------------------------------------------===//
1253 //===----------------------------------------------------------------------===//
1255 class EXP_Helper<bit done, SDPatternOperator node = null_frag> : EXPCommon<
1258 ExpSrc0:$src0, ExpSrc1:$src1, ExpSrc2:$src2, ExpSrc3:$src3,
1259 exp_vm:$vm, exp_compr:$compr, i8imm:$en),
1260 "exp$tgt $src0, $src1, $src2, $src3"#!if(done, " done", "")#"$compr$vm",
1261 [(node (i8 timm:$tgt), (i8 timm:$en),
1262 f32:$src0, f32:$src1, f32:$src2, f32:$src3,
1263 (i1 timm:$compr), (i1 timm:$vm))]> {
1264 let AsmMatchConverter = "cvtExp";
1267 // Split EXP instruction into EXP and EXP_DONE so we can set
1268 // mayLoad for done=1.
1269 multiclass EXP_m<bit done, SDPatternOperator node> {
1270 let mayLoad = done, DisableWQM = 1 in {
1271 let isPseudo = 1, isCodeGenOnly = 1 in {
1272 def "" : EXP_Helper<done, node>,
1273 SIMCInstr <"exp"#!if(done, "_done", ""), SIEncodingFamily.NONE>;
1276 let done = done in {
1277 def _si : EXP_Helper<done>,
1278 SIMCInstr <"exp"#!if(done, "_done", ""), SIEncodingFamily.SI>,
1280 let AssemblerPredicates = [isGFX6GFX7];
1281 let DecoderNamespace = "GFX6GFX7";
1282 let DisableDecoder = DisableSIDecoder;
1285 def _vi : EXP_Helper<done>,
1286 SIMCInstr <"exp"#!if(done, "_done", ""), SIEncodingFamily.VI>,
1288 let AssemblerPredicates = [isGFX8GFX9];
1289 let DecoderNamespace = "GFX8";
1290 let DisableDecoder = DisableVIDecoder;
1293 def _gfx10 : EXP_Helper<done>,
1294 SIMCInstr <"exp"#!if(done, "_done", ""), SIEncodingFamily.GFX10>,
1296 let AssemblerPredicates = [isGFX10Plus];
1297 let DecoderNamespace = "GFX10";
1298 let DisableDecoder = DisableSIDecoder;
1304 //===----------------------------------------------------------------------===//
1305 // Vector ALU classes
1306 //===----------------------------------------------------------------------===//
1308 class getNumSrcArgs<ValueType Src0, ValueType Src1, ValueType Src2> {
1310 !if (!eq(Src0.Value, untyped.Value), 0,
1311 !if (!eq(Src1.Value, untyped.Value), 1, // VOP1
1312 !if (!eq(Src2.Value, untyped.Value), 2, // VOP2
1316 // Returns the register class to use for the destination of VOP[123C]
1317 // instructions for the given VT.
1318 class getVALUDstForVT<ValueType VT> {
1319 RegisterOperand ret = !if(!eq(VT.Size, 32), VOPDstOperand<VGPR_32>,
1320 !if(!eq(VT.Size, 128), VOPDstOperand<VReg_128>,
1321 !if(!eq(VT.Size, 64), VOPDstOperand<VReg_64>,
1322 !if(!eq(VT.Size, 16), VOPDstOperand<VGPR_32>,
1323 VOPDstS64orS32)))); // else VT == i1
1326 // Returns true if VT is floating point.
1327 class getIsFP<ValueType VT> {
1328 bit ret = !if(!eq(VT.Value, f16.Value), 1,
1329 !if(!eq(VT.Value, v2f16.Value), 1,
1330 !if(!eq(VT.Value, v4f16.Value), 1,
1331 !if(!eq(VT.Value, f32.Value), 1,
1332 !if(!eq(VT.Value, v2f32.Value), 1,
1333 !if(!eq(VT.Value, f64.Value), 1,
1334 !if(!eq(VT.Value, v2f64.Value), 1,
1338 // Returns the register class to use for the destination of VOP[12C]
1339 // instructions with SDWA extension
1340 class getSDWADstForVT<ValueType VT> {
1341 RegisterOperand ret = !if(!eq(VT.Size, 1),
1342 SDWAVopcDst, // VOPC
1343 VOPDstOperand<VGPR_32>); // VOP1/2 32-bit dst
1346 // Returns the register class to use for source 0 of VOP[12C]
1347 // instructions for the given VT.
1348 class getVOPSrc0ForVT<ValueType VT> {
1349 bit isFP = getIsFP<VT>.ret;
1351 RegisterOperand ret =
1353 !if(!eq(VT.Size, 64),
1355 !if(!eq(VT.Value, f16.Value),
1357 !if(!eq(VT.Value, v2f16.Value),
1359 !if(!eq(VT.Value, v4f16.Value),
1366 !if(!eq(VT.Size, 64),
1368 !if(!eq(VT.Value, i16.Value),
1370 !if(!eq(VT.Value, v2i16.Value),
1379 // Returns the vreg register class to use for source operand given VT
1380 class getVregSrcForVT<ValueType VT> {
1381 RegisterClass ret = !if(!eq(VT.Size, 128), VReg_128,
1382 !if(!eq(VT.Size, 64), VReg_64, VGPR_32));
1385 class getSDWASrcForVT <ValueType VT> {
1386 bit isFP = getIsFP<VT>.ret;
1387 RegisterOperand retFlt = !if(!eq(VT.Size, 16), SDWASrc_f16, SDWASrc_f32);
1388 RegisterOperand retInt = !if(!eq(VT.Size, 16), SDWASrc_i16, SDWASrc_i32);
1389 RegisterOperand ret = !if(isFP, retFlt, retInt);
1392 // Returns the register class to use for sources of VOP3 instructions for the
1394 class getVOP3SrcForVT<ValueType VT> {
1395 bit isFP = getIsFP<VT>.ret;
1396 RegisterOperand ret =
1397 !if(!eq(VT.Size, 128),
1399 !if(!eq(VT.Size, 64),
1403 !if(!eq(VT.Value, i1.Value),
1406 !if(!eq(VT.Value, f16.Value),
1408 !if(!eq(VT.Value, v2f16.Value),
1410 !if(!eq(VT.Value, v4f16.Value),
1416 !if(!eq(VT.Value, i16.Value),
1418 !if(!eq(VT.Value, v2i16.Value),
1429 // Float or packed int
1430 class isModifierType<ValueType SrcVT> {
1432 !if(!eq(SrcVT.Value, f16.Value), 1,
1433 !if(!eq(SrcVT.Value, f32.Value), 1,
1434 !if(!eq(SrcVT.Value, f64.Value), 1,
1435 !if(!eq(SrcVT.Value, v2f16.Value), 1,
1436 !if(!eq(SrcVT.Value, v2i16.Value), 1,
1440 // Return type of input modifiers operand for specified input operand
1441 class getSrcMod <ValueType VT, bit EnableF32SrcMods> {
1442 bit isFP = getIsFP<VT>.ret;
1443 bit isPacked = isPackedType<VT>.ret;
1444 Operand ret = !if(!eq(VT.Size, 64),
1445 !if(isFP, FP64InputMods, Int64InputMods),
1447 !if(!eq(VT.Value, f16.Value),
1451 !if(EnableF32SrcMods, FP32InputMods, Int32InputMods))
1455 class getOpSelMod <ValueType VT> {
1456 Operand ret = !if(!eq(VT.Value, f16.Value), FP16InputMods, IntOpSelMods);
1459 // Return type of input modifiers operand specified input operand for DPP
1460 class getSrcModExt <ValueType VT> {
1461 bit isFP = getIsFP<VT>.ret;
1462 Operand ret = !if(isFP, FPVRegInputMods, IntVRegInputMods);
1465 // Return type of input modifiers operand specified input operand for SDWA
1466 class getSrcModSDWA <ValueType VT> {
1467 Operand ret = !if(!eq(VT.Value, f16.Value), FP16SDWAInputMods,
1468 !if(!eq(VT.Value, f32.Value), FP32SDWAInputMods,
1469 !if(!eq(VT.Value, i16.Value), Int16SDWAInputMods,
1470 Int32SDWAInputMods)));
1473 // Returns the input arguments for VOP[12C] instructions for the given SrcVT.
1474 class getIns32 <RegisterOperand Src0RC, RegisterClass Src1RC, int NumSrcArgs> {
1475 dag ret = !if(!eq(NumSrcArgs, 1), (ins Src0RC:$src0), // VOP1
1476 !if(!eq(NumSrcArgs, 2), (ins Src0RC:$src0, Src1RC:$src1), // VOP2
1480 // Returns the input arguments for VOP3 instructions for the given SrcVT.
1481 class getIns64 <RegisterOperand Src0RC, RegisterOperand Src1RC,
1482 RegisterOperand Src2RC, int NumSrcArgs,
1483 bit HasIntClamp, bit HasModifiers, bit HasSrc2Mods, bit HasOMod,
1484 Operand Src0Mod, Operand Src1Mod, Operand Src2Mod> {
1487 !if (!eq(NumSrcArgs, 0),
1488 // VOP1 without input operands (V_NOP, V_CLREXCP)
1491 !if (!eq(NumSrcArgs, 1),
1492 !if (!eq(HasModifiers, 1),
1493 // VOP1 with modifiers
1494 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1495 clampmod:$clamp, omod:$omod)
1497 // VOP1 without modifiers
1498 !if (!eq(HasIntClamp, 1),
1499 (ins Src0RC:$src0, clampmod:$clamp),
1502 !if (!eq(NumSrcArgs, 2),
1503 !if (!eq(HasModifiers, 1),
1504 // VOP 2 with modifiers
1505 !if( !eq(HasOMod, 1),
1506 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1507 Src1Mod:$src1_modifiers, Src1RC:$src1,
1508 clampmod:$clamp, omod:$omod),
1509 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1510 Src1Mod:$src1_modifiers, Src1RC:$src1,
1513 // VOP2 without modifiers
1514 !if (!eq(HasIntClamp, 1),
1515 (ins Src0RC:$src0, Src1RC:$src1, clampmod:$clamp),
1516 (ins Src0RC:$src0, Src1RC:$src1))
1519 /* NumSrcArgs == 3 */,
1520 !if (!eq(HasModifiers, 1),
1521 !if (!eq(HasSrc2Mods, 1),
1522 // VOP3 with modifiers
1523 !if (!eq(HasOMod, 1),
1524 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1525 Src1Mod:$src1_modifiers, Src1RC:$src1,
1526 Src2Mod:$src2_modifiers, Src2RC:$src2,
1527 clampmod:$clamp, omod:$omod),
1528 !if (!eq(HasIntClamp, 1),
1529 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1530 Src1Mod:$src1_modifiers, Src1RC:$src1,
1531 Src2Mod:$src2_modifiers, Src2RC:$src2,
1533 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1534 Src1Mod:$src1_modifiers, Src1RC:$src1,
1535 Src2Mod:$src2_modifiers, Src2RC:$src2))),
1536 // VOP3 with modifiers except src2
1537 !if (!eq(HasOMod, 1),
1538 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1539 Src1Mod:$src1_modifiers, Src1RC:$src1,
1540 Src2RC:$src2, clampmod:$clamp, omod:$omod),
1541 !if (!eq(HasIntClamp, 1),
1542 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1543 Src1Mod:$src1_modifiers, Src1RC:$src1,
1544 Src2RC:$src2, clampmod:$clamp),
1545 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1546 Src1Mod:$src1_modifiers, Src1RC:$src1,
1549 // VOP3 without modifiers
1550 !if (!eq(HasIntClamp, 1),
1551 (ins Src0RC:$src0, Src1RC:$src1, Src2RC:$src2, clampmod:$clamp),
1552 (ins Src0RC:$src0, Src1RC:$src1, Src2RC:$src2))
1556 /// XXX - src1 may only allow VGPRs?
1558 // The modifiers (except clamp) are dummy operands for the benefit of
1559 // printing and parsing. They defer their values to looking at the
1560 // srcN_modifiers for what to print.
1561 class getInsVOP3P <RegisterOperand Src0RC, RegisterOperand Src1RC,
1562 RegisterOperand Src2RC, int NumSrcArgs,
1564 Operand Src0Mod, Operand Src1Mod, Operand Src2Mod> {
1565 dag ret = !if (!eq(NumSrcArgs, 2),
1567 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1568 Src1Mod:$src1_modifiers, Src1RC:$src1,
1570 op_sel:$op_sel, op_sel_hi:$op_sel_hi,
1571 neg_lo:$neg_lo, neg_hi:$neg_hi),
1572 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1573 Src1Mod:$src1_modifiers, Src1RC:$src1,
1574 op_sel:$op_sel, op_sel_hi:$op_sel_hi,
1575 neg_lo:$neg_lo, neg_hi:$neg_hi)),
1576 // else NumSrcArgs == 3
1578 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1579 Src1Mod:$src1_modifiers, Src1RC:$src1,
1580 Src2Mod:$src2_modifiers, Src2RC:$src2,
1582 op_sel:$op_sel, op_sel_hi:$op_sel_hi,
1583 neg_lo:$neg_lo, neg_hi:$neg_hi),
1584 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1585 Src1Mod:$src1_modifiers, Src1RC:$src1,
1586 Src2Mod:$src2_modifiers, Src2RC:$src2,
1587 op_sel:$op_sel, op_sel_hi:$op_sel_hi,
1588 neg_lo:$neg_lo, neg_hi:$neg_hi))
1592 class getInsVOP3OpSel <RegisterOperand Src0RC,
1593 RegisterOperand Src1RC,
1594 RegisterOperand Src2RC,
1600 dag ret = !if (!eq(NumSrcArgs, 2),
1602 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1603 Src1Mod:$src1_modifiers, Src1RC:$src1,
1606 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1607 Src1Mod:$src1_modifiers, Src1RC:$src1,
1609 // else NumSrcArgs == 3
1611 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1612 Src1Mod:$src1_modifiers, Src1RC:$src1,
1613 Src2Mod:$src2_modifiers, Src2RC:$src2,
1616 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1617 Src1Mod:$src1_modifiers, Src1RC:$src1,
1618 Src2Mod:$src2_modifiers, Src2RC:$src2,
1623 class getInsDPP <RegisterOperand DstRC, RegisterClass Src0RC, RegisterClass Src1RC,
1624 int NumSrcArgs, bit HasModifiers,
1625 Operand Src0Mod, Operand Src1Mod> {
1627 dag ret = !if (!eq(NumSrcArgs, 0),
1628 // VOP1 without input operands (V_NOP)
1629 (ins dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
1630 bank_mask:$bank_mask, bound_ctrl:$bound_ctrl),
1631 !if (!eq(NumSrcArgs, 1),
1632 !if (!eq(HasModifiers, 1),
1633 // VOP1_DPP with modifiers
1634 (ins DstRC:$old, Src0Mod:$src0_modifiers,
1635 Src0RC:$src0, dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
1636 bank_mask:$bank_mask, bound_ctrl:$bound_ctrl)
1638 // VOP1_DPP without modifiers
1639 (ins DstRC:$old, Src0RC:$src0,
1640 dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
1641 bank_mask:$bank_mask, bound_ctrl:$bound_ctrl)
1643 /* NumSrcArgs == 2 */,
1644 !if (!eq(HasModifiers, 1),
1645 // VOP2_DPP with modifiers
1647 Src0Mod:$src0_modifiers, Src0RC:$src0,
1648 Src1Mod:$src1_modifiers, Src1RC:$src1,
1649 dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
1650 bank_mask:$bank_mask, bound_ctrl:$bound_ctrl)
1652 // VOP2_DPP without modifiers
1654 Src0RC:$src0, Src1RC:$src1, dpp_ctrl:$dpp_ctrl,
1655 row_mask:$row_mask, bank_mask:$bank_mask,
1656 bound_ctrl:$bound_ctrl)
1660 class getInsDPP16 <RegisterOperand DstRC, RegisterClass Src0RC, RegisterClass Src1RC,
1661 int NumSrcArgs, bit HasModifiers,
1662 Operand Src0Mod, Operand Src1Mod> {
1663 dag ret = !con(getInsDPP<DstRC, Src0RC, Src1RC, NumSrcArgs,
1664 HasModifiers, Src0Mod, Src1Mod>.ret,
1668 class getInsDPP8 <RegisterOperand DstRC, RegisterClass Src0RC, RegisterClass Src1RC,
1669 int NumSrcArgs, bit HasModifiers,
1670 Operand Src0Mod, Operand Src1Mod> {
1671 dag ret = !if (!eq(NumSrcArgs, 0),
1672 // VOP1 without input operands (V_NOP)
1673 (ins dpp8:$dpp8, FI:$fi),
1674 !if (!eq(NumSrcArgs, 1),
1675 !if (!eq(HasModifiers, 1),
1676 // VOP1_DPP with modifiers
1677 (ins DstRC:$old, Src0Mod:$src0_modifiers,
1678 Src0RC:$src0, dpp8:$dpp8, FI:$fi)
1680 // VOP1_DPP without modifiers
1681 (ins DstRC:$old, Src0RC:$src0, dpp8:$dpp8, FI:$fi)
1683 /* NumSrcArgs == 2 */,
1684 !if (!eq(HasModifiers, 1),
1685 // VOP2_DPP with modifiers
1687 Src0Mod:$src0_modifiers, Src0RC:$src0,
1688 Src1Mod:$src1_modifiers, Src1RC:$src1,
1691 // VOP2_DPP without modifiers
1693 Src0RC:$src0, Src1RC:$src1, dpp8:$dpp8, FI:$fi)
1699 class getInsSDWA <RegisterOperand Src0RC, RegisterOperand Src1RC, int NumSrcArgs,
1700 bit HasSDWAOMod, Operand Src0Mod, Operand Src1Mod,
1703 dag ret = !if(!eq(NumSrcArgs, 0),
1704 // VOP1 without input operands (V_NOP)
1706 !if(!eq(NumSrcArgs, 1),
1708 !if(!eq(HasSDWAOMod, 0),
1709 // VOP1_SDWA without omod
1710 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1712 dst_sel:$dst_sel, dst_unused:$dst_unused,
1713 src0_sel:$src0_sel),
1714 // VOP1_SDWA with omod
1715 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1716 clampmod:$clamp, omod:$omod,
1717 dst_sel:$dst_sel, dst_unused:$dst_unused,
1718 src0_sel:$src0_sel)),
1719 !if(!eq(NumSrcArgs, 2),
1720 !if(!eq(DstVT.Size, 1),
1722 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1723 Src1Mod:$src1_modifiers, Src1RC:$src1,
1724 clampmod:$clamp, src0_sel:$src0_sel, src1_sel:$src1_sel),
1726 !if(!eq(HasSDWAOMod, 0),
1727 // VOP2_SDWA without omod
1728 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1729 Src1Mod:$src1_modifiers, Src1RC:$src1,
1731 dst_sel:$dst_sel, dst_unused:$dst_unused,
1732 src0_sel:$src0_sel, src1_sel:$src1_sel),
1733 // VOP2_SDWA with omod
1734 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1735 Src1Mod:$src1_modifiers, Src1RC:$src1,
1736 clampmod:$clamp, omod:$omod,
1737 dst_sel:$dst_sel, dst_unused:$dst_unused,
1738 src0_sel:$src0_sel, src1_sel:$src1_sel))),
1739 (ins)/* endif */)));
1742 // Outs for DPP and SDWA
1743 class getOutsExt <bit HasDst, ValueType DstVT, RegisterOperand DstRCExt> {
1744 dag ret = !if(HasDst,
1745 !if(!eq(DstVT.Size, 1),
1746 (outs), // no dst for VOPC, we use "vcc"-token as dst in SDWA VOPC instructions
1747 (outs DstRCExt:$vdst)),
1752 class getOutsSDWA <bit HasDst, ValueType DstVT, RegisterOperand DstRCSDWA> {
1753 dag ret = !if(HasDst,
1754 !if(!eq(DstVT.Size, 1),
1755 (outs DstRCSDWA:$sdst),
1756 (outs DstRCSDWA:$vdst)),
1760 // Returns the assembly string for the inputs and outputs of a VOP[12C]
1761 // instruction. This does not add the _e32 suffix, so it can be reused
1763 class getAsm32 <bit HasDst, int NumSrcArgs, ValueType DstVT = i32> {
1764 string dst = !if(!eq(DstVT.Size, 1), "$sdst", "$vdst"); // use $sdst for VOPC
1765 string src0 = ", $src0";
1766 string src1 = ", $src1";
1767 string src2 = ", $src2";
1768 string ret = !if(HasDst, dst, "") #
1769 !if(!eq(NumSrcArgs, 1), src0, "") #
1770 !if(!eq(NumSrcArgs, 2), src0#src1, "") #
1771 !if(!eq(NumSrcArgs, 3), src0#src1#src2, "");
1774 // Returns the assembly string for the inputs and outputs of a VOP3
1776 class getAsm64 <bit HasDst, int NumSrcArgs, bit HasIntClamp, bit HasModifiers,
1777 bit HasOMod, ValueType DstVT = i32> {
1778 string dst = !if(!eq(DstVT.Size, 1), "$sdst", "$vdst"); // use $sdst for VOPC
1779 string src0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,");
1780 string src1 = !if(!eq(NumSrcArgs, 1), "",
1781 !if(!eq(NumSrcArgs, 2), " $src1_modifiers",
1782 " $src1_modifiers,"));
1783 string src2 = !if(!eq(NumSrcArgs, 3), " $src2_modifiers", "");
1784 string iclamp = !if(HasIntClamp, "$clamp", "");
1785 string ret =
1786 !if(!eq(HasModifiers, 0),
1787 getAsm32<HasDst, NumSrcArgs, DstVT>.ret # iclamp,
1788 dst#", "#src0#src1#src2#"$clamp"#!if(HasOMod, "$omod", ""));
1789 }
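// Illustrative note (derived from the string concatenation above): a
// three-source FP profile with modifiers and omod resolves to
// "$vdst, $src0_modifiers, $src1_modifiers, $src2_modifiers$clamp$omod";
// without modifiers it falls back to the getAsm32 string plus an optional "$clamp".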
1791 // Returns the assembly string for the inputs and outputs of a VOP3P
1792 // instruction.
1793 class getAsmVOP3P <bit HasDst, int NumSrcArgs, bit HasModifiers,
1794 bit HasClamp, ValueType DstVT = i32> {
1795 string dst = " $vdst";
1796 string src0 = !if(!eq(NumSrcArgs, 1), "$src0", "$src0,");
1797 string src1 = !if(!eq(NumSrcArgs, 1), "",
1798 !if(!eq(NumSrcArgs, 2), " $src1",
1799 " $src1,"));
1800 string src2 = !if(!eq(NumSrcArgs, 3), " $src2", "");
1802 string mods = !if(HasModifiers, "$neg_lo$neg_hi", "");
1803 string clamp = !if(HasClamp, "$clamp", "");
1805 // Each modifier is printed as an array of bits for each operand, so
1806 // all operands are printed as part of src0_modifiers.
1807 string ret = dst#", "#src0#src1#src2#"$op_sel$op_sel_hi"#mods#clamp;
1808 }
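// Illustrative note (derived from the string concatenation above): a packed
// three-source profile with modifiers and clamp resolves to
// " $vdst, $src0, $src1, $src2$op_sel$op_sel_hi$neg_lo$neg_hi$clamp".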
1810 class getAsmVOP3OpSel <int NumSrcArgs,
1811 bit HasClamp,
1812 bit Src0HasMods,
1813 bit Src1HasMods,
1814 bit Src2HasMods> {
1815 string dst = " $vdst";
1817 string isrc0 = !if(!eq(NumSrcArgs, 1), "$src0", "$src0,");
1818 string isrc1 = !if(!eq(NumSrcArgs, 1), "",
1819 !if(!eq(NumSrcArgs, 2), " $src1",
1820 " $src1,"));
1821 string isrc2 = !if(!eq(NumSrcArgs, 3), " $src2", "");
1823 string fsrc0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,");
1824 string fsrc1 = !if(!eq(NumSrcArgs, 1), "",
1825 !if(!eq(NumSrcArgs, 2), " $src1_modifiers",
1826 " $src1_modifiers,"));
1827 string fsrc2 = !if(!eq(NumSrcArgs, 3), " $src2_modifiers", "");
1829 string src0 = !if(Src0HasMods, fsrc0, isrc0);
1830 string src1 = !if(Src1HasMods, fsrc1, isrc1);
1831 string src2 = !if(Src2HasMods, fsrc2, isrc2);
1833 string clamp = !if(HasClamp, "$clamp", "");
1835 string ret = dst#", "#src0#src1#src2#"$op_sel"#clamp;
1836 }
1838 class getAsmDPP <bit HasDst, int NumSrcArgs, bit HasModifiers, ValueType DstVT = i32> {
1839 string dst = !if(HasDst,
1840 !if(!eq(DstVT.Size, 1),
1841 "$sdst",
1842 "$vdst"),
1843 ""); // use $sdst for VOPC
1844 string src0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,");
1845 string src1 = !if(!eq(NumSrcArgs, 1), "",
1846 !if(!eq(NumSrcArgs, 2), " $src1_modifiers",
1847 " $src1_modifiers,"));
1848 string args = !if(!eq(HasModifiers, 0),
1849 getAsm32<0, NumSrcArgs, DstVT>.ret,
1850 ", "#src0#src1);
1851 string ret = dst#args#" $dpp_ctrl$row_mask$bank_mask$bound_ctrl";
1852 }
1854 class getAsmDPP16 <bit HasDst, int NumSrcArgs, bit HasModifiers, ValueType DstVT = i32> {
1855 string ret = getAsmDPP<HasDst, NumSrcArgs, HasModifiers, DstVT>.ret#"$fi";
1856 }
1858 class getAsmDPP8 <bit HasDst, int NumSrcArgs, bit HasModifiers, ValueType DstVT = i32> {
1859 string dst = !if(HasDst,
1860 !if(!eq(DstVT.Size, 1),
1861 "$sdst",
1862 "$vdst"),
1863 ""); // use $sdst for VOPC
1864 string src0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,");
1865 string src1 = !if(!eq(NumSrcArgs, 1), "",
1866 !if(!eq(NumSrcArgs, 2), " $src1_modifiers",
1867 " $src1_modifiers,"));
1868 string args = !if(!eq(HasModifiers, 0),
1869 getAsm32<0, NumSrcArgs, DstVT>.ret,
1870 ", "#src0#src1);
1871 string ret = dst#args#"$dpp8$fi";
1872 }
1874 class getAsmSDWA <bit HasDst, int NumSrcArgs, ValueType DstVT = i32> {
1875 string dst = !if(HasDst,
1876 !if(!eq(DstVT.Size, 1),
1877 " vcc", // use vcc token as dst for VOPC instructioins
1880 string src0 = "$src0_modifiers";
1881 string src1 = "$src1_modifiers";
1882 string args = !if(!eq(NumSrcArgs, 0),
1883 "",
1884 !if(!eq(NumSrcArgs, 1),
1885 ", "#src0#"$clamp",
1886 ", "#src0#", "#src1#"$clamp"
1887 )
1888 );
1889 string sdwa = !if(!eq(NumSrcArgs, 0),
1890 "",
1891 !if(!eq(NumSrcArgs, 1),
1892 " $dst_sel $dst_unused $src0_sel",
1893 !if(!eq(DstVT.Size, 1),
1894 " $src0_sel $src1_sel", // No dst_sel and dst_unused for VOPC
1895 " $dst_sel $dst_unused $src0_sel $src1_sel"
1896 )
1897 )
1898 );
1899 string ret = dst#args#sdwa;
1900 }
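// Illustrative note (derived from the strings above): for a two-source VOP2
// profile this expands to
// "$vdst, $src0_modifiers, $src1_modifiers$clamp $dst_sel $dst_unused $src0_sel $src1_sel",
// while a VOPC profile keeps the " vcc" token and drops dst_sel/dst_unused.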
1902 class getAsmSDWA9 <bit HasDst, bit HasOMod, int NumSrcArgs,
1903 ValueType DstVT = i32> {
1904 string dst = !if(HasDst,
1905 !if(!eq(DstVT.Size, 1),
1906 "$sdst",
1907 "$vdst"),
1908 "");
1909 string src0 = "$src0_modifiers";
1910 string src1 = "$src1_modifiers";
1911 string out_mods = !if(!eq(HasOMod, 0), "$clamp", "$clamp$omod");
1912 string args = !if(!eq(NumSrcArgs, 0), "",
1913 !if(!eq(NumSrcArgs, 1),
1914 ", "#src0,
1915 ", "#src0#", "#src1
1916 )
1917 );
1918 string sdwa = !if(!eq(NumSrcArgs, 0), "",
1919 !if(!eq(NumSrcArgs, 1),
1920 out_mods#" $dst_sel $dst_unused $src0_sel",
1921 !if(!eq(DstVT.Size, 1),
1922 " $src0_sel $src1_sel", // No dst_sel, dst_unused and output modifiers for VOPC
1923 out_mods#" $dst_sel $dst_unused $src0_sel $src1_sel"
1924 )
1925 )
1926 );
1927 string ret = dst#args#sdwa;
1928 }
1931 // Function that checks whether an instruction supports DPP and SDWA
1932 class getHasExt <int NumSrcArgs, ValueType DstVT = i32, ValueType Src0VT = i32,
1933 ValueType Src1VT = i32> {
1934 bit ret = !if(!eq(NumSrcArgs, 3),
1935 0, // NumSrcArgs == 3 - No DPP or SDWA for VOP3
1936 !if(!eq(DstVT.Size, 64),
1937 0, // 64-bit dst - No DPP or SDWA for 64-bit operands
1938 !if(!eq(Src0VT.Size, 64),
1939 0, // 64-bit src0
1940 !if(!eq(Src1VT.Size, 64),
1941 0, // 64-bit src1
1942 1))));
1948 }
1949 class getHasDPP <int NumSrcArgs, ValueType DstVT = i32, ValueType Src0VT = i32,
1950 ValueType Src1VT = i32> {
1951 bit ret = !if(!eq(NumSrcArgs, 0), 0,
1952 getHasExt<NumSrcArgs, DstVT, Src0VT, Src1VT>.ret);
1953 }
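// Illustrative note (derived from getHasExt/getHasDPP above): profiles built
// only from 16/32-bit operands (e.g. the f32 and f16 two-source profiles below)
// report HasExt = 1, while any three-source profile or any 64-bit dst/src
// operand forces HasExt = 0.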
1955 class BitOr<bit a, bit b> {
1956 bit ret = !if(a, 1, !if(b, 1, 0));
1957 }
1959 class BitAnd<bit a, bit b> {
1960 bit ret = !if(a, !if(b, 1, 0), 0);
1961 }
1968 class VOPProfile <list<ValueType> _ArgVT, bit _EnableF32SrcMods = 0,
1969 bit _EnableClamp = 0> {
1971 field list<ValueType> ArgVT = _ArgVT;
1972 field bit EnableF32SrcMods = _EnableF32SrcMods;
1973 field bit EnableClamp = _EnableClamp;
1975 field ValueType DstVT = ArgVT[0];
1976 field ValueType Src0VT = ArgVT[1];
1977 field ValueType Src1VT = ArgVT[2];
1978 field ValueType Src2VT = ArgVT[3];
1979 field RegisterOperand DstRC = getVALUDstForVT<DstVT>.ret;
1980 field RegisterOperand DstRCDPP = getVALUDstForVT<DstVT>.ret;
1981 field RegisterOperand DstRCSDWA = getSDWADstForVT<DstVT>.ret;
1982 field RegisterOperand Src0RC32 = getVOPSrc0ForVT<Src0VT>.ret;
1983 field RegisterClass Src1RC32 = getVregSrcForVT<Src1VT>.ret;
1984 field RegisterOperand Src0RC64 = getVOP3SrcForVT<Src0VT>.ret;
1985 field RegisterOperand Src1RC64 = getVOP3SrcForVT<Src1VT>.ret;
1986 field RegisterOperand Src2RC64 = getVOP3SrcForVT<Src2VT>.ret;
1987 field RegisterClass Src0DPP = getVregSrcForVT<Src0VT>.ret;
1988 field RegisterClass Src1DPP = getVregSrcForVT<Src1VT>.ret;
1989 field RegisterOperand Src0SDWA = getSDWASrcForVT<Src0VT>.ret;
1990 field RegisterOperand Src1SDWA = getSDWASrcForVT<Src0VT>.ret;
1991 field Operand Src0Mod = getSrcMod<Src0VT, EnableF32SrcMods>.ret;
1992 field Operand Src1Mod = getSrcMod<Src1VT, EnableF32SrcMods>.ret;
1993 field Operand Src2Mod = getSrcMod<Src2VT, EnableF32SrcMods>.ret;
1994 field Operand Src0ModDPP = getSrcModExt<Src0VT>.ret;
1995 field Operand Src1ModDPP = getSrcModExt<Src1VT>.ret;
1996 field Operand Src0ModSDWA = getSrcModSDWA<Src0VT>.ret;
1997 field Operand Src1ModSDWA = getSrcModSDWA<Src1VT>.ret;
2000 field bit HasDst = !if(!eq(DstVT.Value, untyped.Value), 0, 1);
2001 field bit HasDst32 = HasDst;
2002 field bit EmitDst = HasDst; // force dst encoding, see v_movreld_b32 special case
2003 field int NumSrcArgs = getNumSrcArgs<Src0VT, Src1VT, Src2VT>.ret;
2004 field bit HasSrc0 = !if(!eq(Src0VT.Value, untyped.Value), 0, 1);
2005 field bit HasSrc1 = !if(!eq(Src1VT.Value, untyped.Value), 0, 1);
2006 field bit HasSrc2 = !if(!eq(Src2VT.Value, untyped.Value), 0, 1);
2008 // TODO: Modifiers logic is somewhat ad hoc here; to be refined later.
2009 // HasModifiers affects the normal and DPP encodings. We take note of EnableF32SrcMods,
2010 // which enables modifiers for the i32 type.
2011 field bit HasModifiers = BitOr<isModifierType<Src0VT>.ret, EnableF32SrcMods>.ret;
2013 // HasSrc*FloatMods affects the SDWA encoding. We ignore EnableF32SrcMods.
2014 field bit HasSrc0FloatMods = isFloatType<Src0VT>.ret;
2015 field bit HasSrc1FloatMods = isFloatType<Src1VT>.ret;
2016 field bit HasSrc2FloatMods = isFloatType<Src2VT>.ret;
2018 // HasSrc*IntMods affects the SDWA encoding. We ignore EnableF32SrcMods.
2019 field bit HasSrc0IntMods = isIntType<Src0VT>.ret;
2020 field bit HasSrc1IntMods = isIntType<Src1VT>.ret;
2021 field bit HasSrc2IntMods = isIntType<Src2VT>.ret;
2023 field bit HasSrc0Mods = HasModifiers;
2024 field bit HasSrc1Mods = !if(HasModifiers, BitOr<HasSrc1FloatMods, HasSrc1IntMods>.ret, 0);
2025 field bit HasSrc2Mods = !if(HasModifiers, BitOr<HasSrc2FloatMods, HasSrc2IntMods>.ret, 0);
2027 field bit HasClamp = BitOr<isModifierType<Src0VT>.ret, EnableClamp>.ret;
2028 field bit HasSDWAClamp = EmitDst;
2029 field bit HasFPClamp = BitAnd<isFloatType<DstVT>.ret, HasClamp>.ret;
2030 field bit HasIntClamp = !if(isFloatType<DstVT>.ret, 0, HasClamp);
2031 field bit HasClampLo = HasClamp;
2032 field bit HasClampHi = BitAnd<isPackedType<DstVT>.ret, HasClamp>.ret;
2033 field bit HasHigh = 0;
2035 field bit IsPacked = isPackedType<Src0VT>.ret;
2036 field bit HasOpSel = IsPacked;
2037 field bit HasOMod = !if(HasOpSel, 0, isFloatType<DstVT>.ret);
2038 field bit HasSDWAOMod = isFloatType<DstVT>.ret;
2040 field bit HasExt = getHasExt<NumSrcArgs, DstVT, Src0VT, Src1VT>.ret;
2041 field bit HasExtDPP = getHasDPP<NumSrcArgs, DstVT, Src0VT, Src1VT>.ret;
2042 field bit HasExtSDWA = HasExt;
2043 field bit HasExtSDWA9 = HasExt;
2044 field int NeedPatGen = PatGenMode.NoPattern;
2046 field bit IsMAI = 0;
2048 field Operand Src0PackedMod = !if(HasSrc0FloatMods, PackedF16InputMods, PackedI16InputMods);
2049 field Operand Src1PackedMod = !if(HasSrc1FloatMods, PackedF16InputMods, PackedI16InputMods);
2050 field Operand Src2PackedMod = !if(HasSrc2FloatMods, PackedF16InputMods, PackedI16InputMods);
2052 field dag Outs = !if(HasDst,(outs DstRC:$vdst),(outs));
2054 // VOP3b instructions are a special case with a second explicit
2055 // output. This is manually overridden for them.
2056 field dag Outs32 = Outs;
2057 field dag Outs64 = Outs;
2058 field dag OutsDPP = getOutsExt<HasDst, DstVT, DstRCDPP>.ret;
2059 field dag OutsDPP8 = getOutsExt<HasDst, DstVT, DstRCDPP>.ret;
2060 field dag OutsSDWA = getOutsSDWA<HasDst, DstVT, DstRCSDWA>.ret;
2062 field dag Ins32 = getIns32<Src0RC32, Src1RC32, NumSrcArgs>.ret;
2063 field dag Ins64 = getIns64<Src0RC64, Src1RC64, Src2RC64, NumSrcArgs,
2064 HasIntClamp, HasModifiers, HasSrc2Mods,
2065 HasOMod, Src0Mod, Src1Mod, Src2Mod>.ret;
2066 field dag InsVOP3P = getInsVOP3P<Src0RC64, Src1RC64, Src2RC64,
2067 NumSrcArgs, HasClamp,
2068 Src0PackedMod, Src1PackedMod, Src2PackedMod>.ret;
2069 field dag InsVOP3OpSel = getInsVOP3OpSel<Src0RC64, Src1RC64, Src2RC64,
2070 NumSrcArgs,
2071 HasClamp,
2072 getOpSelMod<Src0VT>.ret,
2073 getOpSelMod<Src1VT>.ret,
2074 getOpSelMod<Src2VT>.ret>.ret;
2075 field dag InsDPP = !if(HasExtDPP,
2076 getInsDPP<DstRCDPP, Src0DPP, Src1DPP, NumSrcArgs,
2077 HasModifiers, Src0ModDPP, Src1ModDPP>.ret,
2078 (ins));
2079 field dag InsDPP16 = getInsDPP16<DstRCDPP, Src0DPP, Src1DPP, NumSrcArgs,
2080 HasModifiers, Src0ModDPP, Src1ModDPP>.ret;
2081 field dag InsDPP8 = getInsDPP8<DstRCDPP, Src0DPP, Src1DPP, NumSrcArgs, 0,
2082 Src0ModDPP, Src1ModDPP>.ret;
2083 field dag InsSDWA = getInsSDWA<Src0SDWA, Src1SDWA, NumSrcArgs,
2084 HasSDWAOMod, Src0ModSDWA, Src1ModSDWA,
2085 DstVT>.ret;
2088 field string Asm32 = getAsm32<HasDst, NumSrcArgs, DstVT>.ret;
2089 field string Asm64 = getAsm64<HasDst, NumSrcArgs, HasIntClamp, HasModifiers, HasOMod, DstVT>.ret;
2090 field string AsmVOP3P = getAsmVOP3P<HasDst, NumSrcArgs, HasModifiers, HasClamp, DstVT>.ret;
2091 field string AsmVOP3OpSel = getAsmVOP3OpSel<NumSrcArgs,
2092 HasClamp,
2093 HasSrc0FloatMods,
2094 HasSrc1FloatMods,
2095 HasSrc2FloatMods>.ret;
2096 field string AsmDPP = !if(HasExtDPP,
2097 getAsmDPP<HasDst, NumSrcArgs, HasModifiers, DstVT>.ret, "");
2098 field string AsmDPP16 = getAsmDPP16<HasDst, NumSrcArgs, HasModifiers, DstVT>.ret;
2099 field string AsmDPP8 = getAsmDPP8<HasDst, NumSrcArgs, 0, DstVT>.ret;
2100 field string AsmSDWA = getAsmSDWA<HasDst, NumSrcArgs, DstVT>.ret;
2101 field string AsmSDWA9 = getAsmSDWA9<HasDst, HasSDWAOMod, NumSrcArgs, DstVT>.ret;
2103 field string TieRegDPP = "$old";
2104 }
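// Worked example (illustrative, derived from the field definitions above): for
// VOP_F32_F32_F32 defined below, DstVT/Src0VT/Src1VT are all f32 and Src2VT is
// untyped, so NumSrcArgs = 2, HasModifiers = 1, HasOMod = 1, IsPacked = 0,
// HasExt = 1, and Asm32 becomes "$vdst, $src0, $src1".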
2106 class VOP_NO_EXT <VOPProfile p> : VOPProfile <p.ArgVT> {
2107 let HasExt = 0;
2108 let HasExtDPP = 0;
2109 let HasExtSDWA = 0;
2110 let HasExtSDWA9 = 0;
2111 }
2113 class VOP_PAT_GEN <VOPProfile p, int mode=PatGenMode.Pattern> : VOPProfile <p.ArgVT> {
2114 let NeedPatGen = mode;
2115 }
2117 def VOP_F16_F16 : VOPProfile <[f16, f16, untyped, untyped]>;
2118 def VOP_F16_I16 : VOPProfile <[f16, i16, untyped, untyped]>;
2119 def VOP_I16_F16 : VOPProfile <[i16, f16, untyped, untyped]>;
2121 def VOP_F16_F16_F16 : VOPProfile <[f16, f16, f16, untyped]>;
2122 def VOP_F16_F16_I16 : VOPProfile <[f16, f16, i16, untyped]>;
2123 def VOP_F16_F16_I32 : VOPProfile <[f16, f16, i32, untyped]>;
2124 def VOP_I16_I16_I16 : VOPProfile <[i16, i16, i16, untyped]>;
2126 def VOP_I16_I16_I16_I16 : VOPProfile <[i16, i16, i16, i16, untyped]>;
2127 def VOP_F16_F16_F16_F16 : VOPProfile <[f16, f16, f16, f16, untyped]>;
2129 def VOP_I32_I16_I16_I32 : VOPProfile <[i32, i16, i16, i32, untyped]>;
2131 def VOP_V2F16_V2F16_V2F16 : VOPProfile <[v2f16, v2f16, v2f16, untyped]>;
2132 def VOP_V2I16_V2I16_V2I16 : VOPProfile <[v2i16, v2i16, v2i16, untyped]>;
2133 def VOP_B32_F16_F16 : VOPProfile <[i32, f16, f16, untyped]>;
2135 def VOP_V2F16_V2F16_V2F16_V2F16 : VOPProfile <[v2f16, v2f16, v2f16, v2f16]>;
2136 def VOP_V2I16_V2I16_V2I16_V2I16 : VOPProfile <[v2i16, v2i16, v2i16, v2i16]>;
2137 def VOP_V2I16_F32_F32 : VOPProfile <[v2i16, f32, f32, untyped]>;
2138 def VOP_V2I16_I32_I32 : VOPProfile <[v2i16, i32, i32, untyped]>;
2140 def VOP_F32_V2F16_V2F16_V2F16 : VOPProfile <[f32, v2f16, v2f16, v2f16]>;
2142 def VOP_NONE : VOPProfile <[untyped, untyped, untyped, untyped]>;
2144 def VOP_F32_F32 : VOPProfile <[f32, f32, untyped, untyped]>;
2145 def VOP_F32_F64 : VOPProfile <[f32, f64, untyped, untyped]>;
2146 def VOP_F32_I32 : VOPProfile <[f32, i32, untyped, untyped]>;
2147 def VOP_F64_F32 : VOPProfile <[f64, f32, untyped, untyped]>;
2148 def VOP_F64_F64 : VOPProfile <[f64, f64, untyped, untyped]>;
2149 def VOP_F64_I32 : VOPProfile <[f64, i32, untyped, untyped]>;
2150 def VOP_I32_F32 : VOPProfile <[i32, f32, untyped, untyped]>;
2151 def VOP_I32_F64 : VOPProfile <[i32, f64, untyped, untyped]>;
2152 def VOP_I32_I32 : VOPProfile <[i32, i32, untyped, untyped]>;
2153 def VOP_F16_F32 : VOPProfile <[f16, f32, untyped, untyped]>;
2154 def VOP_F32_F16 : VOPProfile <[f32, f16, untyped, untyped]>;
2156 def VOP_F32_F32_F16 : VOPProfile <[f32, f32, f16, untyped]>;
2157 def VOP_F32_F32_F32 : VOPProfile <[f32, f32, f32, untyped]>;
2158 def VOP_F32_F32_I32 : VOPProfile <[f32, f32, i32, untyped]>;
2159 def VOP_F64_F64_F64 : VOPProfile <[f64, f64, f64, untyped]>;
2160 def VOP_F64_F64_I32 : VOPProfile <[f64, f64, i32, untyped]>;
2161 def VOP_I32_F32_F32 : VOPProfile <[i32, f32, f32, untyped]>;
2162 def VOP_I32_F32_I32 : VOPProfile <[i32, f32, i32, untyped]>;
2163 def VOP_I32_I32_I32 : VOPProfile <[i32, i32, i32, untyped]>;
2164 def VOP_I32_I32_I32_ARITH : VOPProfile <[i32, i32, i32, untyped], 0, /*EnableClamp=*/1>;
2165 def VOP_V2F16_F32_F32 : VOPProfile <[v2f16, f32, f32, untyped]>;
2166 def VOP_F32_F16_F16_F16 : VOPProfile <[f32, f16, f16, f16]>;
2168 def VOP_I64_I64_I32 : VOPProfile <[i64, i64, i32, untyped]>;
2169 def VOP_I64_I32_I64 : VOPProfile <[i64, i32, i64, untyped]>;
2170 def VOP_I64_I64_I64 : VOPProfile <[i64, i64, i64, untyped]>;
2172 def VOP_F16_F32_F16_F32 : VOPProfile <[f16, f32, f16, f32]>;
2173 def VOP_F32_F32_F16_F16 : VOPProfile <[f32, f32, f16, f16]>;
2174 def VOP_F32_F32_F32_F32 : VOPProfile <[f32, f32, f32, f32]>;
2175 def VOP_F64_F64_F64_F64 : VOPProfile <[f64, f64, f64, f64]>;
2176 def VOP_I32_I32_I32_I32 : VOPProfile <[i32, i32, i32, i32]>;
2177 def VOP_I64_I32_I32_I64 : VOPProfile <[i64, i32, i32, i64]>;
2178 def VOP_I32_F32_I32_I32 : VOPProfile <[i32, f32, i32, i32]>;
2179 def VOP_I64_I64_I32_I64 : VOPProfile <[i64, i64, i32, i64]>;
2180 def VOP_V4I32_I64_I32_V4I32 : VOPProfile <[v4i32, i64, i32, v4i32]>;
2182 def VOP_F32_V2F16_V2F16_F32 : VOPProfile <[f32, v2f16, v2f16, f32]>;
2183 def VOP_I32_V2I16_V2I16_I32 : VOPProfile <[i32, v2i16, v2i16, i32]>;
2185 def VOP_V4F32_F32_F32_V4F32 : VOPProfile <[v4f32, f32, f32, v4f32]>;
2186 def VOP_V16F32_F32_F32_V16F32 : VOPProfile <[v16f32, f32, f32, v16f32]>;
2187 def VOP_V32F32_F32_F32_V32F32 : VOPProfile <[v32f32, f32, f32, v32f32]>;
2188 def VOP_V4F32_V4F16_V4F16_V4F32 : VOPProfile <[v4f32, v4f16, v4f16, v4f32]>;
2189 def VOP_V16F32_V4F16_V4F16_V16F32 : VOPProfile <[v16f32, v4f16, v4f16, v16f32]>;
2190 def VOP_V32F32_V4F16_V4F16_V32F32 : VOPProfile <[v32f32, v4f16, v4f16, v32f32]>;
2191 def VOP_V4F32_V2I16_V2I16_V4F32 : VOPProfile <[v4f32, v2i16, v2i16, v4f32]>;
2192 def VOP_V16F32_V2I16_V2I16_V16F32 : VOPProfile <[v16f32, v2i16, v2i16, v16f32]>;
2193 def VOP_V32F32_V2I16_V2I16_V32F32 : VOPProfile <[v32f32, v2i16, v2i16, v32f32]>;
2194 def VOP_V4I32_I32_I32_V4I32 : VOPProfile <[v4i32, i32, i32, v4i32]>;
2195 def VOP_V16I32_I32_I32_V16I32 : VOPProfile <[v16i32, i32, i32, v16i32]>;
2196 def VOP_V32I32_I32_I32_V32I32 : VOPProfile <[v32i32, i32, i32, v32i32]>;
2198 class Commutable_REV <string revOp, bit isOrig> {
2199 string RevOp = revOp;
2200 bit IsOrig = isOrig;
2201 }
2203 class AtomicNoRet <string noRetOp, bit isRet> {
2204 string NoRetOp = noRetOp;
2205 bit IsRet = isRet;
2206 }
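// Illustrative note (hypothetical names, not part of this file): a
// reversed-operand opcode such as a v_subrev variant is expected to mix in
// Commutable_REV<"v_sub_...", 0> while the original op uses
// Commutable_REV<"v_sub_...", 1>; getCommuteOrig/getCommuteRev below translate
// between the two. AtomicNoRet pairs an atomic's no-return and returning forms
// the same way for getAtomicRetOp/getAtomicNoRetOp.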
2208 //===----------------------------------------------------------------------===//
2209 // Interpolation opcodes
2210 //===----------------------------------------------------------------------===//
2212 class VINTRPDstOperand <RegisterClass rc> : RegisterOperand <rc, "printVINTRPDst">;
2214 class VINTRP_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
2215 VINTRPCommon <outs, ins, "", pattern>,
2216 SIMCInstr<opName, SIEncodingFamily.NONE> {
2217 let isPseudo = 1;
2218 let isCodeGenOnly = 1;
2219 }
2221 // FIXME-GFX10: WIP.
2222 class VINTRP_Real_si <bits <2> op, string opName, dag outs, dag ins,
2223 string asm, int encodingFamily> :
2224 VINTRPCommon <outs, ins, asm, []>,
2225 VINTRPe <op>,
2226 SIMCInstr<opName, encodingFamily> {
2227 let DisableDecoder = DisableSIDecoder;
2228 }
2230 class VINTRP_Real_vi <bits <2> op, string opName, dag outs, dag ins,
2231 string asm> :
2232 VINTRPCommon <outs, ins, asm, []>,
2233 VINTRPe_vi <op>,
2234 SIMCInstr<opName, SIEncodingFamily.VI> {
2235 let AssemblerPredicate = VIAssemblerPredicate;
2236 let DecoderNamespace = "GFX8";
2237 let DisableDecoder = DisableVIDecoder;
2238 }
2240 // FIXME-GFX10: WIP.
2241 multiclass VINTRP_m <bits <2> op, dag outs, dag ins, string asm,
2242 list<dag> pattern = []> {
2243 def "" : VINTRP_Pseudo <NAME, outs, ins, pattern>;
2245 let AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7" in {
2246 def _si : VINTRP_Real_si <op, NAME, outs, ins, asm, SIEncodingFamily.SI>;
2247 } // End AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7"
2249 def _vi : VINTRP_Real_vi <op, NAME, outs, ins, asm>;
2251 let AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10" in {
2252 def _gfx10 : VINTRP_Real_si<op, NAME, outs, ins, asm, SIEncodingFamily.GFX10>;
2253 } // End AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10"
2254 }
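// Usage sketch (hypothetical operand lists, for illustration only): an
// interpolation opcode defined elsewhere would instantiate this multiclass
// roughly as
//   defm V_INTERP_P1_F32 : VINTRP_m <0b00, (outs ...), (ins ...), "v_interp_p1_f32 ...">;
// producing the pseudo plus the _si, _vi and _gfx10 real encodings above.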
2255 //===----------------------------------------------------------------------===//
2256 // Vector instruction mappings
2257 //===----------------------------------------------------------------------===//
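// Note (editorial, not from the upstream source): each InstrMapping below is
// expanded by TableGen into a lookup function in the generated instruction-info
// tables, e.g. something like int getVOPe64(uint16_t Opcode) in the AMDGPU
// namespace, returning -1 when no mapping exists; the C++ side queries these
// from SIInstrInfo.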
2259 // Maps an opcode in e32 form to its e64 equivalent
2260 def getVOPe64 : InstrMapping {
2261 let FilterClass = "VOP";
2262 let RowFields = ["OpName"];
2263 let ColFields = ["Size", "VOP3"];
2264 let KeyCol = ["4", "0"];
2265 let ValueCols = [["8", "1"]];
2266 }
2268 // Maps an opcode in e64 form to its e32 equivalent
2269 def getVOPe32 : InstrMapping {
2270 let FilterClass = "VOP";
2271 let RowFields = ["OpName"];
2272 let ColFields = ["Size", "VOP3"];
2273 let KeyCol = ["8", "1"];
2274 let ValueCols = [["4", "0"]];
2275 }
2277 // Maps ordinary instructions to their SDWA counterparts
2278 def getSDWAOp : InstrMapping {
2279 let FilterClass = "VOP";
2280 let RowFields = ["OpName"];
2281 let ColFields = ["AsmVariantName"];
2282 let KeyCol = ["Default"];
2283 let ValueCols = [["SDWA"]];
2284 }
2286 // Maps SDWA instructions to their ordinary counterparts
2287 def getBasicFromSDWAOp : InstrMapping {
2288 let FilterClass = "VOP";
2289 let RowFields = ["OpName"];
2290 let ColFields = ["AsmVariantName"];
2291 let KeyCol = ["SDWA"];
2292 let ValueCols = [["Default"]];
2293 }
2295 // Maps ordinary instructions to their DPP counterparts
2296 def getDPPOp32 : InstrMapping {
2297 let FilterClass = "VOP";
2298 let RowFields = ["OpName"];
2299 let ColFields = ["AsmVariantName"];
2300 let KeyCol = ["Default"];
2301 let ValueCols = [["DPP"]];
2302 }
2304 // Maps a commuted opcode to its original version
2305 def getCommuteOrig : InstrMapping {
2306 let FilterClass = "Commutable_REV";
2307 let RowFields = ["RevOp"];
2308 let ColFields = ["IsOrig"];
2309 let KeyCol = ["0"];
2310 let ValueCols = [["1"]];
2311 }
2313 // Maps an original opcode to its commuted version
2314 def getCommuteRev : InstrMapping {
2315 let FilterClass = "Commutable_REV";
2316 let RowFields = ["RevOp"];
2317 let ColFields = ["IsOrig"];
2318 let KeyCol = ["1"];
2319 let ValueCols = [["0"]];
2320 }
2322 def getMCOpcodeGen : InstrMapping {
2323 let FilterClass = "SIMCInstr";
2324 let RowFields = ["PseudoInstr"];
2325 let ColFields = ["Subtarget"];
2326 let KeyCol = [!cast<string>(SIEncodingFamily.NONE)];
2327 let ValueCols = [[!cast<string>(SIEncodingFamily.SI)],
2328 [!cast<string>(SIEncodingFamily.VI)],
2329 [!cast<string>(SIEncodingFamily.SDWA)],
2330 [!cast<string>(SIEncodingFamily.SDWA9)],
2331 // GFX80 encoding is added to work around a multiple matching
2332 // issue for buffer instructions with unpacked d16 data. This
2333 // does not actually change the encoding, and thus may be
2334 // removed.
2335 [!cast<string>(SIEncodingFamily.GFX80)],
2336 [!cast<string>(SIEncodingFamily.GFX9)],
2337 [!cast<string>(SIEncodingFamily.GFX10)],
2338 [!cast<string>(SIEncodingFamily.SDWA10)]];
2339 }
2341 // Get equivalent SOPK instruction.
2342 def getSOPKOp : InstrMapping {
2343 let FilterClass = "SOPKInstTable";
2344 let RowFields = ["BaseCmpOp"];
2345 let ColFields = ["IsSOPK"];
2346 let KeyCol = ["0"];
2347 let ValueCols = [["1"]];
2348 }
2350 def getAddr64Inst : InstrMapping {
2351 let FilterClass = "MUBUFAddr64Table";
2352 let RowFields = ["OpName"];
2353 let ColFields = ["IsAddr64"];
2354 let KeyCol = ["0"];
2355 let ValueCols = [["1"]];
2356 }
2358 def getIfAddr64Inst : InstrMapping {
2359 let FilterClass = "MUBUFAddr64Table";
2360 let RowFields = ["OpName"];
2361 let ColFields = ["IsAddr64"];
2362 let KeyCol = ["1"];
2363 let ValueCols = [["1"]];
2364 }
2366 def getMUBUFNoLdsInst : InstrMapping {
2367 let FilterClass = "MUBUFLdsTable";
2368 let RowFields = ["OpName"];
2369 let ColFields = ["IsLds"];
2370 let KeyCol = ["1"];
2371 let ValueCols = [["0"]];
2372 }
2374 // Maps an atomic opcode to its version with a return value.
2375 def getAtomicRetOp : InstrMapping {
2376 let FilterClass = "AtomicNoRet";
2377 let RowFields = ["NoRetOp"];
2378 let ColFields = ["IsRet"];
2379 let KeyCol = ["0"];
2380 let ValueCols = [["1"]];
2381 }
2383 // Maps an atomic opcode to its returnless version.
2384 def getAtomicNoRetOp : InstrMapping {
2385 let FilterClass = "AtomicNoRet";
2386 let RowFields = ["NoRetOp"];
2387 let ColFields = ["IsRet"];
2388 let KeyCol = ["1"];
2389 let ValueCols = [["0"]];
2390 }
2392 // Maps a GLOBAL to its SADDR form.
2393 def getGlobalSaddrOp : InstrMapping {
2394 let FilterClass = "GlobalSaddrTable";
2395 let RowFields = ["SaddrOp"];
2396 let ColFields = ["IsSaddr"];
2397 let KeyCol = ["0"];
2398 let ValueCols = [["1"]];
2399 }
2401 // Maps a v_cmpx opcode with sdst to opcode without sdst.
2402 def getVCMPXNoSDstOp : InstrMapping {
2403 let FilterClass = "VCMPXNoSDstTable";
2404 let RowFields = ["NoSDstOp"];
2405 let ColFields = ["HasSDst"];
2406 let KeyCol = ["1"];
2407 let ValueCols = [["0"]];
2408 }
2410 // Maps a SOPP to a SOPP with S_NOP
2411 def getSOPPWithRelaxation : InstrMapping {
2412 let FilterClass = "Base_SOPP";
2413 let RowFields = ["AsmString"];
2414 let ColFields = ["Size"];
2415 let KeyCol = ["4"];
2416 let ValueCols = [["8"]];
2417 }
2419 include "SIInstructions.td"
2421 include "DSInstructions.td"
2422 include "MIMGInstructions.td"