1 //===-- SIInstrInfo.td - SI Instruction Infos -------------*- tablegen -*--===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
// Wavefront-size predicates: gate instruction selection on whether the
// subtarget runs 32-lane or 64-lane wavefronts.  The AssemblerPredicate lets
// the assembler/disassembler make the same distinction from the feature bits.
def isWave32 : Predicate<"Subtarget->getWavefrontSize() == 32">,
  AssemblerPredicate <"FeatureWavefrontSize32">;
def isWave64 : Predicate<"Subtarget->getWavefrontSize() == 64">,
  AssemblerPredicate <"FeatureWavefrontSize64">;
14 def DisableInst : Predicate <"false">, AssemblerPredicate<"FeatureDisable">;
16 class GCNPredicateControl : PredicateControl {
17 Predicate SIAssemblerPredicate = isGFX6GFX7;
18 Predicate VIAssemblerPredicate = isGFX8GFX9;
// Except for the NONE field, this must be kept in sync with the
22 // SIEncodingFamily enum in AMDGPUInstrInfo.cpp
23 def SIEncodingFamily {
35 //===----------------------------------------------------------------------===//
37 //===----------------------------------------------------------------------===//
39 def AMDGPUclamp : SDNode<"AMDGPUISD::CLAMP", SDTFPUnaryOp>;
41 def SIsbuffer_load : SDNode<"AMDGPUISD::SBUFFER_LOAD",
42 SDTypeProfile<1, 4, [SDTCisVT<1, v4i32>, SDTCisVT<2, i32>, SDTCisVT<3, i1>,
44 [SDNPMayLoad, SDNPMemOperand]
47 def SIds_ordered_count : SDNode<"AMDGPUISD::DS_ORDERED_COUNT",
48 SDTypeProfile<1, 2, [SDTCisVT<0, i32>, SDTCisVT<1, i32>, SDTCisVT<2, i16>]>,
49 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain, SDNPInGlue]
52 def SIatomic_inc : SDNode<"AMDGPUISD::ATOMIC_INC", SDTAtomic2,
53 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
56 def SIatomic_dec : SDNode<"AMDGPUISD::ATOMIC_DEC", SDTAtomic2,
57 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
60 def SDTAtomic2_f32 : SDTypeProfile<1, 2, [
61 SDTCisSameAs<0,2>, SDTCisFP<0>, SDTCisPtrTy<1>
64 def SIatomic_fmin : SDNode<"AMDGPUISD::ATOMIC_LOAD_FMIN", SDTAtomic2_f32,
65 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
68 def SIatomic_fmax : SDNode<"AMDGPUISD::ATOMIC_LOAD_FMAX", SDTAtomic2_f32,
69 [SDNPMayLoad, SDNPMayStore, SDNPMemOperand, SDNPHasChain]
72 // load_d16_{lo|hi} ptr, tied_input
73 def SIload_d16 : SDTypeProfile<1, 2, [
79 def SDTtbuffer_load : SDTypeProfile<1, 8,
81 SDTCisVT<1, v4i32>, // rsrc
82 SDTCisVT<2, i32>, // vindex(VGPR)
83 SDTCisVT<3, i32>, // voffset(VGPR)
84 SDTCisVT<4, i32>, // soffset(SGPR)
85 SDTCisVT<5, i32>, // offset(imm)
86 SDTCisVT<6, i32>, // format(imm)
87 SDTCisVT<7, i32>, // cachepolicy, swizzled buffer(imm)
88 SDTCisVT<8, i1> // idxen(imm)
// Typed-buffer (tbuffer) load with format conversion.  Operand layout comes
// from the SDTtbuffer_load profile; the node reads memory on the chain.
def SItbuffer_load : SDNode<"AMDGPUISD::TBUFFER_LOAD_FORMAT", SDTtbuffer_load,
  [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]>;
93 def SItbuffer_load_d16 : SDNode<"AMDGPUISD::TBUFFER_LOAD_FORMAT_D16",
95 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]>;
97 def SDTtbuffer_store : SDTypeProfile<0, 9,
99 SDTCisVT<1, v4i32>, // rsrc
100 SDTCisVT<2, i32>, // vindex(VGPR)
101 SDTCisVT<3, i32>, // voffset(VGPR)
102 SDTCisVT<4, i32>, // soffset(SGPR)
103 SDTCisVT<5, i32>, // offset(imm)
104 SDTCisVT<6, i32>, // format(imm)
105 SDTCisVT<7, i32>, // cachepolicy, swizzled buffer(imm)
106 SDTCisVT<8, i1> // idxen(imm)
// Typed-buffer (tbuffer) store with format conversion; operand layout from
// the SDTtbuffer_store profile, writes memory on the chain.
def SItbuffer_store : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT", SDTtbuffer_store,
  [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
111 def SItbuffer_store_d16 : SDNode<"AMDGPUISD::TBUFFER_STORE_FORMAT_D16",
113 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
115 def SDTBufferLoad : SDTypeProfile<1, 7,
117 SDTCisVT<1, v4i32>, // rsrc
118 SDTCisVT<2, i32>, // vindex(VGPR)
119 SDTCisVT<3, i32>, // voffset(VGPR)
120 SDTCisVT<4, i32>, // soffset(SGPR)
121 SDTCisVT<5, i32>, // offset(imm)
122 SDTCisVT<6, i32>, // cachepolicy, swizzled buffer(imm)
123 SDTCisVT<7, i1>]>; // idxen(imm)
// Raw buffer loads.  All share the SDTBufferLoad operand profile
// (rsrc, vindex, voffset, soffset, offset, cachepolicy, idxen) and are
// chained memory operations.
def SIbuffer_load : SDNode <"AMDGPUISD::BUFFER_LOAD", SDTBufferLoad,
  [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
// Sub-word variants: ubyte/ushort zero-extend, byte/short sign-extend.
def SIbuffer_load_ubyte : SDNode <"AMDGPUISD::BUFFER_LOAD_UBYTE", SDTBufferLoad,
  [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
def SIbuffer_load_ushort : SDNode <"AMDGPUISD::BUFFER_LOAD_USHORT", SDTBufferLoad,
  [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
def SIbuffer_load_byte : SDNode <"AMDGPUISD::BUFFER_LOAD_BYTE", SDTBufferLoad,
  [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
def SIbuffer_load_short: SDNode <"AMDGPUISD::BUFFER_LOAD_SHORT", SDTBufferLoad,
  [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
// Format loads perform the buffer-format conversion on the loaded data.
def SIbuffer_load_format : SDNode <"AMDGPUISD::BUFFER_LOAD_FORMAT", SDTBufferLoad,
  [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
137 def SIbuffer_load_format_d16 : SDNode <"AMDGPUISD::BUFFER_LOAD_FORMAT_D16",
139 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad]>;
141 def SDTBufferStore : SDTypeProfile<0, 8,
143 SDTCisVT<1, v4i32>, // rsrc
144 SDTCisVT<2, i32>, // vindex(VGPR)
145 SDTCisVT<3, i32>, // voffset(VGPR)
146 SDTCisVT<4, i32>, // soffset(SGPR)
147 SDTCisVT<5, i32>, // offset(imm)
148 SDTCisVT<6, i32>, // cachepolicy, swizzled buffer(imm)
149 SDTCisVT<7, i1>]>; // idxen(imm)
// Raw buffer store: SDTBufferStore operand profile, writes memory on the
// chain.
def SIbuffer_store : SDNode <"AMDGPUISD::BUFFER_STORE", SDTBufferStore,
  [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
153 def SIbuffer_store_byte: SDNode <"AMDGPUISD::BUFFER_STORE_BYTE",
155 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
156 def SIbuffer_store_short : SDNode <"AMDGPUISD::BUFFER_STORE_SHORT",
158 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
159 def SIbuffer_store_format : SDNode <"AMDGPUISD::BUFFER_STORE_FORMAT",
161 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
162 def SIbuffer_store_format_d16 : SDNode <"AMDGPUISD::BUFFER_STORE_FORMAT_D16",
164 [SDNPMayStore, SDNPMemOperand, SDNPHasChain]>;
166 class SDBufferAtomic<string opcode> : SDNode <opcode,
168 [SDTCisVT<2, v4i32>, // rsrc
169 SDTCisVT<3, i32>, // vindex(VGPR)
170 SDTCisVT<4, i32>, // voffset(VGPR)
171 SDTCisVT<5, i32>, // soffset(SGPR)
172 SDTCisVT<6, i32>, // offset(imm)
173 SDTCisVT<7, i32>, // cachepolicy(imm)
174 SDTCisVT<8, i1>]>, // idxen(imm)
175 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad, SDNPMayStore]
178 class SDBufferAtomicNoRtn<string opcode, ValueType ty> : SDNode <opcode,
180 [SDTCisVT<0, ty>, // vdata
181 SDTCisVT<1, v4i32>, // rsrc
182 SDTCisVT<2, i32>, // vindex(VGPR)
183 SDTCisVT<3, i32>, // voffset(VGPR)
184 SDTCisVT<4, i32>, // soffset(SGPR)
185 SDTCisVT<5, i32>, // offset(imm)
186 SDTCisVT<6, i32>, // cachepolicy(imm)
187 SDTCisVT<7, i1>]>, // idxen(imm)
188 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad, SDNPMayStore]
// Buffer atomic read-modify-write nodes.  The integer ops return the old
// value (SDBufferAtomic); the FP-add ops use the no-return variant with the
// data type given explicitly (f32 scalar / v2f16 packed).
def SIbuffer_atomic_swap : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SWAP">;
def SIbuffer_atomic_add : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_ADD">;
def SIbuffer_atomic_sub : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SUB">;
def SIbuffer_atomic_smin : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SMIN">;
def SIbuffer_atomic_umin : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_UMIN">;
def SIbuffer_atomic_smax : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_SMAX">;
def SIbuffer_atomic_umax : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_UMAX">;
def SIbuffer_atomic_and : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_AND">;
def SIbuffer_atomic_or : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_OR">;
def SIbuffer_atomic_xor : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_XOR">;
def SIbuffer_atomic_inc : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_INC">;
def SIbuffer_atomic_dec : SDBufferAtomic <"AMDGPUISD::BUFFER_ATOMIC_DEC">;
def SIbuffer_atomic_fadd : SDBufferAtomicNoRtn <"AMDGPUISD::BUFFER_ATOMIC_FADD", f32>;
def SIbuffer_atomic_pk_fadd : SDBufferAtomicNoRtn <"AMDGPUISD::BUFFER_ATOMIC_PK_FADD", v2f16>;
206 def SIbuffer_atomic_cmpswap : SDNode <"AMDGPUISD::BUFFER_ATOMIC_CMPSWAP",
208 [SDTCisVT<0, i32>, // dst
209 SDTCisVT<1, i32>, // src
210 SDTCisVT<2, i32>, // cmp
211 SDTCisVT<3, v4i32>, // rsrc
212 SDTCisVT<4, i32>, // vindex(VGPR)
213 SDTCisVT<5, i32>, // voffset(VGPR)
214 SDTCisVT<6, i32>, // soffset(SGPR)
215 SDTCisVT<7, i32>, // offset(imm)
216 SDTCisVT<8, i32>, // cachepolicy(imm)
217 SDTCisVT<9, i1>]>, // idxen(imm)
218 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad, SDNPMayStore]
221 class SDGlobalAtomicNoRtn<string opcode, ValueType ty> : SDNode <opcode,
223 [SDTCisPtrTy<0>, // vaddr
224 SDTCisVT<1, ty>]>, // vdata
225 [SDNPMemOperand, SDNPHasChain, SDNPMayLoad, SDNPMayStore]
// Global-memory FP atomic adds with no returned value: scalar f32 and
// packed v2f16 variants.
def SIglobal_atomic_fadd : SDGlobalAtomicNoRtn <"AMDGPUISD::ATOMIC_FADD", f32>;
def SIglobal_atomic_pk_fadd : SDGlobalAtomicNoRtn <"AMDGPUISD::ATOMIC_PK_FADD", v2f16>;
231 def SIpc_add_rel_offset : SDNode<"AMDGPUISD::PC_ADD_REL_OFFSET",
232 SDTypeProfile<1, 2, [SDTCisVT<0, iPTR>, SDTCisSameAs<0,1>, SDTCisSameAs<0,2>]>
235 def SIlds : SDNode<"AMDGPUISD::LDS",
236 SDTypeProfile<1, 1, [SDTCisVT<0, iPTR>, SDTCisSameAs<0,1>]>
239 def SIload_d16_lo : SDNode<"AMDGPUISD::LOAD_D16_LO",
241 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]
244 def SIload_d16_lo_u8 : SDNode<"AMDGPUISD::LOAD_D16_LO_U8",
246 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]
249 def SIload_d16_lo_i8 : SDNode<"AMDGPUISD::LOAD_D16_LO_I8",
251 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]
254 def SIload_d16_hi : SDNode<"AMDGPUISD::LOAD_D16_HI",
256 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]
259 def SIload_d16_hi_u8 : SDNode<"AMDGPUISD::LOAD_D16_HI_U8",
261 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]
264 def SIload_d16_hi_i8 : SDNode<"AMDGPUISD::LOAD_D16_HI_I8",
266 [SDNPMayLoad, SDNPMemOperand, SDNPHasChain]
269 def SIdenorm_mode : SDNode<"AMDGPUISD::DENORM_MODE",
270 SDTypeProfile<0 ,1, [SDTCisInt<0>]>,
271 [SDNPHasChain, SDNPSideEffect, SDNPOptInGlue, SDNPOutGlue]
274 //===----------------------------------------------------------------------===//
276 //===----------------------------------------------------------------------===//
278 // Returns 1 if the source arguments have modifiers, 0 if they do not.
279 // XXX - do f16 instructions?
280 class isFloatType<ValueType SrcVT> {
282 !if(!eq(SrcVT.Value, f16.Value), 1,
283 !if(!eq(SrcVT.Value, f32.Value), 1,
284 !if(!eq(SrcVT.Value, f64.Value), 1,
285 !if(!eq(SrcVT.Value, v2f16.Value), 1,
286 !if(!eq(SrcVT.Value, v4f16.Value), 1,
290 class isIntType<ValueType SrcVT> {
292 !if(!eq(SrcVT.Value, i16.Value), 1,
293 !if(!eq(SrcVT.Value, i32.Value), 1,
294 !if(!eq(SrcVT.Value, i64.Value), 1,
298 class isPackedType<ValueType SrcVT> {
300 !if(!eq(SrcVT.Value, v2i16.Value), 1,
301 !if(!eq(SrcVT.Value, v2f16.Value), 1,
302 !if(!eq(SrcVT.Value, v4f16.Value), 1, 0)
306 //===----------------------------------------------------------------------===//
307 // PatFrags for global memory operations
308 //===----------------------------------------------------------------------===//
310 foreach as = [ "global", "flat", "constant", "local", "private", "region" ] in {
311 let AddressSpaces = !cast<AddressSpaceList>("LoadAddress_"#as).AddrSpaces in {
314 defm atomic_inc_#as : binary_atomic_op<SIatomic_inc>;
315 defm atomic_dec_#as : binary_atomic_op<SIatomic_dec>;
316 defm atomic_load_fmin_#as : binary_atomic_op<SIatomic_fmin, 0>;
317 defm atomic_load_fmax_#as : binary_atomic_op<SIatomic_fmax, 0>;
320 } // End let AddressSpaces = ...
321 } // End foreach AddrSpace
323 def atomic_fadd_global_noret : PatFrag<
324 (ops node:$ptr, node:$value),
325 (SIglobal_atomic_fadd node:$ptr, node:$value)> {
329 let AddressSpaces = StoreAddress_global.AddrSpaces;
332 def atomic_pk_fadd_global_noret : PatFrag<
333 (ops node:$ptr, node:$value),
334 (SIglobal_atomic_pk_fadd node:$ptr, node:$value)> {
336 let MemoryVT = v2f16;
338 let AddressSpaces = StoreAddress_global.AddrSpaces;
341 //===----------------------------------------------------------------------===//
342 // SDNodes PatFrags for loads/stores with a glue input.
343 // This is for SDNodes and PatFrag for local loads and stores to
344 // enable s_mov_b32 m0, -1 to be glued to the memory instructions.
346 // These mirror the regular load/store PatFrags and rely on special
347 // processing during Select() to add the glued copy.
349 //===----------------------------------------------------------------------===//
351 def AMDGPUld_glue : SDNode <"ISD::LOAD", SDTLoad,
352 [SDNPHasChain, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
355 def AMDGPUatomic_ld_glue : SDNode <"ISD::ATOMIC_LOAD", SDTAtomicLoad,
356 [SDNPHasChain, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
359 def unindexedload_glue : PatFrag <(ops node:$ptr), (AMDGPUld_glue node:$ptr)> {
364 def load_glue : PatFrag <(ops node:$ptr), (unindexedload_glue node:$ptr)> {
366 let IsNonExtLoad = 1;
369 def atomic_load_32_glue : PatFrag<(ops node:$ptr),
370 (AMDGPUatomic_ld_glue node:$ptr)> {
375 def atomic_load_64_glue : PatFrag<(ops node:$ptr),
376 (AMDGPUatomic_ld_glue node:$ptr)> {
381 def extload_glue : PatFrag<(ops node:$ptr), (unindexedload_glue node:$ptr)> {
383 let IsAnyExtLoad = 1;
386 def sextload_glue : PatFrag<(ops node:$ptr), (unindexedload_glue node:$ptr)> {
388 let IsSignExtLoad = 1;
391 def zextload_glue : PatFrag<(ops node:$ptr), (unindexedload_glue node:$ptr)> {
393 let IsZeroExtLoad = 1;
396 def extloadi8_glue : PatFrag<(ops node:$ptr), (extload_glue node:$ptr)> {
401 def zextloadi8_glue : PatFrag<(ops node:$ptr), (zextload_glue node:$ptr)> {
406 def extloadi16_glue : PatFrag<(ops node:$ptr), (extload_glue node:$ptr)> {
411 def zextloadi16_glue : PatFrag<(ops node:$ptr), (zextload_glue node:$ptr)> {
416 def sextloadi8_glue : PatFrag<(ops node:$ptr), (sextload_glue node:$ptr)> {
421 def sextloadi16_glue : PatFrag<(ops node:$ptr), (sextload_glue node:$ptr)> {
427 let IsLoad = 1, AddressSpaces = LoadAddress_local.AddrSpaces in {
428 def load_local_m0 : PatFrag<(ops node:$ptr), (load_glue node:$ptr)> {
429 let IsNonExtLoad = 1;
432 let MemoryVT = i8 in {
433 def extloadi8_local_m0 : PatFrag<(ops node:$ptr), (extloadi8_glue node:$ptr)>;
434 def sextloadi8_local_m0 : PatFrag<(ops node:$ptr), (sextloadi8_glue node:$ptr)>;
435 def zextloadi8_local_m0 : PatFrag<(ops node:$ptr), (zextloadi8_glue node:$ptr)>;
438 let MemoryVT = i16 in {
439 def extloadi16_local_m0 : PatFrag<(ops node:$ptr), (extloadi16_glue node:$ptr)>;
440 def sextloadi16_local_m0 : PatFrag<(ops node:$ptr), (sextloadi16_glue node:$ptr)>;
441 def zextloadi16_local_m0 : PatFrag<(ops node:$ptr), (zextloadi16_glue node:$ptr)>;
444 def load_align8_local_m0 : PatFrag<(ops node:$ptr),
445 (load_local_m0 node:$ptr)> {
447 let IsNonExtLoad = 1;
448 let MinAlignment = 8;
450 def load_align16_local_m0 : PatFrag<(ops node:$ptr),
451 (load_local_m0 node:$ptr)> {
453 let IsNonExtLoad = 1;
454 let MinAlignment = 16;
459 let IsAtomic = 1, AddressSpaces = LoadAddress_local.AddrSpaces in {
460 def atomic_load_32_local_m0 : PatFrag<(ops node:$ptr),
461 (atomic_load_32_glue node:$ptr)> {
464 def atomic_load_64_local_m0 : PatFrag<(ops node:$ptr),
465 (atomic_load_64_glue node:$ptr)> {
469 } // End let AddressSpaces = LoadAddress_local.AddrSpaces
472 def AMDGPUst_glue : SDNode <"ISD::STORE", SDTStore,
473 [SDNPHasChain, SDNPMayStore, SDNPMemOperand, SDNPInGlue]
476 def AMDGPUatomic_st_glue : SDNode <"ISD::ATOMIC_STORE", SDTAtomicStore,
477 [SDNPHasChain, SDNPMayStore, SDNPMemOperand, SDNPInGlue]
480 def unindexedstore_glue : PatFrag<(ops node:$val, node:$ptr),
481 (AMDGPUst_glue node:$val, node:$ptr)> {
486 def store_glue : PatFrag<(ops node:$val, node:$ptr),
487 (unindexedstore_glue node:$val, node:$ptr)> {
489 let IsTruncStore = 0;
492 def truncstore_glue : PatFrag<(ops node:$val, node:$ptr),
493 (unindexedstore_glue node:$val, node:$ptr)> {
495 let IsTruncStore = 1;
498 def truncstorei8_glue : PatFrag<(ops node:$val, node:$ptr),
499 (truncstore_glue node:$val, node:$ptr)> {
504 def truncstorei16_glue : PatFrag<(ops node:$val, node:$ptr),
505 (truncstore_glue node:$val, node:$ptr)> {
510 let IsStore = 1, AddressSpaces = StoreAddress_local.AddrSpaces in {
511 def store_local_m0 : PatFrag<(ops node:$val, node:$ptr),
512 (store_glue node:$val, node:$ptr)> {
514 let IsTruncStore = 0;
517 def truncstorei8_local_m0 : PatFrag<(ops node:$val, node:$ptr),
518 (unindexedstore_glue node:$val, node:$ptr)> {
523 def truncstorei16_local_m0 : PatFrag<(ops node:$val, node:$ptr),
524 (unindexedstore_glue node:$val, node:$ptr)> {
530 def store_align16_local_m0 : PatFrag <
531 (ops node:$value, node:$ptr),
532 (store_local_m0 node:$value, node:$ptr)> {
534 let IsTruncStore = 0;
535 let MinAlignment = 16;
538 def store_align8_local_m0 : PatFrag <
539 (ops node:$value, node:$ptr),
540 (store_local_m0 node:$value, node:$ptr)> {
542 let IsTruncStore = 0;
543 let MinAlignment = 8;
546 let AddressSpaces = StoreAddress_local.AddrSpaces in {
548 def atomic_store_local_32_m0 : PatFrag <
549 (ops node:$value, node:$ptr),
550 (AMDGPUatomic_st_glue node:$value, node:$ptr)> {
554 def atomic_store_local_64_m0 : PatFrag <
555 (ops node:$value, node:$ptr),
556 (AMDGPUatomic_st_glue node:$value, node:$ptr)> {
560 } // End let AddressSpaces = StoreAddress_local.AddrSpaces
563 def si_setcc_uniform : PatFrag <
564 (ops node:$lhs, node:$rhs, node:$cond),
565 (setcc node:$lhs, node:$rhs, node:$cond), [{
566 for (SDNode *Use : N->uses()) {
567 if (Use->isMachineOpcode() || Use->getOpcode() != ISD::CopyToReg)
570 unsigned Reg = cast<RegisterSDNode>(Use->getOperand(1))->getReg();
571 if (Reg != AMDGPU::SCC)
577 //===----------------------------------------------------------------------===//
578 // SDNodes PatFrags for d16 loads
579 //===----------------------------------------------------------------------===//
// Base PatFrag for d16 loads: (ptr, tied_input) where tied_input supplies
// the half of the destination register the load does not write.
class LoadD16Frag <SDPatternOperator op> : PatFrag<(ops node:$ptr, node:$tied_in), (op node:$ptr, node:$tied_in)>;
// Per-address-space specializations via the *Address mixin classes.
class LocalLoadD16 <SDPatternOperator op> : LoadD16Frag <op>, LocalAddress;
class GlobalLoadD16 <SDPatternOperator op> : LoadD16Frag <op>, GlobalLoadAddress;
class PrivateLoadD16 <SDPatternOperator op> : LoadD16Frag <op>, PrivateAddress;
class FlatLoadD16 <SDPatternOperator op> : LoadD16Frag <op>, FlatLoadAddress;
// d16 loads writing the HIGH half of the destination, one def per address
// space: plain 16-bit load, zero-extending i8, and sign-extending i8.
def load_d16_hi_local : LocalLoadD16 <SIload_d16_hi>;
def az_extloadi8_d16_hi_local : LocalLoadD16 <SIload_d16_hi_u8>;
def sextloadi8_d16_hi_local : LocalLoadD16 <SIload_d16_hi_i8>;
def load_d16_hi_global : GlobalLoadD16 <SIload_d16_hi>;
def az_extloadi8_d16_hi_global : GlobalLoadD16 <SIload_d16_hi_u8>;
def sextloadi8_d16_hi_global : GlobalLoadD16 <SIload_d16_hi_i8>;
def load_d16_hi_private : PrivateLoadD16 <SIload_d16_hi>;
def az_extloadi8_d16_hi_private : PrivateLoadD16 <SIload_d16_hi_u8>;
def sextloadi8_d16_hi_private : PrivateLoadD16 <SIload_d16_hi_i8>;
def load_d16_hi_flat : FlatLoadD16 <SIload_d16_hi>;
def az_extloadi8_d16_hi_flat : FlatLoadD16 <SIload_d16_hi_u8>;
def sextloadi8_d16_hi_flat : FlatLoadD16 <SIload_d16_hi_i8>;
// Same set for loads writing the LOW half of the destination.
def load_d16_lo_local : LocalLoadD16 <SIload_d16_lo>;
def az_extloadi8_d16_lo_local : LocalLoadD16 <SIload_d16_lo_u8>;
def sextloadi8_d16_lo_local : LocalLoadD16 <SIload_d16_lo_i8>;
def load_d16_lo_global : GlobalLoadD16 <SIload_d16_lo>;
def az_extloadi8_d16_lo_global : GlobalLoadD16 <SIload_d16_lo_u8>;
def sextloadi8_d16_lo_global : GlobalLoadD16 <SIload_d16_lo_i8>;
def load_d16_lo_private : PrivateLoadD16 <SIload_d16_lo>;
def az_extloadi8_d16_lo_private : PrivateLoadD16 <SIload_d16_lo_u8>;
def sextloadi8_d16_lo_private : PrivateLoadD16 <SIload_d16_lo_i8>;
def load_d16_lo_flat : FlatLoadD16 <SIload_d16_lo>;
def az_extloadi8_d16_lo_flat : FlatLoadD16 <SIload_d16_lo_u8>;
def sextloadi8_d16_lo_flat : FlatLoadD16 <SIload_d16_lo_i8>;
622 def lshr_rev : PatFrag <
623 (ops node:$src1, node:$src0),
627 def ashr_rev : PatFrag <
628 (ops node:$src1, node:$src0),
632 def lshl_rev : PatFrag <
633 (ops node:$src1, node:$src0),
637 def add_ctpop : PatFrag <
638 (ops node:$src0, node:$src1),
639 (add (ctpop $src0), $src1)
642 multiclass SIAtomicM0Glue2 <string op_name, bit is_amdgpu = 0,
643 SDTypeProfile tc = SDTAtomic2,
647 !if(is_amdgpu, "AMDGPUISD", "ISD")#"::ATOMIC_"#op_name, tc,
648 [SDNPHasChain, SDNPMayStore, SDNPMayLoad, SDNPMemOperand, SDNPInGlue]
651 let AddressSpaces = StoreAddress_local.AddrSpaces in {
652 defm _local_m0 : binary_atomic_op <!cast<SDNode>(NAME#"_glue"), IsInt>;
655 let AddressSpaces = StoreAddress_region.AddrSpaces in {
656 defm _region_m0 : binary_atomic_op <!cast<SDNode>(NAME#"_glue"), IsInt>;
// Instantiate the M0-glued atomic PatFrags (local/region _m0 variants).
// Second template arg selects the AMDGPUISD (1) vs. generic ISD (0) opcode
// namespace; the FP ops pass the SDTAtomic2_f32 profile and IsInt = 0.
defm atomic_load_add : SIAtomicM0Glue2 <"LOAD_ADD">;
defm atomic_load_sub : SIAtomicM0Glue2 <"LOAD_SUB">;
defm atomic_inc : SIAtomicM0Glue2 <"INC", 1>;
defm atomic_dec : SIAtomicM0Glue2 <"DEC", 1>;
defm atomic_load_and : SIAtomicM0Glue2 <"LOAD_AND">;
defm atomic_load_min : SIAtomicM0Glue2 <"LOAD_MIN">;
defm atomic_load_max : SIAtomicM0Glue2 <"LOAD_MAX">;
defm atomic_load_or : SIAtomicM0Glue2 <"LOAD_OR">;
defm atomic_load_xor : SIAtomicM0Glue2 <"LOAD_XOR">;
defm atomic_load_umin : SIAtomicM0Glue2 <"LOAD_UMIN">;
defm atomic_load_umax : SIAtomicM0Glue2 <"LOAD_UMAX">;
defm atomic_swap : SIAtomicM0Glue2 <"SWAP">;
defm atomic_load_fadd : SIAtomicM0Glue2 <"LOAD_FADD", 0, SDTAtomic2_f32, 0>;
defm atomic_load_fmin : SIAtomicM0Glue2 <"LOAD_FMIN", 1, SDTAtomic2_f32, 0>;
defm atomic_load_fmax : SIAtomicM0Glue2 <"LOAD_FMAX", 1, SDTAtomic2_f32, 0>;
676 def as_i1imm : SDNodeXForm<imm, [{
677 return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i1);
680 def as_i8imm : SDNodeXForm<imm, [{
681 return CurDAG->getTargetConstant(N->getZExtValue(), SDLoc(N), MVT::i8);
684 def as_i16imm : SDNodeXForm<imm, [{
685 return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i16);
688 def as_i32imm: SDNodeXForm<imm, [{
689 return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i32);
692 def as_i32timm: SDNodeXForm<timm, [{
693 return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i32);
696 def as_i64imm: SDNodeXForm<imm, [{
697 return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i64);
700 def cond_as_i32imm: SDNodeXForm<cond, [{
701 return CurDAG->getTargetConstant(N->get(), SDLoc(N), MVT::i32);
704 // Copied from the AArch64 backend:
705 def bitcast_fpimm_to_i32 : SDNodeXForm<fpimm, [{
706 return CurDAG->getTargetConstant(
707 N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i32);
710 def frameindex_to_targetframeindex : SDNodeXForm<frameindex, [{
711 auto FI = cast<FrameIndexSDNode>(N);
712 return CurDAG->getTargetFrameIndex(FI->getIndex(), MVT::i32);
715 // Copied from the AArch64 backend:
716 def bitcast_fpimm_to_i64 : SDNodeXForm<fpimm, [{
717 return CurDAG->getTargetConstant(
718 N->getValueAPF().bitcastToAPInt().getZExtValue(), SDLoc(N), MVT::i64);
721 class bitextract_imm<int bitnum> : SDNodeXForm<imm, [{
722 uint64_t Imm = N->getZExtValue();
723 unsigned Bit = (Imm >> }] # bitnum # [{ ) & 1;
724 return CurDAG->getTargetConstant(Bit, SDLoc(N), MVT::i1);
727 def SIMM16bit : ImmLeaf <i32,
728 [{return isInt<16>(Imm);}]
731 def UIMM16bit : ImmLeaf <i32,
732 [{return isUInt<16>(Imm);}]
735 def i64imm_32bit : ImmLeaf<i64, [{
736 return (Imm & 0xffffffffULL) == static_cast<uint64_t>(Imm);
739 class InlineImm <ValueType vt> : PatLeaf <(vt imm), [{
740 return isInlineImmediate(N);
743 class InlineFPImm <ValueType vt> : PatLeaf <(vt fpimm), [{
744 return isInlineImmediate(N);
747 class VGPRImm <dag frag> : PatLeaf<frag, [{
751 def NegateImm : SDNodeXForm<imm, [{
752 return CurDAG->getConstant(-N->getSExtValue(), SDLoc(N), MVT::i32);
755 // TODO: When FP inline imm values work?
756 def NegSubInlineConst32 : ImmLeaf<i32, [{
757 return Imm < -16 && Imm >= -64;
760 def NegSubInlineConst16 : ImmLeaf<i16, [{
761 return Imm < -16 && Imm >= -64;
764 def ShiftAmt32Imm : PatLeaf <(imm), [{
765 return N->getZExtValue() < 32;
768 def getNegV2I16Imm : SDNodeXForm<build_vector, [{
769 return SDValue(packNegConstantV2I16(N, *CurDAG), 0);
772 def NegSubInlineConstV216 : PatLeaf<(build_vector), [{
773 assert(N->getNumOperands() == 2);
774 assert(N->getOperand(0).getValueType().getSizeInBits() == 16);
775 SDValue Src0 = N->getOperand(0);
776 SDValue Src1 = N->getOperand(1);
778 return isNegInlineImmediate(Src0.getNode());
780 return (isNullConstantOrUndef(Src0) && isNegInlineImmediate(Src1.getNode())) ||
781 (isNullConstantOrUndef(Src1) && isNegInlineImmediate(Src0.getNode()));
784 //===----------------------------------------------------------------------===//
786 //===----------------------------------------------------------------------===//
788 def SoppBrTarget : AsmOperandClass {
789 let Name = "SoppBrTarget";
790 let ParserMethod = "parseSOppBrTarget";
793 def sopp_brtarget : Operand<OtherVT> {
794 let EncoderMethod = "getSOPPBrEncoding";
795 let DecoderMethod = "decodeSoppBrTarget";
796 let OperandType = "OPERAND_PCREL";
797 let ParserMatchClass = SoppBrTarget;
800 def si_ga : Operand<iPTR>;
802 def InterpSlotMatchClass : AsmOperandClass {
803 let Name = "InterpSlot";
804 let PredicateMethod = "isInterpSlot";
805 let ParserMethod = "parseInterpSlot";
806 let RenderMethod = "addImmOperands";
809 def InterpSlot : Operand<i32> {
810 let PrintMethod = "printInterpSlot";
811 let ParserMatchClass = InterpSlotMatchClass;
812 let OperandType = "OPERAND_IMMEDIATE";
815 def AttrMatchClass : AsmOperandClass {
817 let PredicateMethod = "isInterpAttr";
818 let ParserMethod = "parseInterpAttr";
819 let RenderMethod = "addImmOperands";
822 // It appears to be necessary to create a separate operand for this to
823 // be able to parse attr<num> with no space.
824 def Attr : Operand<i32> {
825 let PrintMethod = "printInterpAttr";
826 let ParserMatchClass = AttrMatchClass;
827 let OperandType = "OPERAND_IMMEDIATE";
830 def AttrChanMatchClass : AsmOperandClass {
831 let Name = "AttrChan";
832 let PredicateMethod = "isAttrChan";
833 let RenderMethod = "addImmOperands";
836 def AttrChan : Operand<i32> {
837 let PrintMethod = "printInterpAttrChan";
838 let ParserMatchClass = AttrChanMatchClass;
839 let OperandType = "OPERAND_IMMEDIATE";
842 def SendMsgMatchClass : AsmOperandClass {
843 let Name = "SendMsg";
844 let PredicateMethod = "isSendMsg";
845 let ParserMethod = "parseSendMsgOp";
846 let RenderMethod = "addImmOperands";
849 def SwizzleMatchClass : AsmOperandClass {
850 let Name = "Swizzle";
851 let PredicateMethod = "isSwizzle";
852 let ParserMethod = "parseSwizzleOp";
853 let RenderMethod = "addImmOperands";
857 def EndpgmMatchClass : AsmOperandClass {
858 let Name = "EndpgmImm";
859 let PredicateMethod = "isEndpgm";
860 let ParserMethod = "parseEndpgmOp";
861 let RenderMethod = "addImmOperands";
865 def ExpTgtMatchClass : AsmOperandClass {
867 let PredicateMethod = "isExpTgt";
868 let ParserMethod = "parseExpTgt";
869 let RenderMethod = "printExpTgt";
872 def SendMsgImm : Operand<i32> {
873 let PrintMethod = "printSendMsg";
874 let ParserMatchClass = SendMsgMatchClass;
877 def SwizzleImm : Operand<i16> {
878 let PrintMethod = "printSwizzle";
879 let ParserMatchClass = SwizzleMatchClass;
882 def EndpgmImm : Operand<i16> {
883 let PrintMethod = "printEndpgm";
884 let ParserMatchClass = EndpgmMatchClass;
887 def SWaitMatchClass : AsmOperandClass {
888 let Name = "SWaitCnt";
889 let RenderMethod = "addImmOperands";
890 let ParserMethod = "parseSWaitCntOps";
893 def VReg32OrOffClass : AsmOperandClass {
894 let Name = "VReg32OrOff";
895 let ParserMethod = "parseVReg32OrOff";
898 def WAIT_FLAG : Operand <i32> {
899 let ParserMatchClass = SWaitMatchClass;
900 let PrintMethod = "printWaitFlag";
901 let OperandType = "OPERAND_IMMEDIATE";
904 include "SIInstrFormats.td"
905 include "VIInstrFormats.td"
907 def BoolReg : AsmOperandClass {
908 let Name = "BoolReg";
909 let ParserMethod = "parseBoolReg";
910 let RenderMethod = "addRegOperands";
913 class BoolRC : RegisterOperand<SReg_1> {
914 let ParserMatchClass = BoolReg;
915 let DecoderMethod = "decodeBoolReg";
918 def SSrc_i1 : RegisterOperand<SReg_1_XEXEC> {
919 let ParserMatchClass = BoolReg;
920 let DecoderMethod = "decodeBoolReg";
923 def VOPDstS64orS32 : BoolRC {
924 let PrintMethod = "printVOPDst";
927 // SCSrc_i1 is the operand for pseudo instructions only.
// Boolean immediates shall not be exposed to codegen instructions.
929 def SCSrc_i1 : RegisterOperand<SReg_1_XEXEC> {
930 let OperandNamespace = "AMDGPU";
931 let OperandType = "OPERAND_REG_IMM_INT32";
932 let ParserMatchClass = BoolReg;
933 let DecoderMethod = "decodeBoolReg";
936 // ===----------------------------------------------------------------------===//
937 // ExpSrc* Special cases for exp src operands which are printed as
938 // "off" depending on en operand.
939 // ===----------------------------------------------------------------------===//
941 def ExpSrc0 : RegisterOperand<VGPR_32> {
942 let PrintMethod = "printExpSrc0";
943 let ParserMatchClass = VReg32OrOffClass;
946 def ExpSrc1 : RegisterOperand<VGPR_32> {
947 let PrintMethod = "printExpSrc1";
948 let ParserMatchClass = VReg32OrOffClass;
951 def ExpSrc2 : RegisterOperand<VGPR_32> {
952 let PrintMethod = "printExpSrc2";
953 let ParserMatchClass = VReg32OrOffClass;
956 def ExpSrc3 : RegisterOperand<VGPR_32> {
957 let PrintMethod = "printExpSrc3";
958 let ParserMatchClass = VReg32OrOffClass;
961 class SDWASrc<ValueType vt> : RegisterOperand<VS_32> {
962 let OperandNamespace = "AMDGPU";
963 string Type = !if(isFloatType<vt>.ret, "FP", "INT");
964 let OperandType = "OPERAND_REG_INLINE_C_"#Type#vt.Size;
965 let DecoderMethod = "decodeSDWASrc"#vt.Size;
966 let EncoderMethod = "getSDWASrcEncoding";
// Concrete SDWA source operands: the SDWASrc class derives the operand type,
// decoder and encoder from the value type's FP/INT kind and bit size.
def SDWASrc_i32 : SDWASrc<i32>;
def SDWASrc_i16 : SDWASrc<i16>;
def SDWASrc_f32 : SDWASrc<f32>;
def SDWASrc_f16 : SDWASrc<f16>;
974 def SDWAVopcDst : BoolRC {
975 let OperandNamespace = "AMDGPU";
976 let OperandType = "OPERAND_SDWA_VOPC_DST";
977 let EncoderMethod = "getSDWAVopcDstEncoding";
978 let DecoderMethod = "decodeSDWAVopcDst";
979 let PrintMethod = "printVOPDst";
982 class NamedMatchClass<string CName, bit Optional = 1> : AsmOperandClass {
983 let Name = "Imm"#CName;
984 let PredicateMethod = "is"#CName;
985 let ParserMethod = !if(Optional, "parseOptionalOperand", "parse"#CName);
986 let RenderMethod = "addImmOperands";
987 let IsOptional = Optional;
988 let DefaultMethod = !if(Optional, "default"#CName, ?);
991 class NamedOperandBit<string Name, AsmOperandClass MatchClass> : Operand<i1> {
992 let PrintMethod = "print"#Name;
993 let ParserMatchClass = MatchClass;
996 class NamedOperandU8<string Name, AsmOperandClass MatchClass> : Operand<i8> {
997 let PrintMethod = "print"#Name;
998 let ParserMatchClass = MatchClass;
1001 class NamedOperandU16<string Name, AsmOperandClass MatchClass> : Operand<i16> {
1002 let PrintMethod = "print"#Name;
1003 let ParserMatchClass = MatchClass;
1006 class NamedOperandU32<string Name, AsmOperandClass MatchClass> : Operand<i32> {
1007 let PrintMethod = "print"#Name;
1008 let ParserMatchClass = MatchClass;
1011 class NamedOperandU32Default0<string Name, AsmOperandClass MatchClass> :
1012 OperandWithDefaultOps<i32, (ops (i32 0))> {
1013 let PrintMethod = "print"#Name;
1014 let ParserMatchClass = MatchClass;
// Concrete named immediate operands used throughout the instruction
// definitions below.  All are marked OPERAND_IMMEDIATE for the MC layer.
1017 let OperandType = "OPERAND_IMMEDIATE" in {
1019 def offen : NamedOperandBit<"Offen", NamedMatchClass<"Offen">>;
1020 def idxen : NamedOperandBit<"Idxen", NamedMatchClass<"Idxen">>;
1021 def addr64 : NamedOperandBit<"Addr64", NamedMatchClass<"Addr64">>;
1023 def flat_offset : NamedOperandU16<"FlatOffset", NamedMatchClass<"FlatOffset">>;
1024 def offset : NamedOperandU16<"Offset", NamedMatchClass<"Offset">>;
1025 def offset0 : NamedOperandU8<"Offset0", NamedMatchClass<"Offset0">>;
1026 def offset1 : NamedOperandU8<"Offset1", NamedMatchClass<"Offset1">>;
1028 def gds : NamedOperandBit<"GDS", NamedMatchClass<"GDS">>;
1030 def omod : NamedOperandU32<"OModSI", NamedMatchClass<"OModSI">>;
1031 def clampmod : NamedOperandBit<"ClampSI", NamedMatchClass<"ClampSI">>;
1032 def highmod : NamedOperandBit<"High", NamedMatchClass<"High">>;
1034 def DLC : NamedOperandBit<"DLC", NamedMatchClass<"DLC">>;
1035 def GLC : NamedOperandBit<"GLC", NamedMatchClass<"GLC">>;
1036 def SLC : NamedOperandBit<"SLC", NamedMatchClass<"SLC">>;
1037 def TFE : NamedOperandBit<"TFE", NamedMatchClass<"TFE">>;
1038 def SWZ : NamedOperandBit<"SWZ", NamedMatchClass<"SWZ">>;
1039 def UNorm : NamedOperandBit<"UNorm", NamedMatchClass<"UNorm">>;
1040 def DA : NamedOperandBit<"DA", NamedMatchClass<"DA">>;
1041 def R128A16 : NamedOperandBit<"R128A16", NamedMatchClass<"R128A16">>;
1042 def D16 : NamedOperandBit<"D16", NamedMatchClass<"D16">>;
1043 def LWE : NamedOperandBit<"LWE", NamedMatchClass<"LWE">>;
1044 def exp_compr : NamedOperandBit<"ExpCompr", NamedMatchClass<"ExpCompr">>;
1045 def exp_vm : NamedOperandBit<"ExpVM", NamedMatchClass<"ExpVM">>;
1047 def FORMAT : NamedOperandU8<"FORMAT", NamedMatchClass<"FORMAT">>;
1049 def DMask : NamedOperandU16<"DMask", NamedMatchClass<"DMask">>;
1050 def Dim : NamedOperandU8<"Dim", NamedMatchClass<"Dim", 0>>;
1052 def dpp8 : NamedOperandU32<"DPP8", NamedMatchClass<"DPP8", 0>>;
1054 def dpp_ctrl : NamedOperandU32<"DPPCtrl", NamedMatchClass<"DPPCtrl", 0>>;
1055 def row_mask : NamedOperandU32<"RowMask", NamedMatchClass<"RowMask">>;
1056 def bank_mask : NamedOperandU32<"BankMask", NamedMatchClass<"BankMask">>;
1057 def bound_ctrl : NamedOperandBit<"BoundCtrl", NamedMatchClass<"BoundCtrl">>;
1058 def FI : NamedOperandU32<"FI", NamedMatchClass<"FI">>;
// SDWA byte/word selection operands.
1060 def dst_sel : NamedOperandU32<"SDWADstSel", NamedMatchClass<"SDWADstSel">>;
1061 def src0_sel : NamedOperandU32<"SDWASrc0Sel", NamedMatchClass<"SDWASrc0Sel">>;
1062 def src1_sel : NamedOperandU32<"SDWASrc1Sel", NamedMatchClass<"SDWASrc1Sel">>;
1063 def dst_unused : NamedOperandU32<"SDWADstUnused", NamedMatchClass<"SDWADstUnused">>;
// Operand-select / negation modifiers; default to 0 when omitted.
1065 def op_sel : NamedOperandU32Default0<"OpSel", NamedMatchClass<"OpSel">>;
1066 def op_sel_hi : NamedOperandU32Default0<"OpSelHi", NamedMatchClass<"OpSelHi">>;
1067 def neg_lo : NamedOperandU32Default0<"NegLo", NamedMatchClass<"NegLo">>;
1068 def neg_hi : NamedOperandU32Default0<"NegHi", NamedMatchClass<"NegHi">>;
1070 def blgp : NamedOperandU32<"BLGP", NamedMatchClass<"BLGP">>;
1071 def cbsz : NamedOperandU32<"CBSZ", NamedMatchClass<"CBSZ">>;
1072 def abid : NamedOperandU32<"ABID", NamedMatchClass<"ABID">>;
1074 def hwreg : NamedOperandU16<"Hwreg", NamedMatchClass<"Hwreg", 0>>;
1076 def exp_tgt : NamedOperandU8<"ExpTgt", NamedMatchClass<"ExpTgt", 0>> {
1080 } // End OperandType = "OPERAND_IMMEDIATE"
// Assembly matcher for "kimm" FP literal immediates of the given size; the
// Name, predicate and render methods are all derived from `size`.
1082 class KImmMatchClass<int size> : AsmOperandClass {
1083 let Name = "KImmFP"#size;
1084 let PredicateMethod = "isKImmFP"#size;
1085 let ParserMethod = "parseImm";
1086 let RenderMethod = "addKImmFP"#size#"Operands";
// kimm operand: print method and matcher class are selected by vt.Size.
1089 class kimmOperand<ValueType vt> : Operand<vt> {
1090 let OperandNamespace = "AMDGPU";
1091 let OperandType = "OPERAND_KIMM"#vt.Size;
1092 let PrintMethod = "printU"#vt.Size#"ImmOperand";
1093 let ParserMatchClass = !cast<AsmOperandClass>("KImmFP"#vt.Size#"MatchClass");
1096 // 32-bit VALU immediate operand that uses the constant bus.
1097 def KImmFP32MatchClass : KImmMatchClass<32>;
1098 def f32kimm : kimmOperand<i32>;
1100 // 32-bit VALU immediate operand with a 16-bit value that uses the constant bus.
1102 def KImmFP16MatchClass : KImmMatchClass<16>;
1103 def f16kimm : kimmOperand<i16>;
// Assembly matcher for a register-or-immediate source carrying FP input
// modifiers, specialized on operand size (16/32/64 below).
1105 class FPInputModsMatchClass <int opSize> : AsmOperandClass {
1106 let Name = "RegOrImmWithFP"#opSize#"InputMods";
1107 let ParserMethod = "parseRegOrImmWithFPInputMods";
1108 let PredicateMethod = "isRegOrImmWithFP"#opSize#"InputMods";
1111 def FP16InputModsMatchClass : FPInputModsMatchClass<16>;
1112 def FP32InputModsMatchClass : FPInputModsMatchClass<32>;
1113 def FP64InputModsMatchClass : FPInputModsMatchClass<64>;
// Base class for all input-modifier operands: encoded as an i32 immediate
// in the AMDGPU operand namespace with type OPERAND_INPUT_MODS.
1115 class InputMods <AsmOperandClass matchClass> : Operand <i32> {
1116 let OperandNamespace = "AMDGPU";
1117 let OperandType = "OPERAND_INPUT_MODS";
1118 let ParserMatchClass = matchClass;
// FP input-modifier operand; printed together with its source operand.
1121 class FPInputMods <FPInputModsMatchClass matchClass> : InputMods <matchClass> {
1122 let PrintMethod = "printOperandAndFPInputMods";
1125 def FP16InputMods : FPInputMods<FP16InputModsMatchClass>;
1126 def FP32InputMods : FPInputMods<FP32InputModsMatchClass>;
1127 def FP64InputMods : FPInputMods<FP64InputModsMatchClass>;
// Integer input-modifier matcher/operand classes, parallel to the FP
// variants above (32- and 64-bit only).
1129 class IntInputModsMatchClass <int opSize> : AsmOperandClass {
1130 let Name = "RegOrImmWithInt"#opSize#"InputMods";
1131 let ParserMethod = "parseRegOrImmWithIntInputMods";
1132 let PredicateMethod = "isRegOrImmWithInt"#opSize#"InputMods";
1134 def Int32InputModsMatchClass : IntInputModsMatchClass<32>;
1135 def Int64InputModsMatchClass : IntInputModsMatchClass<64>;
1137 class IntInputMods <IntInputModsMatchClass matchClass> : InputMods <matchClass> {
1138 let PrintMethod = "printOperandAndIntInputMods";
1140 def Int32InputMods : IntInputMods<Int32InputModsMatchClass>;
1141 def Int64InputMods : IntInputMods<Int64InputModsMatchClass>;
// Matcher for op_sel-style modifier operands: parses a plain register or
// immediate (no dedicated modifier syntax).
1143 class OpSelModsMatchClass : AsmOperandClass {
1144 let Name = "OpSelMods";
1145 let ParserMethod = "parseRegOrImm";
1146 let PredicateMethod = "isRegOrImm";
1149 def IntOpSelModsMatchClass : OpSelModsMatchClass;
1150 def IntOpSelMods : InputMods<IntOpSelModsMatchClass>;
// FP input-modifier matcher/operand for SDWA sources of the given size.
1152 class FPSDWAInputModsMatchClass <int opSize> : AsmOperandClass {
1153 let Name = "SDWAWithFP"#opSize#"InputMods";
1154 let ParserMethod = "parseRegOrImmWithFPInputMods";
1155 let PredicateMethod = "isSDWAFP"#opSize#"Operand";
1158 def FP16SDWAInputModsMatchClass : FPSDWAInputModsMatchClass<16>;
1159 def FP32SDWAInputModsMatchClass : FPSDWAInputModsMatchClass<32>;
1161 class FPSDWAInputMods <FPSDWAInputModsMatchClass matchClass> :
1162 InputMods <matchClass> {
1163 let PrintMethod = "printOperandAndFPInputMods";
1166 def FP16SDWAInputMods : FPSDWAInputMods<FP16SDWAInputModsMatchClass>;
1167 def FP32SDWAInputMods : FPSDWAInputMods<FP32SDWAInputModsMatchClass>;
// FP input modifiers restricted to 32-bit VGPR sources (isVReg32); used by
// getSrcModExt below for DPP sources.
1169 def FPVRegInputModsMatchClass : AsmOperandClass {
1170 let Name = "VRegWithFPInputMods";
1171 let ParserMethod = "parseRegWithFPInputMods";
1172 let PredicateMethod = "isVReg32";
1175 def FPVRegInputMods : InputMods <FPVRegInputModsMatchClass> {
1176 let PrintMethod = "printOperandAndFPInputMods";
// Integer input-modifier matcher/operand for SDWA sources of the given size.
1179 class IntSDWAInputModsMatchClass <int opSize> : AsmOperandClass {
1180 let Name = "SDWAWithInt"#opSize#"InputMods";
1181 let ParserMethod = "parseRegOrImmWithIntInputMods";
1182 let PredicateMethod = "isSDWAInt"#opSize#"Operand";
1185 def Int16SDWAInputModsMatchClass : IntSDWAInputModsMatchClass<16>;
1186 def Int32SDWAInputModsMatchClass : IntSDWAInputModsMatchClass<32>;
1188 class IntSDWAInputMods <IntSDWAInputModsMatchClass matchClass> :
1189 InputMods <matchClass> {
1190 let PrintMethod = "printOperandAndIntInputMods";
1193 def Int16SDWAInputMods : IntSDWAInputMods<Int16SDWAInputModsMatchClass>;
1194 def Int32SDWAInputMods : IntSDWAInputMods<Int32SDWAInputModsMatchClass>;
// Integer input modifiers restricted to 32-bit VGPR sources, parallel to
// FPVRegInputMods above.
1196 def IntVRegInputModsMatchClass : AsmOperandClass {
1197 let Name = "VRegWithIntInputMods";
1198 let ParserMethod = "parseRegWithIntInputMods";
1199 let PredicateMethod = "isVReg32";
1202 def IntVRegInputMods : InputMods <IntVRegInputModsMatchClass> {
1203 let PrintMethod = "printOperandAndIntInputMods";
// Matchers for packed FP/int input modifiers.  For now they accept a plain
// reg-or-imm; the stricter packed predicates are left commented out.
1206 class PackedFPInputModsMatchClass <int opSize> : AsmOperandClass {
1207 let Name = "PackedFP"#opSize#"InputMods";
1208 let ParserMethod = "parseRegOrImm";
1209 let PredicateMethod = "isRegOrImm";
1210 // let PredicateMethod = "isPackedFP"#opSize#"InputMods";
1213 class PackedIntInputModsMatchClass <int opSize> : AsmOperandClass {
1214 let Name = "PackedInt"#opSize#"InputMods";
1215 let ParserMethod = "parseRegOrImm";
1216 let PredicateMethod = "isRegOrImm";
1217 // let PredicateMethod = "isPackedInt"#opSize#"InputMods";
1220 def PackedF16InputModsMatchClass : PackedFPInputModsMatchClass<16>;
1221 def PackedI16InputModsMatchClass : PackedIntInputModsMatchClass<16>;
1223 class PackedFPInputMods <PackedFPInputModsMatchClass matchClass> : InputMods <matchClass> {
1224 // let PrintMethod = "printPackedFPInputMods";
1227 class PackedIntInputMods <PackedIntInputModsMatchClass matchClass> : InputMods <matchClass> {
1228 //let PrintMethod = "printPackedIntInputMods";
1231 def PackedF16InputMods : PackedFPInputMods<PackedF16InputModsMatchClass>;
1232 def PackedI16InputMods : PackedIntInputMods<PackedI16InputModsMatchClass>;
1234 //===----------------------------------------------------------------------===//
1236 //===----------------------------------------------------------------------===//
// ComplexPatterns: each delegates operand selection to the named routine in
// AMDGPUISelDAGToDAG; the integer is the number of operands produced.
1238 def DS1Addr1Offset : ComplexPattern<i32, 2, "SelectDS1Addr1Offset">;
1239 def DS64Bit4ByteAligned : ComplexPattern<i32, 3, "SelectDS64Bit4ByteAligned">;
1241 def MOVRELOffset : ComplexPattern<i32, 2, "SelectMOVRELOffset">;
1243 def VOP3Mods0 : ComplexPattern<untyped, 4, "SelectVOP3Mods0">;
1244 def VOP3Mods0Clamp : ComplexPattern<untyped, 3, "SelectVOP3Mods0Clamp">;
1245 def VOP3Mods0Clamp0OMod : ComplexPattern<untyped, 4, "SelectVOP3Mods0Clamp0OMod">;
1246 def VOP3Mods : ComplexPattern<untyped, 2, "SelectVOP3Mods">;
1247 def VOP3NoMods : ComplexPattern<untyped, 1, "SelectVOP3NoMods">;
1248 // VOP3Mods, but the input source is known to never be NaN.
1249 def VOP3Mods_nnan : ComplexPattern<fAny, 2, "SelectVOP3Mods_NNaN">;
1250 // VOP3Mods, but only allowed for f32 operands.
1251 def VOP3Mods_f32 : ComplexPattern<fAny, 2, "SelectVOP3Mods_f32">;
1253 def VOP3OMods : ComplexPattern<untyped, 3, "SelectVOP3OMods">;
1255 def VOP3PMods : ComplexPattern<untyped, 2, "SelectVOP3PMods">;
1256 def VOP3PMods0 : ComplexPattern<untyped, 3, "SelectVOP3PMods0">;
1258 def VOP3OpSel : ComplexPattern<untyped, 2, "SelectVOP3OpSel">;
1259 def VOP3OpSel0 : ComplexPattern<untyped, 3, "SelectVOP3OpSel0">;
1261 def VOP3OpSelMods : ComplexPattern<untyped, 2, "SelectVOP3OpSelMods">;
1262 def VOP3OpSelMods0 : ComplexPattern<untyped, 3, "SelectVOP3OpSelMods0">;
1264 def VOP3PMadMixMods : ComplexPattern<untyped, 2, "SelectVOP3PMadMixMods">;
1267 def Hi16Elt : ComplexPattern<untyped, 1, "SelectHi16Elt">;
1269 //===----------------------------------------------------------------------===//
1270 // SI assembler operands
1271 //===----------------------------------------------------------------------===//
// NOTE(review): the enclosing defs for the constants below were dropped
// from this excerpt; these are interior fields of those definitions.
1276 int FLAT_SCR = 0x68;
1279 // This should be kept in sync with the SISrcMods enum
1303 int LLVM_DEBUG_TRAP = 3;
// Hardware register ids.
1319 int FLAT_SCR_LO = 20;
1320 int FLAT_SCR_HI = 21;
1321 int XNACK_MASK = 22;
1322 int POPS_PACKER = 25;
// Packs a hwreg immediate: Offset goes into bits [11:6] and (Size - 1) into
// bits [15:11].  NOTE(review): the term carrying Reg (presumably the low
// bits) is elided from this excerpt — confirm against the full file.
1325 class getHwRegImm<int Reg, int Offset = 0, int Size = 32> {
1327 !or(!shl(Offset, 6),
1328 !shl(!add(Size, -1), 11)));
1331 //===----------------------------------------------------------------------===//
1333 // SI Instruction multiclass helpers.
1335 // Instructions with _32 take 32-bit operands.
1336 // Instructions with _64 take 64-bit operands.
1338 // VOP_* instructions can use either a 32-bit or 64-bit encoding. The 32-bit
1339 // encoding is the standard encoding, but instructions that make use of
1340 // any of the instruction modifiers must use the 64-bit encoding.
1342 // Instructions with _e32 use the 32-bit encoding.
1343 // Instructions with _e64 use the 64-bit encoding.
1345 //===----------------------------------------------------------------------===//
// Associates an instruction with its pseudo-instruction name and an
// encoding-family index (see SIEncodingFamily), used to map pseudos to
// per-subtarget real encodings.
1347 class SIMCInstr <string pseudo, int subtarget> {
1348 string PseudoInstr = pseudo;
1349 int Subtarget = subtarget;
1352 //===----------------------------------------------------------------------===//
1354 //===----------------------------------------------------------------------===//
// Shared body for export instructions; `done` only adds the " done" suffix
// to the assembly string (EXP vs. EXP_DONE, see EXP_m below).
1356 class EXP_Helper<bit done, SDPatternOperator node = null_frag> : EXPCommon<
1359 ExpSrc0:$src0, ExpSrc1:$src1, ExpSrc2:$src2, ExpSrc3:$src3,
1360 exp_vm:$vm, exp_compr:$compr, i8imm:$en),
1361 "exp$tgt $src0, $src1, $src2, $src3"#!if(done, " done", "")#"$compr$vm",
1362 [(node (i8 timm:$tgt), (i8 timm:$en),
1363 f32:$src0, f32:$src1, f32:$src2, f32:$src3,
1364 (i1 timm:$compr), (i1 timm:$vm))]> {
1365 let AsmMatchConverter = "cvtExp";
1368 // Split EXP instruction into EXP and EXP_DONE so we can set
1369 // mayLoad for done=1.
1370 multiclass EXP_m<bit done, SDPatternOperator node> {
1371 let mayLoad = done, DisableWQM = 1 in {
// Subtarget-independent pseudo, carrying the selection pattern.
1372 let isPseudo = 1, isCodeGenOnly = 1 in {
1373 def "" : EXP_Helper<done, node>,
1374 SIMCInstr <"exp"#!if(done, "_done", ""), SIEncodingFamily.NONE>;
// Real encodings, one def per encoding family (SI, VI, GFX10).
1377 let done = done in {
1378 def _si : EXP_Helper<done>,
1379 SIMCInstr <"exp"#!if(done, "_done", ""), SIEncodingFamily.SI>,
1381 let AssemblerPredicates = [isGFX6GFX7];
1382 let DecoderNamespace = "GFX6GFX7";
1383 let DisableDecoder = DisableSIDecoder;
1386 def _vi : EXP_Helper<done>,
1387 SIMCInstr <"exp"#!if(done, "_done", ""), SIEncodingFamily.VI>,
1389 let AssemblerPredicates = [isGFX8GFX9];
1390 let DecoderNamespace = "GFX8";
1391 let DisableDecoder = DisableVIDecoder;
1394 def _gfx10 : EXP_Helper<done>,
1395 SIMCInstr <"exp"#!if(done, "_done", ""), SIEncodingFamily.GFX10>,
1397 let AssemblerPredicates = [isGFX10Plus];
1398 let DecoderNamespace = "GFX10";
1399 let DisableDecoder = DisableSIDecoder;
1405 //===----------------------------------------------------------------------===//
1406 // Vector ALU classes
1407 //===----------------------------------------------------------------------===//
// Counts the source operands (0-3): the first `untyped` ValueType in the
// Src0/Src1/Src2 sequence terminates the list.
1409 class getNumSrcArgs<ValueType Src0, ValueType Src1, ValueType Src2> {
1411 !if (!eq(Src0.Value, untyped.Value), 0,
1412 !if (!eq(Src1.Value, untyped.Value), 1, // VOP1
1413 !if (!eq(Src2.Value, untyped.Value), 2, // VOP2
1417 // Returns the register class to use for the destination of VOP[123C]
1418 // instructions for the given VT.
1419 class getVALUDstForVT<ValueType VT> {
1420 RegisterOperand ret = !if(!eq(VT.Size, 32), VOPDstOperand<VGPR_32>,
1421 !if(!eq(VT.Size, 128), VOPDstOperand<VReg_128>,
1422 !if(!eq(VT.Size, 64), VOPDstOperand<VReg_64>,
// 16-bit results still occupy a full 32-bit VGPR.
1423 !if(!eq(VT.Size, 16), VOPDstOperand<VGPR_32>,
1424 VOPDstS64orS32)))); // else VT == i1
1427 // Returns true if VT is floating point.
// Covers scalar and vector FP types; anything not listed (the final elided
// alternative) is treated as integer.
1428 class getIsFP<ValueType VT> {
1429 bit ret = !if(!eq(VT.Value, f16.Value), 1,
1430 !if(!eq(VT.Value, v2f16.Value), 1,
1431 !if(!eq(VT.Value, v4f16.Value), 1,
1432 !if(!eq(VT.Value, f32.Value), 1,
1433 !if(!eq(VT.Value, v2f32.Value), 1,
1434 !if(!eq(VT.Value, f64.Value), 1,
1435 !if(!eq(VT.Value, v2f64.Value), 1,
1439 // Returns the register class to use for the destination of VOP[12C]
1440 // instructions with SDWA extension
1441 class getSDWADstForVT<ValueType VT> {
1442 RegisterOperand ret = !if(!eq(VT.Size, 1),
1443 SDWAVopcDst, // VOPC
1444 VOPDstOperand<VGPR_32>); // VOP1/2 32-bit dst
1447 // Returns the register class to use for source 0 of VOP[12C]
1448 // instructions for the given VT.
1449 class getVOPSrc0ForVT<ValueType VT> {
1450 bit isFP = getIsFP<VT>.ret;
// NOTE(review): the result operands of the nested selects were dropped
// from this excerpt; only the condition lines remain below.  The two
// branches mirror each other for the FP (isFP) and integer cases.
1452 RegisterOperand ret =
1454 !if(!eq(VT.Size, 64),
1456 !if(!eq(VT.Value, f16.Value),
1458 !if(!eq(VT.Value, v2f16.Value),
1460 !if(!eq(VT.Value, v4f16.Value),
1467 !if(!eq(VT.Size, 64),
1469 !if(!eq(VT.Value, i16.Value),
1471 !if(!eq(VT.Value, v2i16.Value),
1480 // Returns the vreg register class to use for source operand given VT
1481 class getVregSrcForVT<ValueType VT> {
1482 RegisterClass ret = !if(!eq(VT.Size, 128), VReg_128,
1483 !if(!eq(VT.Size, 96), VReg_96,
1484 !if(!eq(VT.Size, 64), VReg_64,
// 48-bit values are rounded up to a 64-bit register pair.
1485 !if(!eq(VT.Size, 48), VReg_64,
// Returns the SDWA source operand class: 16- vs. 32-bit, FP vs. integer.
1489 class getSDWASrcForVT <ValueType VT> {
1490 bit isFP = getIsFP<VT>.ret;
1491 RegisterOperand retFlt = !if(!eq(VT.Size, 16), SDWASrc_f16, SDWASrc_f32);
1492 RegisterOperand retInt = !if(!eq(VT.Size, 16), SDWASrc_i16, SDWASrc_i32);
1493 RegisterOperand ret = !if(isFP, retFlt, retInt);
1496 // Returns the register class to use for sources of VOP3 instructions for the
// given VT.  NOTE(review): the result operands of the nested selects were
// dropped from this excerpt; only the condition lines remain.
1498 class getVOP3SrcForVT<ValueType VT> {
1499 bit isFP = getIsFP<VT>.ret;
1500 RegisterOperand ret =
1501 !if(!eq(VT.Size, 128),
1503 !if(!eq(VT.Size, 64),
1507 !if(!eq(VT.Value, i1.Value),
1510 !if(!eq(VT.Value, f16.Value),
1512 !if(!eq(VT.Value, v2f16.Value),
1514 !if(!eq(VT.Value, v4f16.Value),
1520 !if(!eq(VT.Value, i16.Value),
1522 !if(!eq(VT.Value, v2i16.Value),
1533 // Float or packed int
// True when source-modifier syntax applies to SrcVT: scalar FP types or
// the packed 16-bit vector types.
1534 class isModifierType<ValueType SrcVT> {
1536 !if(!eq(SrcVT.Value, f16.Value), 1,
1537 !if(!eq(SrcVT.Value, f32.Value), 1,
1538 !if(!eq(SrcVT.Value, f64.Value), 1,
1539 !if(!eq(SrcVT.Value, v2f16.Value), 1,
1540 !if(!eq(SrcVT.Value, v2i16.Value), 1,
1544 // Return type of input modifiers operand for specified input operand
// Picks FP vs. integer modifier operands by type and size; EnableF32SrcMods
// forces FP modifiers on otherwise-integer 32-bit operands.
1545 class getSrcMod <ValueType VT, bit EnableF32SrcMods> {
1546 bit isFP = getIsFP<VT>.ret;
1547 bit isPacked = isPackedType<VT>.ret;
1548 Operand ret = !if(!eq(VT.Size, 64),
1549 !if(isFP, FP64InputMods, Int64InputMods),
1551 !if(!eq(VT.Value, f16.Value),
1555 !if(EnableF32SrcMods, FP32InputMods, Int32InputMods))
// Modifier operand for op_sel sources: full FP modifiers only for f16,
// otherwise the plain op_sel integer modifier operand.
1559 class getOpSelMod <ValueType VT> {
1560 Operand ret = !if(!eq(VT.Value, f16.Value), FP16InputMods, IntOpSelMods);
1563 // Return type of input modifiers operand specified input operand for DPP
1564 class getSrcModExt <ValueType VT> {
1565 bit isFP = getIsFP<VT>.ret;
1566 Operand ret = !if(isFP, FPVRegInputMods, IntVRegInputMods);
1569 // Return type of input modifiers operand specified input operand for SDWA
1570 class getSrcModSDWA <ValueType VT> {
1571 Operand ret = !if(!eq(VT.Value, f16.Value), FP16SDWAInputMods,
1572 !if(!eq(VT.Value, f32.Value), FP32SDWAInputMods,
1573 !if(!eq(VT.Value, i16.Value), Int16SDWAInputMods,
1574 Int32SDWAInputMods)));
1577 // Returns the input arguments for VOP[12C] instructions for the given SrcVT.
1578 class getIns32 <RegisterOperand Src0RC, RegisterClass Src1RC, int NumSrcArgs> {
1579 dag ret = !if(!eq(NumSrcArgs, 1), (ins Src0RC:$src0), // VOP1
1580 !if(!eq(NumSrcArgs, 2), (ins Src0RC:$src0, Src1RC:$src1), // VOP2
1584 // Returns the input arguments for VOP3 instructions for the given SrcVT.
// Selects the (ins ...) dag by NumSrcArgs, then by HasModifiers /
// HasSrc2Mods / HasOMod / HasIntClamp; the shape is a nested !if tree with
// one branch per supported operand combination.
1585 class getIns64 <RegisterOperand Src0RC, RegisterOperand Src1RC,
1586 RegisterOperand Src2RC, int NumSrcArgs,
1587 bit HasIntClamp, bit HasModifiers, bit HasSrc2Mods, bit HasOMod,
1588 Operand Src0Mod, Operand Src1Mod, Operand Src2Mod> {
1591 !if (!eq(NumSrcArgs, 0),
1592 // VOP1 without input operands (V_NOP, V_CLREXCP)
1595 !if (!eq(NumSrcArgs, 1),
1596 !if (!eq(HasModifiers, 1),
1597 // VOP1 with modifiers
1598 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1599 clampmod:$clamp, omod:$omod)
1601 // VOP1 without modifiers
1602 !if (!eq(HasIntClamp, 1),
1603 (ins Src0RC:$src0, clampmod:$clamp),
1606 !if (!eq(NumSrcArgs, 2),
1607 !if (!eq(HasModifiers, 1),
1608 // VOP2 with modifiers
1609 !if( !eq(HasOMod, 1),
1610 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1611 Src1Mod:$src1_modifiers, Src1RC:$src1,
1612 clampmod:$clamp, omod:$omod),
1613 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1614 Src1Mod:$src1_modifiers, Src1RC:$src1,
1617 // VOP2 without modifiers
1618 !if (!eq(HasIntClamp, 1),
1619 (ins Src0RC:$src0, Src1RC:$src1, clampmod:$clamp),
1620 (ins Src0RC:$src0, Src1RC:$src1))
1623 /* NumSrcArgs == 3 */,
1624 !if (!eq(HasModifiers, 1),
1625 !if (!eq(HasSrc2Mods, 1),
1626 // VOP3 with modifiers
1627 !if (!eq(HasOMod, 1),
1628 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1629 Src1Mod:$src1_modifiers, Src1RC:$src1,
1630 Src2Mod:$src2_modifiers, Src2RC:$src2,
1631 clampmod:$clamp, omod:$omod),
1632 !if (!eq(HasIntClamp, 1),
1633 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1634 Src1Mod:$src1_modifiers, Src1RC:$src1,
1635 Src2Mod:$src2_modifiers, Src2RC:$src2,
1637 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1638 Src1Mod:$src1_modifiers, Src1RC:$src1,
1639 Src2Mod:$src2_modifiers, Src2RC:$src2))),
1640 // VOP3 with modifiers except src2
1641 !if (!eq(HasOMod, 1),
1642 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1643 Src1Mod:$src1_modifiers, Src1RC:$src1,
1644 Src2RC:$src2, clampmod:$clamp, omod:$omod),
1645 !if (!eq(HasIntClamp, 1),
1646 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1647 Src1Mod:$src1_modifiers, Src1RC:$src1,
1648 Src2RC:$src2, clampmod:$clamp),
1649 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1650 Src1Mod:$src1_modifiers, Src1RC:$src1,
1653 // VOP3 without modifiers
1654 !if (!eq(HasIntClamp, 1),
1655 (ins Src0RC:$src0, Src1RC:$src1, Src2RC:$src2, clampmod:$clamp),
1656 (ins Src0RC:$src0, Src1RC:$src1, Src2RC:$src2))
1660 /// XXX - src1 may only allow VGPRs?
1662 // The modifiers (except clamp) are dummy operands for the benefit of
1663 // printing and parsing. They defer their values to looking at the
1664 // srcN_modifiers for what to print.
// Input dag for VOP3P: 2- or 3-source form, always carrying the packed
// op_sel/op_sel_hi/neg_lo/neg_hi modifier operands.
1665 class getInsVOP3P <RegisterOperand Src0RC, RegisterOperand Src1RC,
1666 RegisterOperand Src2RC, int NumSrcArgs,
1668 Operand Src0Mod, Operand Src1Mod, Operand Src2Mod> {
1669 dag ret = !if (!eq(NumSrcArgs, 2),
1671 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1672 Src1Mod:$src1_modifiers, Src1RC:$src1,
1674 op_sel:$op_sel, op_sel_hi:$op_sel_hi,
1675 neg_lo:$neg_lo, neg_hi:$neg_hi),
1676 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1677 Src1Mod:$src1_modifiers, Src1RC:$src1,
1678 op_sel:$op_sel, op_sel_hi:$op_sel_hi,
1679 neg_lo:$neg_lo, neg_hi:$neg_hi)),
1680 // else NumSrcArgs == 3
1682 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1683 Src1Mod:$src1_modifiers, Src1RC:$src1,
1684 Src2Mod:$src2_modifiers, Src2RC:$src2,
1686 op_sel:$op_sel, op_sel_hi:$op_sel_hi,
1687 neg_lo:$neg_lo, neg_hi:$neg_hi),
1688 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1689 Src1Mod:$src1_modifiers, Src1RC:$src1,
1690 Src2Mod:$src2_modifiers, Src2RC:$src2,
1691 op_sel:$op_sel, op_sel_hi:$op_sel_hi,
1692 neg_lo:$neg_lo, neg_hi:$neg_hi))
// Input dag for VOP3 instructions that use op_sel.  NOTE(review): several
// interior lines (remaining parameters and trailing operands of each (ins)
// list) were dropped from this excerpt.
1696 class getInsVOP3OpSel <RegisterOperand Src0RC,
1697 RegisterOperand Src1RC,
1698 RegisterOperand Src2RC,
1704 dag ret = !if (!eq(NumSrcArgs, 2),
1706 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1707 Src1Mod:$src1_modifiers, Src1RC:$src1,
1710 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1711 Src1Mod:$src1_modifiers, Src1RC:$src1,
1713 // else NumSrcArgs == 3
1715 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1716 Src1Mod:$src1_modifiers, Src1RC:$src1,
1717 Src2Mod:$src2_modifiers, Src2RC:$src2,
1720 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1721 Src1Mod:$src1_modifiers, Src1RC:$src1,
1722 Src2Mod:$src2_modifiers, Src2RC:$src2,
// Input dag for DPP forms: the DPP control operands (dpp_ctrl, row_mask,
// bank_mask, bound_ctrl) are always appended; $old is the previous value
// of the destination register.
1727 class getInsDPP <RegisterOperand DstRC, RegisterClass Src0RC, RegisterClass Src1RC,
1728 int NumSrcArgs, bit HasModifiers,
1729 Operand Src0Mod, Operand Src1Mod> {
1731 dag ret = !if (!eq(NumSrcArgs, 0),
1732 // VOP1 without input operands (V_NOP)
1733 (ins dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
1734 bank_mask:$bank_mask, bound_ctrl:$bound_ctrl),
1735 !if (!eq(NumSrcArgs, 1),
1736 !if (!eq(HasModifiers, 1),
1737 // VOP1_DPP with modifiers
1738 (ins DstRC:$old, Src0Mod:$src0_modifiers,
1739 Src0RC:$src0, dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
1740 bank_mask:$bank_mask, bound_ctrl:$bound_ctrl)
1742 // VOP1_DPP without modifiers
1743 (ins DstRC:$old, Src0RC:$src0,
1744 dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
1745 bank_mask:$bank_mask, bound_ctrl:$bound_ctrl)
1747 /* NumSrcArgs == 2 */,
1748 !if (!eq(HasModifiers, 1),
1749 // VOP2_DPP with modifiers
1751 Src0Mod:$src0_modifiers, Src0RC:$src0,
1752 Src1Mod:$src1_modifiers, Src1RC:$src1,
1753 dpp_ctrl:$dpp_ctrl, row_mask:$row_mask,
1754 bank_mask:$bank_mask, bound_ctrl:$bound_ctrl)
1756 // VOP2_DPP without modifiers
1758 Src0RC:$src0, Src1RC:$src1, dpp_ctrl:$dpp_ctrl,
1759 row_mask:$row_mask, bank_mask:$bank_mask,
1760 bound_ctrl:$bound_ctrl)
// DPP16 form: the regular DPP input list with extra operand(s) appended
// via !con (the appended dag is elided from this excerpt).
1764 class getInsDPP16 <RegisterOperand DstRC, RegisterClass Src0RC, RegisterClass Src1RC,
1765 int NumSrcArgs, bit HasModifiers,
1766 Operand Src0Mod, Operand Src1Mod> {
1767 dag ret = !con(getInsDPP<DstRC, Src0RC, Src1RC, NumSrcArgs,
1768 HasModifiers, Src0Mod, Src1Mod>.ret,
// Input dag for DPP8 forms: parallel to getInsDPP, but with the $dpp8
// pattern and $fi (fetch-invalidate) operands instead of the DPP controls.
1772 class getInsDPP8 <RegisterOperand DstRC, RegisterClass Src0RC, RegisterClass Src1RC,
1773 int NumSrcArgs, bit HasModifiers,
1774 Operand Src0Mod, Operand Src1Mod> {
1775 dag ret = !if (!eq(NumSrcArgs, 0),
1776 // VOP1 without input operands (V_NOP)
1777 (ins dpp8:$dpp8, FI:$fi),
1778 !if (!eq(NumSrcArgs, 1),
1779 !if (!eq(HasModifiers, 1),
1780 // VOP1_DPP with modifiers
1781 (ins DstRC:$old, Src0Mod:$src0_modifiers,
1782 Src0RC:$src0, dpp8:$dpp8, FI:$fi)
1784 // VOP1_DPP without modifiers
1785 (ins DstRC:$old, Src0RC:$src0, dpp8:$dpp8, FI:$fi)
1787 /* NumSrcArgs == 2 */,
1788 !if (!eq(HasModifiers, 1),
1789 // VOP2_DPP with modifiers
1791 Src0Mod:$src0_modifiers, Src0RC:$src0,
1792 Src1Mod:$src1_modifiers, Src1RC:$src1,
1795 // VOP2_DPP without modifiers
1797 Src0RC:$src0, Src1RC:$src1, dpp8:$dpp8, FI:$fi)
// Input dag for SDWA forms: adds dst_sel/dst_unused/src*_sel selection
// operands; VOPC (DstVT.Size == 1) has no dst_sel/dst_unused.
1803 class getInsSDWA <RegisterOperand Src0RC, RegisterOperand Src1RC, int NumSrcArgs,
1804 bit HasSDWAOMod, Operand Src0Mod, Operand Src1Mod,
1807 dag ret = !if(!eq(NumSrcArgs, 0),
1808 // VOP1 without input operands (V_NOP)
1810 !if(!eq(NumSrcArgs, 1),
1812 !if(!eq(HasSDWAOMod, 0),
1813 // VOP1_SDWA without omod
1814 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1816 dst_sel:$dst_sel, dst_unused:$dst_unused,
1817 src0_sel:$src0_sel),
1818 // VOP1_SDWA with omod
1819 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1820 clampmod:$clamp, omod:$omod,
1821 dst_sel:$dst_sel, dst_unused:$dst_unused,
1822 src0_sel:$src0_sel)),
1823 !if(!eq(NumSrcArgs, 2),
1824 !if(!eq(DstVT.Size, 1),
1826 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1827 Src1Mod:$src1_modifiers, Src1RC:$src1,
1828 clampmod:$clamp, src0_sel:$src0_sel, src1_sel:$src1_sel),
1830 !if(!eq(HasSDWAOMod, 0),
1831 // VOP2_SDWA without omod
1832 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1833 Src1Mod:$src1_modifiers, Src1RC:$src1,
1835 dst_sel:$dst_sel, dst_unused:$dst_unused,
1836 src0_sel:$src0_sel, src1_sel:$src1_sel),
1837 // VOP2_SDWA with omod
1838 (ins Src0Mod:$src0_modifiers, Src0RC:$src0,
1839 Src1Mod:$src1_modifiers, Src1RC:$src1,
1840 clampmod:$clamp, omod:$omod,
1841 dst_sel:$dst_sel, dst_unused:$dst_unused,
1842 src0_sel:$src0_sel, src1_sel:$src1_sel))),
1843 (ins)/* endif */)));
1846 // Outs for DPP and SDWA
1847 class getOutsExt <bit HasDst, ValueType DstVT, RegisterOperand DstRCExt> {
1848 dag ret = !if(HasDst,
1849 !if(!eq(DstVT.Size, 1),
1850 (outs), // no dst for VOPC, we use "vcc"-token as dst in SDWA VOPC instructions
1851 (outs DstRCExt:$vdst)),
// SDWA outs: VOPC results go to an explicit $sdst, others to $vdst.
1856 class getOutsSDWA <bit HasDst, ValueType DstVT, RegisterOperand DstRCSDWA> {
1857 dag ret = !if(HasDst,
1858 !if(!eq(DstVT.Size, 1),
1859 (outs DstRCSDWA:$sdst),
1860 (outs DstRCSDWA:$vdst)),
1864 // Returns the assembly string for the inputs and outputs of a VOP[12C]
1865 // instruction. This does not add the _e32 suffix, so it can be reused
1867 class getAsm32 <bit HasDst, int NumSrcArgs, ValueType DstVT = i32> {
1868 string dst = !if(!eq(DstVT.Size, 1), "$sdst", "$vdst"); // use $sdst for VOPC
1869 string src0 = ", $src0";
1870 string src1 = ", $src1";
1871 string src2 = ", $src2";
1872 string ret = !if(HasDst, dst, "") #
1873 !if(!eq(NumSrcArgs, 1), src0, "") #
1874 !if(!eq(NumSrcArgs, 2), src0#src1, "") #
1875 !if(!eq(NumSrcArgs, 3), src0#src1#src2, "");
1878 // Returns the assembly string for the inputs and outputs of a VOP3
// With modifiers the sources print as $srcN_modifiers and clamp/omod are
// appended; without modifiers this degenerates to getAsm32 (+ int clamp).
1880 class getAsm64 <bit HasDst, int NumSrcArgs, bit HasIntClamp, bit HasModifiers,
1881 bit HasOMod, ValueType DstVT = i32> {
1882 string dst = !if(!eq(DstVT.Size, 1), "$sdst", "$vdst"); // use $sdst for VOPC
1883 string src0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,");
1884 string src1 = !if(!eq(NumSrcArgs, 1), "",
1885 !if(!eq(NumSrcArgs, 2), " $src1_modifiers",
1886 " $src1_modifiers,"));
1887 string src2 = !if(!eq(NumSrcArgs, 3), " $src2_modifiers", "");
1888 string iclamp = !if(HasIntClamp, "$clamp", "");
1890 !if(!eq(HasModifiers, 0),
1891 getAsm32<HasDst, NumSrcArgs, DstVT>.ret # iclamp,
1892 dst#", "#src0#src1#src2#"$clamp"#!if(HasOMod, "$omod", ""));
1895 // Returns the assembly string for the inputs and outputs of a VOP3P
1897 class getAsmVOP3P <bit HasDst, int NumSrcArgs, bit HasModifiers,
1898 bit HasClamp, ValueType DstVT = i32> {
1899 string dst = " $vdst";
1900 string src0 = !if(!eq(NumSrcArgs, 1), "$src0", "$src0,");
1901 string src1 = !if(!eq(NumSrcArgs, 1), "",
1902 !if(!eq(NumSrcArgs, 2), " $src1",
1904 string src2 = !if(!eq(NumSrcArgs, 3), " $src2", "");
1906 string mods = !if(HasModifiers, "$neg_lo$neg_hi", "");
1907 string clamp = !if(HasClamp, "$clamp", "");
1909 // Each modifier is printed as an array of bits for each operand, so
1910 // all operands are printed as part of src0_modifiers.
1911 string ret = dst#", "#src0#src1#src2#"$op_sel$op_sel_hi"#mods#clamp;
// Assembly string for VOP3 op_sel instructions: each source prints either
// the plain $srcN (isrcN) or the $srcN_modifiers form (fsrcN), selected by
// the per-source SrcNHasMods flags.
1914 class getAsmVOP3OpSel <int NumSrcArgs,
1919 string dst = " $vdst";
1921 string isrc0 = !if(!eq(NumSrcArgs, 1), "$src0", "$src0,");
1922 string isrc1 = !if(!eq(NumSrcArgs, 1), "",
1923 !if(!eq(NumSrcArgs, 2), " $src1",
1925 string isrc2 = !if(!eq(NumSrcArgs, 3), " $src2", "");
1927 string fsrc0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,");
1928 string fsrc1 = !if(!eq(NumSrcArgs, 1), "",
1929 !if(!eq(NumSrcArgs, 2), " $src1_modifiers",
1930 " $src1_modifiers,"));
1931 string fsrc2 = !if(!eq(NumSrcArgs, 3), " $src2_modifiers", "");
1933 string src0 = !if(Src0HasMods, fsrc0, isrc0);
1934 string src1 = !if(Src1HasMods, fsrc1, isrc1);
1935 string src2 = !if(Src2HasMods, fsrc2, isrc2);
1937 string clamp = !if(HasClamp, "$clamp", "");
1939 string ret = dst#", "#src0#src1#src2#"$op_sel"#clamp;
// Assembly string for DPP forms; appends the DPP control operands.
1942 class getAsmDPP <bit HasDst, int NumSrcArgs, bit HasModifiers, ValueType DstVT = i32> {
1943 string dst = !if(HasDst,
1944 !if(!eq(DstVT.Size, 1),
1947 ""); // use $sdst for VOPC
1948 string src0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,");
1949 string src1 = !if(!eq(NumSrcArgs, 1), "",
1950 !if(!eq(NumSrcArgs, 2), " $src1_modifiers",
1951 " $src1_modifiers,"));
1952 string args = !if(!eq(HasModifiers, 0),
1953 getAsm32<0, NumSrcArgs, DstVT>.ret,
1955 string ret = dst#args#" $dpp_ctrl$row_mask$bank_mask$bound_ctrl";
// DPP16 form: the DPP string plus the trailing $fi operand.
1958 class getAsmDPP16 <bit HasDst, int NumSrcArgs, bit HasModifiers, ValueType DstVT = i32> {
1959 string ret = getAsmDPP<HasDst, NumSrcArgs, HasModifiers, DstVT>.ret#"$fi";
// DPP8 form: same operand layout, but with $dpp8$fi instead of the DPP
// control operands.
1962 class getAsmDPP8 <bit HasDst, int NumSrcArgs, bit HasModifiers, ValueType DstVT = i32> {
1963 string dst = !if(HasDst,
1964 !if(!eq(DstVT.Size, 1),
1967 ""); // use $sdst for VOPC
1968 string src0 = !if(!eq(NumSrcArgs, 1), "$src0_modifiers", "$src0_modifiers,");
1969 string src1 = !if(!eq(NumSrcArgs, 1), "",
1970 !if(!eq(NumSrcArgs, 2), " $src1_modifiers",
1971 " $src1_modifiers,"));
1972 string args = !if(!eq(HasModifiers, 0),
1973 getAsm32<0, NumSrcArgs, DstVT>.ret,
1975 string ret = dst#args#"$dpp8$fi";
// Assembly string for (GFX8-style) SDWA forms; appends the sel operands.
1978 class getAsmSDWA <bit HasDst, int NumSrcArgs, ValueType DstVT = i32> {
1979 string dst = !if(HasDst,
1980 !if(!eq(DstVT.Size, 1),
1981 " vcc", // use vcc token as dst for VOPC instructions
1984 string src0 = "$src0_modifiers";
1985 string src1 = "$src1_modifiers";
1986 string args = !if(!eq(NumSrcArgs, 0),
1988 !if(!eq(NumSrcArgs, 1),
1990 ", "#src0#", "#src1#"$clamp"
1993 string sdwa = !if(!eq(NumSrcArgs, 0),
1995 !if(!eq(NumSrcArgs, 1),
1996 " $dst_sel $dst_unused $src0_sel",
1997 !if(!eq(DstVT.Size, 1),
1998 " $src0_sel $src1_sel", // No dst_sel and dst_unused for VOPC
1999 " $dst_sel $dst_unused $src0_sel $src1_sel"
2003 string ret = dst#args#sdwa;
// Assembly string for GFX9+ SDWA forms; unlike getAsmSDWA this also
// supports output modifiers (omod) via HasOMod.
2006 class getAsmSDWA9 <bit HasDst, bit HasOMod, int NumSrcArgs,
2007 ValueType DstVT = i32> {
2008 string dst = !if(HasDst,
2009 !if(!eq(DstVT.Size, 1),
2013 string src0 = "$src0_modifiers";
2014 string src1 = "$src1_modifiers";
2015 string out_mods = !if(!eq(HasOMod, 0), "$clamp", "$clamp$omod");
2016 string args = !if(!eq(NumSrcArgs, 0), "",
2017 !if(!eq(NumSrcArgs, 1),
2022 string sdwa = !if(!eq(NumSrcArgs, 0), "",
2023 !if(!eq(NumSrcArgs, 1),
2024 out_mods#" $dst_sel $dst_unused $src0_sel",
2025 !if(!eq(DstVT.Size, 1),
2026 " $src0_sel $src1_sel", // No dst_sel, dst_unused and output modifiers for VOPC
2027 out_mods#" $dst_sel $dst_unused $src0_sel $src1_sel"
2031 string ret = dst#args#sdwa;
2035 // Function that checks if instruction supports DPP and SDWA
// Extensions are unavailable for VOP3 (3 sources) and for any 64-bit
// destination or source operand.
2036 class getHasExt <int NumSrcArgs, ValueType DstVT = i32, ValueType Src0VT = i32,
2037 ValueType Src1VT = i32> {
2038 bit ret = !if(!eq(NumSrcArgs, 3),
2039 0, // NumSrcArgs == 3 - No DPP or SDWA for VOP3
2040 !if(!eq(DstVT.Size, 64),
2041 0, // 64-bit dst - No DPP or SDWA for 64-bit operands
2042 !if(!eq(Src0VT.Size, 64),
2044 !if(!eq(Src1VT.Size, 64),
// Like getHasExt, but additionally disallows DPP for 0-source instructions.
2053 class getHasDPP <int NumSrcArgs, ValueType DstVT = i32, ValueType Src0VT = i32,
2054 ValueType Src1VT = i32> {
2055 bit ret = !if(!eq(NumSrcArgs, 0), 0,
2056 getHasExt<NumSrcArgs, DstVT, Src0VT, Src1VT>.ret);
// Logical OR of two bits, expressed with nested !if.
2059 class BitOr<bit a, bit b> {
2060 bit ret = !if(a, 1, !if(b, 1, 0));
// Logical AND of two bits, expressed with nested !if.
2063 class BitAnd<bit a, bit b> {
2064 bit ret = !if(a, !if(b, 1, 0), 0);
// VOPProfile describes one VALU operand signature: the value types of the
// destination and up to three sources, plus everything TableGen derives from
// them — register/operand classes for each encoding (VOP1/2/3, DPP, SDWA),
// modifier availability, ins/outs dags, and per-encoding asm strings.
// ArgVT is [DstVT, Src0VT, Src1VT, Src2VT]; 'untyped' marks absent operands.
2072 class VOPProfile <list<ValueType> _ArgVT, bit _EnableF32SrcMods = 0,
2073 bit _EnableClamp = 0> {
2075 field list<ValueType> ArgVT = _ArgVT;
2076 field bit EnableF32SrcMods = _EnableF32SrcMods;
2077 field bit EnableClamp = _EnableClamp;
2079 field ValueType DstVT = ArgVT[0];
2080 field ValueType Src0VT = ArgVT[1];
2081 field ValueType Src1VT = ArgVT[2];
2082 field ValueType Src2VT = ArgVT[3];
// Per-encoding register/operand classes derived from the value types.
2083 field RegisterOperand DstRC = getVALUDstForVT<DstVT>.ret;
2084 field RegisterOperand DstRCDPP = getVALUDstForVT<DstVT>.ret;
2085 field RegisterOperand DstRCSDWA = getSDWADstForVT<DstVT>.ret;
2086 field RegisterOperand Src0RC32 = getVOPSrc0ForVT<Src0VT>.ret;
2087 field RegisterClass Src1RC32 = getVregSrcForVT<Src1VT>.ret;
2088 field RegisterOperand Src0RC64 = getVOP3SrcForVT<Src0VT>.ret;
2089 field RegisterOperand Src1RC64 = getVOP3SrcForVT<Src1VT>.ret;
2090 field RegisterOperand Src2RC64 = getVOP3SrcForVT<Src2VT>.ret;
2091 field RegisterClass Src0DPP = getVregSrcForVT<Src0VT>.ret;
2092 field RegisterClass Src1DPP = getVregSrcForVT<Src1VT>.ret;
2093 field RegisterOperand Src0SDWA = getSDWASrcForVT<Src0VT>.ret;
// Fix: derive src1's SDWA operand class from Src1VT, not Src0VT.  The old
// Src0VT here was a copy-paste error that gave src1 the wrong operand class
// for profiles whose two sources have different types.
2094 field RegisterOperand Src1SDWA = getSDWASrcForVT<Src1VT>.ret;
2095 field Operand Src0Mod = getSrcMod<Src0VT, EnableF32SrcMods>.ret;
2096 field Operand Src1Mod = getSrcMod<Src1VT, EnableF32SrcMods>.ret;
2097 field Operand Src2Mod = getSrcMod<Src2VT, EnableF32SrcMods>.ret;
2098 field Operand Src0ModDPP = getSrcModExt<Src0VT>.ret;
2099 field Operand Src1ModDPP = getSrcModExt<Src1VT>.ret;
2100 field Operand Src0ModSDWA = getSrcModSDWA<Src0VT>.ret;
2101 field Operand Src1ModSDWA = getSrcModSDWA<Src1VT>.ret;
// Operand-presence bits: an 'untyped' slot means the operand does not exist.
2104 field bit HasDst = !if(!eq(DstVT.Value, untyped.Value), 0, 1);
2105 field bit HasDst32 = HasDst;
2106 field bit EmitDst = HasDst; // force dst encoding, see v_movreld_b32 special case
2107 field int NumSrcArgs = getNumSrcArgs<Src0VT, Src1VT, Src2VT>.ret;
2108 field bit HasSrc0 = !if(!eq(Src0VT.Value, untyped.Value), 0, 1);
2109 field bit HasSrc1 = !if(!eq(Src1VT.Value, untyped.Value), 0, 1);
2110 field bit HasSrc2 = !if(!eq(Src2VT.Value, untyped.Value), 0, 1);
2112 // TODO: Modifiers logic is somewhat adhoc here, to be refined later
2113 // HasModifiers affects the normal and DPP encodings. We take note of EnableF32SrcMods, which
2114 // enables modifiers for i32 type.
2115 field bit HasModifiers = BitOr<isModifierType<Src0VT>.ret, EnableF32SrcMods>.ret;
2117 // HasSrc*FloatMods affects the SDWA encoding. We ignore EnableF32SrcMods.
2118 field bit HasSrc0FloatMods = isFloatType<Src0VT>.ret;
2119 field bit HasSrc1FloatMods = isFloatType<Src1VT>.ret;
2120 field bit HasSrc2FloatMods = isFloatType<Src2VT>.ret;
2122 // HasSrc*IntMods affects the SDWA encoding. We ignore EnableF32SrcMods.
2123 field bit HasSrc0IntMods = isIntType<Src0VT>.ret;
2124 field bit HasSrc1IntMods = isIntType<Src1VT>.ret;
2125 field bit HasSrc2IntMods = isIntType<Src2VT>.ret;
2127 field bit HasSrc0Mods = HasModifiers;
2128 field bit HasSrc1Mods = !if(HasModifiers, BitOr<HasSrc1FloatMods, HasSrc1IntMods>.ret, 0);
2129 field bit HasSrc2Mods = !if(HasModifiers, BitOr<HasSrc2FloatMods, HasSrc2IntMods>.ret, 0);
// Clamp availability: FP clamp for float dst, integer clamp otherwise;
// hi/lo clamp only for packed destination types.
2131 field bit HasClamp = BitOr<isModifierType<Src0VT>.ret, EnableClamp>.ret;
2132 field bit HasSDWAClamp = EmitDst;
2133 field bit HasFPClamp = BitAnd<isFloatType<DstVT>.ret, HasClamp>.ret;
2134 field bit HasIntClamp = !if(isFloatType<DstVT>.ret, 0, HasClamp);
2135 field bit HasClampLo = HasClamp;
2136 field bit HasClampHi = BitAnd<isPackedType<DstVT>.ret, HasClamp>.ret;
2137 field bit HasHigh = 0;
// Packed (e.g. v2f16) sources get op_sel instead of omod.
2139 field bit IsPacked = isPackedType<Src0VT>.ret;
2140 field bit HasOpSel = IsPacked;
2141 field bit HasOMod = !if(HasOpSel, 0, isFloatType<DstVT>.ret);
2142 field bit HasSDWAOMod = isFloatType<DstVT>.ret;
// Which extended encodings (DPP/SDWA) this profile supports.
2144 field bit HasExt = getHasExt<NumSrcArgs, DstVT, Src0VT, Src1VT>.ret;
2145 field bit HasExtDPP = getHasDPP<NumSrcArgs, DstVT, Src0VT, Src1VT>.ret;
2146 field bit HasExtSDWA = HasExt;
2147 field bit HasExtSDWA9 = HasExt;
2148 field int NeedPatGen = PatGenMode.NoPattern;
2150 field bit IsMAI = 0;
2151 field bit IsDOT = 0;
2153 field Operand Src0PackedMod = !if(HasSrc0FloatMods, PackedF16InputMods, PackedI16InputMods);
2154 field Operand Src1PackedMod = !if(HasSrc1FloatMods, PackedF16InputMods, PackedI16InputMods);
2155 field Operand Src2PackedMod = !if(HasSrc2FloatMods, PackedF16InputMods, PackedI16InputMods);
// Output dags per encoding.
2157 field dag Outs = !if(HasDst,(outs DstRC:$vdst),(outs));
2159 // VOP3b instructions are a special case with a second explicit
2160 // output. This is manually overridden for them.
2161 field dag Outs32 = Outs;
2162 field dag Outs64 = Outs;
2163 field dag OutsDPP = getOutsExt<HasDst, DstVT, DstRCDPP>.ret;
2164 field dag OutsDPP8 = getOutsExt<HasDst, DstVT, DstRCDPP>.ret;
2165 field dag OutsSDWA = getOutsSDWA<HasDst, DstVT, DstRCSDWA>.ret;
// Input dags per encoding, assembled by the getIns* helpers.
2167 field dag Ins32 = getIns32<Src0RC32, Src1RC32, NumSrcArgs>.ret;
2168 field dag Ins64 = getIns64<Src0RC64, Src1RC64, Src2RC64, NumSrcArgs,
2169 HasIntClamp, HasModifiers, HasSrc2Mods,
2170 HasOMod, Src0Mod, Src1Mod, Src2Mod>.ret;
2171 field dag InsVOP3P = getInsVOP3P<Src0RC64, Src1RC64, Src2RC64,
2172 NumSrcArgs, HasClamp,
2173 Src0PackedMod, Src1PackedMod, Src2PackedMod>.ret;
2174 field dag InsVOP3OpSel = getInsVOP3OpSel<Src0RC64, Src1RC64, Src2RC64,
2177 getOpSelMod<Src0VT>.ret,
2178 getOpSelMod<Src1VT>.ret,
2179 getOpSelMod<Src2VT>.ret>.ret;
2180 field dag InsDPP = !if(HasExtDPP,
2181 getInsDPP<DstRCDPP, Src0DPP, Src1DPP, NumSrcArgs,
2182 HasModifiers, Src0ModDPP, Src1ModDPP>.ret,
2184 field dag InsDPP16 = getInsDPP16<DstRCDPP, Src0DPP, Src1DPP, NumSrcArgs,
2185 HasModifiers, Src0ModDPP, Src1ModDPP>.ret;
2186 field dag InsDPP8 = getInsDPP8<DstRCDPP, Src0DPP, Src1DPP, NumSrcArgs, 0,
2187 Src0ModDPP, Src1ModDPP>.ret;
2188 field dag InsSDWA = getInsSDWA<Src0SDWA, Src1SDWA, NumSrcArgs,
2189 HasSDWAOMod, Src0ModSDWA, Src1ModSDWA,
// Assembly strings per encoding, built by the getAsm* helpers above.
2193 field string Asm32 = getAsm32<HasDst, NumSrcArgs, DstVT>.ret;
2194 field string Asm64 = getAsm64<HasDst, NumSrcArgs, HasIntClamp, HasModifiers, HasOMod, DstVT>.ret;
2195 field string AsmVOP3P = getAsmVOP3P<HasDst, NumSrcArgs, HasModifiers, HasClamp, DstVT>.ret;
2196 field string AsmVOP3OpSel = getAsmVOP3OpSel<NumSrcArgs,
2200 HasSrc2FloatMods>.ret;
2201 field string AsmDPP = !if(HasExtDPP,
2202 getAsmDPP<HasDst, NumSrcArgs, HasModifiers, DstVT>.ret, "");
2203 field string AsmDPP16 = getAsmDPP16<HasDst, NumSrcArgs, HasModifiers, DstVT>.ret;
2204 field string AsmDPP8 = getAsmDPP8<HasDst, NumSrcArgs, 0, DstVT>.ret;
2205 field string AsmSDWA = getAsmSDWA<HasDst, NumSrcArgs, DstVT>.ret;
2206 field string AsmSDWA9 = getAsmSDWA9<HasDst, HasSDWAOMod, NumSrcArgs, DstVT>.ret;
2208 field string TieRegDPP = "$old";
// Wrapper profile that re-derives from p's operand types but turns off the
// extended encodings (visible here: the SDWA9 extension bit).
2211 class VOP_NO_EXT <VOPProfile p> : VOPProfile <p.ArgVT> {
2215 let HasExtSDWA9 = 0;
// Wrapper profile that forces a pattern-generation mode for an existing
// profile; defaults to PatGenMode.Pattern.
2218 class VOP_PAT_GEN <VOPProfile p, int mode=PatGenMode.Pattern> : VOPProfile <p.ArgVT> {
2219 let NeedPatGen = mode;
// Concrete VOPProfile instances, named VOP_<Dst>_<Src0>[_<Src1>[_<Src2>]]
// after their operand value types; 'untyped' slots are absent operands.
// 16-bit profiles.
2222 def VOP_F16_F16 : VOPProfile <[f16, f16, untyped, untyped]>;
2223 def VOP_F16_I16 : VOPProfile <[f16, i16, untyped, untyped]>;
2224 def VOP_I16_F16 : VOPProfile <[i16, f16, untyped, untyped]>;
2226 def VOP_F16_F16_F16 : VOPProfile <[f16, f16, f16, untyped]>;
2227 def VOP_F16_F16_I16 : VOPProfile <[f16, f16, i16, untyped]>;
2228 def VOP_F16_F16_I32 : VOPProfile <[f16, f16, i32, untyped]>;
2229 def VOP_I16_I16_I16 : VOPProfile <[i16, i16, i16, untyped]>;
2231 def VOP_I16_I16_I16_I16 : VOPProfile <[i16, i16, i16, i16, untyped]>;
2232 def VOP_F16_F16_F16_F16 : VOPProfile <[f16, f16, f16, f16, untyped]>;
2234 def VOP_I32_I16_I16_I32 : VOPProfile <[i32, i16, i16, i32, untyped]>;
// Packed 16-bit profiles.
2236 def VOP_V2F16_V2F16_V2F16 : VOPProfile <[v2f16, v2f16, v2f16, untyped]>;
2237 def VOP_V2I16_V2I16_V2I16 : VOPProfile <[v2i16, v2i16, v2i16, untyped]>;
2238 def VOP_B32_F16_F16 : VOPProfile <[i32, f16, f16, untyped]>;
2240 def VOP_V2F16_V2F16_V2F16_V2F16 : VOPProfile <[v2f16, v2f16, v2f16, v2f16]>;
2241 def VOP_V2I16_V2I16_V2I16_V2I16 : VOPProfile <[v2i16, v2i16, v2i16, v2i16]>;
2242 def VOP_V2I16_F32_F32 : VOPProfile <[v2i16, f32, f32, untyped]>;
2243 def VOP_V2I16_I32_I32 : VOPProfile <[v2i16, i32, i32, untyped]>;
2245 def VOP_F32_V2F16_V2F16_V2F16 : VOPProfile <[f32, v2f16, v2f16, v2f16]>;
// Profile with no operands at all.
2247 def VOP_NONE : VOPProfile <[untyped, untyped, untyped, untyped]>;
// 32/64-bit unary profiles (conversions, moves, etc.).
2249 def VOP_F32_F32 : VOPProfile <[f32, f32, untyped, untyped]>;
2250 def VOP_F32_F64 : VOPProfile <[f32, f64, untyped, untyped]>;
2251 def VOP_F32_I32 : VOPProfile <[f32, i32, untyped, untyped]>;
2252 def VOP_F64_F32 : VOPProfile <[f64, f32, untyped, untyped]>;
2253 def VOP_F64_F64 : VOPProfile <[f64, f64, untyped, untyped]>;
2254 def VOP_F64_I32 : VOPProfile <[f64, i32, untyped, untyped]>;
2255 def VOP_I32_F32 : VOPProfile <[i32, f32, untyped, untyped]>;
2256 def VOP_I32_F64 : VOPProfile <[i32, f64, untyped, untyped]>;
2257 def VOP_I32_I32 : VOPProfile <[i32, i32, untyped, untyped]>;
2258 def VOP_F16_F32 : VOPProfile <[f16, f32, untyped, untyped]>;
2259 def VOP_F32_F16 : VOPProfile <[f32, f16, untyped, untyped]>;
// Binary profiles.
2261 def VOP_F32_F32_F16 : VOPProfile <[f32, f32, f16, untyped]>;
2262 def VOP_F32_F32_F32 : VOPProfile <[f32, f32, f32, untyped]>;
2263 def VOP_F32_F32_I32 : VOPProfile <[f32, f32, i32, untyped]>;
2264 def VOP_F64_F64_F64 : VOPProfile <[f64, f64, f64, untyped]>;
2265 def VOP_F64_F64_I32 : VOPProfile <[f64, f64, i32, untyped]>;
2266 def VOP_I32_F32_F32 : VOPProfile <[i32, f32, f32, untyped]>;
2267 def VOP_I32_F32_I32 : VOPProfile <[i32, f32, i32, untyped]>;
2268 def VOP_I32_I32_I32 : VOPProfile <[i32, i32, i32, untyped]>;
// Integer arithmetic variant with clamp enabled (EnableClamp = 1).
2269 def VOP_I32_I32_I32_ARITH : VOPProfile <[i32, i32, i32, untyped], 0, /*EnableClamp=*/1>;
2270 def VOP_V2F16_F32_F32 : VOPProfile <[v2f16, f32, f32, untyped]>;
2271 def VOP_F32_F16_F16_F16 : VOPProfile <[f32, f16, f16, f16]>;
2273 def VOP_I64_I64_I32 : VOPProfile <[i64, i64, i32, untyped]>;
2274 def VOP_I64_I32_I64 : VOPProfile <[i64, i32, i64, untyped]>;
2275 def VOP_I64_I64_I64 : VOPProfile <[i64, i64, i64, untyped]>;
// Ternary profiles.
2277 def VOP_F16_F32_F16_F32 : VOPProfile <[f16, f32, f16, f32]>;
2278 def VOP_F32_F32_F16_F16 : VOPProfile <[f32, f32, f16, f16]>;
2279 def VOP_F32_F32_F32_F32 : VOPProfile <[f32, f32, f32, f32]>;
2280 def VOP_F64_F64_F64_F64 : VOPProfile <[f64, f64, f64, f64]>;
2281 def VOP_I32_I32_I32_I32 : VOPProfile <[i32, i32, i32, i32]>;
2282 def VOP_I64_I32_I32_I64 : VOPProfile <[i64, i32, i32, i64]>;
2283 def VOP_I32_F32_I32_I32 : VOPProfile <[i32, f32, i32, i32]>;
2284 def VOP_I64_I64_I32_I64 : VOPProfile <[i64, i64, i32, i64]>;
2285 def VOP_V4I32_I64_I32_V4I32 : VOPProfile <[v4i32, i64, i32, v4i32]>;
2287 def VOP_F32_V2F16_V2F16_F32 : VOPProfile <[f32, v2f16, v2f16, f32]>;
2288 def VOP_I32_V2I16_V2I16_I32 : VOPProfile <[i32, v2i16, v2i16, i32]>;
// Wide vector-accumulator profiles (dst and src2 are matching vectors).
2290 def VOP_V4F32_F32_F32_V4F32 : VOPProfile <[v4f32, f32, f32, v4f32]>;
2291 def VOP_V16F32_F32_F32_V16F32 : VOPProfile <[v16f32, f32, f32, v16f32]>;
2292 def VOP_V32F32_F32_F32_V32F32 : VOPProfile <[v32f32, f32, f32, v32f32]>;
2293 def VOP_V4F32_V4F16_V4F16_V4F32 : VOPProfile <[v4f32, v4f16, v4f16, v4f32]>;
2294 def VOP_V16F32_V4F16_V4F16_V16F32 : VOPProfile <[v16f32, v4f16, v4f16, v16f32]>;
2295 def VOP_V32F32_V4F16_V4F16_V32F32 : VOPProfile <[v32f32, v4f16, v4f16, v32f32]>;
2296 def VOP_V4F32_V2I16_V2I16_V4F32 : VOPProfile <[v4f32, v2i16, v2i16, v4f32]>;
2297 def VOP_V16F32_V2I16_V2I16_V16F32 : VOPProfile <[v16f32, v2i16, v2i16, v16f32]>;
2298 def VOP_V32F32_V2I16_V2I16_V32F32 : VOPProfile <[v32f32, v2i16, v2i16, v32f32]>;
2299 def VOP_V4I32_I32_I32_V4I32 : VOPProfile <[v4i32, i32, i32, v4i32]>;
2300 def VOP_V16I32_I32_I32_V16I32 : VOPProfile <[v16i32, i32, i32, v16i32]>;
2301 def VOP_V32I32_I32_I32_V32I32 : VOPProfile <[v32i32, i32, i32, v32i32]>;
// Marker carried by commutable instruction pairs: RevOp names the opposite
// opcode, IsOrig distinguishes the original from the reversed form.
// Queried by the getCommuteOrig/getCommuteRev instruction mappings below.
2303 class Commutable_REV <string revOp, bit isOrig> {
2304 string RevOp = revOp;
2305 bit IsOrig = isOrig;
// Marker linking atomic opcodes with and without a return value; queried
// by the getAtomicRetOp/getAtomicNoRetOp instruction mappings below.
2308 class AtomicNoRet <string noRetOp, bit isRet> {
2309 string NoRetOp = noRetOp;
2313 //===----------------------------------------------------------------------===//
2314 // Interpolation opcodes
2315 //===----------------------------------------------------------------------===//
// Destination operand for interpolation instructions; printed by the
// custom printVINTRPDst method.
2317 class VINTRPDstOperand <RegisterClass rc> : RegisterOperand <rc, "printVINTRPDst">;
// CodeGen-only pseudo form of an interpolation instruction; lowered to a
// real encoding through the SIMCInstr/getMCOpcodeGen mechanism (family NONE
// marks the pseudo row).
2319 class VINTRP_Pseudo <string opName, dag outs, dag ins, list<dag> pattern> :
2320 VINTRPCommon <outs, ins, "", pattern>,
2321 SIMCInstr<opName, SIEncodingFamily.NONE> {
2323 let isCodeGenOnly = 1;
2326 // FIXME-GFX10: WIP.
// Real (encoded) VINTRP instruction for SI-era encodings.  encodingFamily
// is a parameter so VINTRP_m can also reuse this class for GFX10.
2327 class VINTRP_Real_si <bits <2> op, string opName, dag outs, dag ins,
2328 string asm, int encodingFamily> :
2329 VINTRPCommon <outs, ins, asm, []>,
2331 SIMCInstr<opName, encodingFamily> {
2332 let DisableDecoder = DisableSIDecoder;
// Real (encoded) VINTRP instruction for the VI (GFX8) encoding family.
2335 class VINTRP_Real_vi <bits <2> op, string opName, dag outs, dag ins,
2337 VINTRPCommon <outs, ins, asm, []>,
2339 SIMCInstr<opName, SIEncodingFamily.VI> {
2340 let AssemblerPredicate = VIAssemblerPredicate;
2341 let DecoderNamespace = "GFX8";
2342 let DisableDecoder = DisableVIDecoder;
2345 // FIXME-GFX10: WIP.
// Instantiates one interpolation opcode: the codegen pseudo plus the real
// per-subtarget encodings (_si, _vi, _gfx10).  GFX10 reuses the SI encoding
// class with a different encoding family.
2346 multiclass VINTRP_m <bits <2> op, dag outs, dag ins, string asm,
2347 list<dag> pattern = []> {
2348 def "" : VINTRP_Pseudo <NAME, outs, ins, pattern>;
2350 let AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7" in {
2351 def _si : VINTRP_Real_si <op, NAME, outs, ins, asm, SIEncodingFamily.SI>;
2352 } // End AssemblerPredicate = isGFX6GFX7, DecoderNamespace = "GFX6GFX7"
2354 def _vi : VINTRP_Real_vi <op, NAME, outs, ins, asm>;
2356 let AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10" in {
2357 def _gfx10 : VINTRP_Real_si<op, NAME, outs, ins, asm, SIEncodingFamily.GFX10>;
2358 } // End AssemblerPredicate = isGFX10Plus, DecoderNamespace = "GFX10"
2360 //===----------------------------------------------------------------------===//
2361 // Vector instruction mappings
2362 //===----------------------------------------------------------------------===//
// Encoding-size based mappings between the 4-byte (e32) and 8-byte (e64)
// forms of the same OpName.
2364 // Maps an opcode in e32 form to its e64 equivalent
2365 def getVOPe64 : InstrMapping {
2366 let FilterClass = "VOP";
2367 let RowFields = ["OpName"];
2368 let ColFields = ["Size", "VOP3"];
2369 let KeyCol = ["4", "0"];
2370 let ValueCols = [["8", "1"]];
2373 // Maps an opcode in e64 form to its e32 equivalent
2374 def getVOPe32 : InstrMapping {
2375 let FilterClass = "VOP";
2376 let RowFields = ["OpName"];
2377 let ColFields = ["Size", "VOP3"];
2378 let KeyCol = ["8", "1"];
2379 let ValueCols = [["4", "0"]];
// AsmVariantName-based mappings between the Default, SDWA and DPP variants
// of the same OpName.
2382 // Maps ordinary instructions to their SDWA counterparts
2383 def getSDWAOp : InstrMapping {
2384 let FilterClass = "VOP";
2385 let RowFields = ["OpName"];
2386 let ColFields = ["AsmVariantName"];
2387 let KeyCol = ["Default"];
2388 let ValueCols = [["SDWA"]];
2391 // Maps SDWA instructions to their ordinary counterparts
2392 def getBasicFromSDWAOp : InstrMapping {
2393 let FilterClass = "VOP";
2394 let RowFields = ["OpName"];
2395 let ColFields = ["AsmVariantName"];
2396 let KeyCol = ["SDWA"];
2397 let ValueCols = [["Default"]];
2400 // Maps ordinary instructions to their DPP counterparts
2401 def getDPPOp32 : InstrMapping {
2402 let FilterClass = "VOP";
2403 let RowFields = ["OpName"];
2404 let ColFields = ["AsmVariantName"];
2405 let KeyCol = ["Default"];
2406 let ValueCols = [["DPP"]];
2409 // Maps a commuted opcode to its original version
// Rows are keyed by the shared RevOp name from Commutable_REV; the IsOrig
// column selects the original (1) form.
2410 def getCommuteOrig : InstrMapping {
2411 let FilterClass = "Commutable_REV";
2412 let RowFields = ["RevOp"];
2413 let ColFields = ["IsOrig"];
2415 let ValueCols = [["1"]];
2418 // Maps an original opcode to its commuted version
// Inverse of getCommuteOrig: the IsOrig column selects the commuted (0) form.
2419 def getCommuteRev : InstrMapping {
2420 let FilterClass = "Commutable_REV";
2421 let RowFields = ["RevOp"];
2422 let ColFields = ["IsOrig"];
2424 let ValueCols = [["0"]];
// Maps a pseudo instruction (Subtarget == SIEncodingFamily.NONE) to its real
// MC opcode for each encoding family listed in ValueCols.
2427 def getMCOpcodeGen : InstrMapping {
2428 let FilterClass = "SIMCInstr";
2429 let RowFields = ["PseudoInstr"];
2430 let ColFields = ["Subtarget"];
2431 let KeyCol = [!cast<string>(SIEncodingFamily.NONE)];
2432 let ValueCols = [[!cast<string>(SIEncodingFamily.SI)],
2433 [!cast<string>(SIEncodingFamily.VI)],
2434 [!cast<string>(SIEncodingFamily.SDWA)],
2435 [!cast<string>(SIEncodingFamily.SDWA9)],
2436 // GFX80 encoding is added to work around a multiple matching
2437 // issue for buffer instructions with unpacked d16 data. This
2438 // does not actually change the encoding, and thus may be
2440 [!cast<string>(SIEncodingFamily.GFX80)],
2441 [!cast<string>(SIEncodingFamily.GFX9)],
2442 [!cast<string>(SIEncodingFamily.GFX10)],
2443 [!cast<string>(SIEncodingFamily.SDWA10)]];
2446 // Get equivalent SOPK instruction.
// Rows keyed by BaseCmpOp from SOPKInstTable; IsSOPK == 1 selects the SOPK
// counterpart of a compare.
2447 def getSOPKOp : InstrMapping {
2448 let FilterClass = "SOPKInstTable";
2449 let RowFields = ["BaseCmpOp"];
2450 let ColFields = ["IsSOPK"];
2452 let ValueCols = [["1"]];
// MUBUF addressing-mode mappings, keyed by OpName via MUBUFAddr64Table:
// IsAddr64 == 1 selects the addr64 form of a buffer instruction.
2455 def getAddr64Inst : InstrMapping {
2456 let FilterClass = "MUBUFAddr64Table";
2457 let RowFields = ["OpName"];
2458 let ColFields = ["IsAddr64"];
2460 let ValueCols = [["1"]];
2463 def getIfAddr64Inst : InstrMapping {
2464 let FilterClass = "MUBUFAddr64Table";
2465 let RowFields = ["OpName"];
2466 let ColFields = ["IsAddr64"];
2468 let ValueCols = [["1"]];
// Selects the non-LDS (IsLds == 0) form of a MUBUF instruction.
2471 def getMUBUFNoLdsInst : InstrMapping {
2472 let FilterClass = "MUBUFLdsTable";
2473 let RowFields = ["OpName"];
2474 let ColFields = ["IsLds"];
2476 let ValueCols = [["0"]];
// Atomic return/no-return mappings, keyed by the shared NoRetOp name from
// the AtomicNoRet marker class above.
2479 // Maps an atomic opcode to its version with a return value.
2480 def getAtomicRetOp : InstrMapping {
2481 let FilterClass = "AtomicNoRet";
2482 let RowFields = ["NoRetOp"];
2483 let ColFields = ["IsRet"];
2485 let ValueCols = [["1"]];
2488 // Maps an atomic opcode to its returnless version.
2489 def getAtomicNoRetOp : InstrMapping {
2490 let FilterClass = "AtomicNoRet";
2491 let RowFields = ["NoRetOp"];
2492 let ColFields = ["IsRet"];
2494 let ValueCols = [["0"]];
2497 // Maps a GLOBAL to its SADDR form.
// Keyed by SaddrOp from GlobalSaddrTable; IsSaddr == 1 selects the
// scalar-address form.
2498 def getGlobalSaddrOp : InstrMapping {
2499 let FilterClass = "GlobalSaddrTable";
2500 let RowFields = ["SaddrOp"];
2501 let ColFields = ["IsSaddr"];
2503 let ValueCols = [["1"]];
2506 // Maps a v_cmpx opcode with sdst to opcode without sdst.
2507 def getVCMPXNoSDstOp : InstrMapping {
2508 let FilterClass = "VCMPXNoSDstTable";
2509 let RowFields = ["NoSDstOp"];
2510 let ColFields = ["HasSDst"];
2512 let ValueCols = [["0"]];
2515 // Maps a SOPP to a SOPP with S_NOP
// Keyed by AsmString; the 8-byte Size column selects the relaxed form.
2516 def getSOPPWithRelaxation : InstrMapping {
2517 let FilterClass = "Base_SOPP";
2518 let RowFields = ["AsmString"];
2519 let ColFields = ["Size"];
2521 let ValueCols = [["8"]];
2524 include "SIInstructions.td"
2526 include "DSInstructions.td"
2527 include "MIMGInstructions.td"