//===---- SMInstructions.td - Scalar Memory Instruction Definitions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

def smrd_offset_8 : NamedOperandU32<"SMRDOffset8",
                                    NamedMatchClass<"SMRDOffset8">> {
  let OperandType = "OPERAND_IMMEDIATE";
}

def smrd_offset_20 : NamedOperandU32<"SMRDOffset20",
                                     NamedMatchClass<"SMRDOffset20">> {
  let OperandType = "OPERAND_IMMEDIATE";
}
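
// smrd_offset_8 is the narrow offset operand used by the SI/CI SMRD encoding,
// while smrd_offset_20 is the wider operand used by the VI SMEM encoding (see
// the real-instruction encodings below).
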
//===----------------------------------------------------------------------===//
// Scalar Memory classes
//===----------------------------------------------------------------------===//

class SM_Pseudo <string opName, dag outs, dag ins, string asmOps, list<dag> pattern=[]> :
  InstSI <outs, ins, "", pattern>,
  SIMCInstr<opName, SIEncodingFamily.NONE> {
  let isCodeGenOnly = 1;
  let hasSideEffects = 0;
  let UseNamedOperandTable = 1;
  let SchedRW = [WriteSMEM];
  let SubtargetPredicate = isGCN;

  string Mnemonic = opName;
  string AsmOperands = asmOps;

  bits<1> has_sbase = 1;
  bits<1> has_sdst = 1;
  bits<1> has_glc = 0;
  bits<1> has_offset = 1;
  bits<1> offset_is_imm = 0;
}
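
// SM_Pseudo carries the operands and codegen-relevant flags used during
// instruction selection; the SM_Real classes further down bind these pseudos
// to the subtarget-specific SI/CI/VI encodings.
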
class SM_Real <SM_Pseudo ps>
  : InstSI<ps.OutOperandList, ps.InOperandList, ps.Mnemonic # ps.AsmOperands, []> {
  let isCodeGenOnly = 0;

  // copy relevant pseudo op flags
  let SubtargetPredicate = ps.SubtargetPredicate;
  let AsmMatchConverter  = ps.AsmMatchConverter;

  // encoding fields referenced by the subtarget encodings below
  bits<7>  sbase;
  bits<7>  sdst;
  bits<32> offset;
  bits<1>  imm = !if(ps.has_offset, ps.offset_is_imm, 0);
}

class SM_Probe_Pseudo <string opName, dag ins, bit isImm>
  : SM_Pseudo<opName, (outs), ins, " $sdata, $sbase, $offset"> {
  let hasSideEffects = 1;
  let offset_is_imm = isImm;
  let PseudoInstr = opName # !if(isImm, "_IMM", "_SGPR");
}

class SM_Load_Pseudo <string opName, dag outs, dag ins, string asmOps, list<dag> pattern=[]>
  : SM_Pseudo<opName, outs, ins, asmOps, pattern> {
  RegisterClass BaseClass;
}

class SM_Store_Pseudo <string opName, dag ins, string asmOps, list<dag> pattern = []>
  : SM_Pseudo<opName, (outs), ins, asmOps, pattern> {
  RegisterClass BaseClass;
  RegisterClass SrcClass;
}

class SM_Discard_Pseudo <string opName, dag ins, bit isImm>
  : SM_Pseudo<opName, (outs), ins, " $sbase, $offset"> {
  let hasSideEffects = 1;
  let offset_is_imm = isImm;
  let PseudoInstr = opName # !if(isImm, "_IMM", "_SGPR");
}

multiclass SM_Pseudo_Loads<string opName,
                           RegisterClass baseClass,
                           RegisterClass dstClass> {
  def _IMM : SM_Load_Pseudo <opName,
                             (outs dstClass:$sdst),
                             (ins baseClass:$sbase, i32imm:$offset, i1imm:$glc),
                             " $sdst, $sbase, $offset$glc", []> {
    let offset_is_imm = 1;
    let BaseClass = baseClass;
    let PseudoInstr = opName # "_IMM";
  }

  def _SGPR : SM_Load_Pseudo <opName,
                              (outs dstClass:$sdst),
                              (ins baseClass:$sbase, SReg_32:$soff, i1imm:$glc),
                              " $sdst, $sbase, $offset$glc", []> {
    let BaseClass = baseClass;
    let PseudoInstr = opName # "_SGPR";
  }
}

multiclass SM_Pseudo_Stores<string opName,
                            RegisterClass baseClass,
                            RegisterClass srcClass> {
  def _IMM : SM_Store_Pseudo <opName,
    (ins srcClass:$sdata, baseClass:$sbase, i32imm:$offset, i1imm:$glc),
    " $sdata, $sbase, $offset$glc", []> {
    let offset_is_imm = 1;
    let BaseClass = baseClass;
    let SrcClass = srcClass;
    let PseudoInstr = opName # "_IMM";
  }

  def _SGPR : SM_Store_Pseudo <opName,
    (ins srcClass:$sdata, baseClass:$sbase, SReg_32:$soff, i1imm:$glc),
    " $sdata, $sbase, $offset$glc", []> {
    let BaseClass = baseClass;
    let SrcClass = srcClass;
    let PseudoInstr = opName # "_SGPR";
  }
}

multiclass SM_Pseudo_Discards<string opName> {
  def _IMM  : SM_Discard_Pseudo <opName, (ins SReg_64:$sbase, smrd_offset_20:$offset), 1>;
  def _SGPR : SM_Discard_Pseudo <opName, (ins SReg_64:$sbase, SReg_32:$offset), 0>;
}

class SM_Time_Pseudo<string opName, SDPatternOperator node> : SM_Pseudo<
  opName, (outs SReg_64_XEXEC:$sdst), (ins),
  " $sdst", [(set i64:$sdst, (node))]> {
  let hasSideEffects = 1;
}

class SM_Inval_Pseudo <string opName, SDPatternOperator node> : SM_Pseudo<
  opName, (outs), (ins), "", [(node)]> {
  let hasSideEffects = 1;
}

multiclass SM_Pseudo_Probe<string opName, RegisterClass baseClass> {
  def _IMM  : SM_Probe_Pseudo <opName, (ins i8imm:$sdata, baseClass:$sbase, smrd_offset_20:$offset), 1>;
  def _SGPR : SM_Probe_Pseudo <opName, (ins i8imm:$sdata, baseClass:$sbase, SReg_32:$offset), 0>;
}

//===----------------------------------------------------------------------===//
// Scalar Atomic Memory Classes
//===----------------------------------------------------------------------===//

class SM_Atomic_Pseudo <string opName,
                        dag outs, dag ins, string asmOps, bit isRet>
  : SM_Pseudo<opName, outs, ins, asmOps, []> {
  // Should these be set?
  let hasSideEffects = 1;
}

class SM_Pseudo_Atomic<string opName,
                       RegisterClass baseClass,
                       RegisterClass dataClass,
                       bit isImm,
                       bit isRet> :
  SM_Atomic_Pseudo<opName,
                   !if(isRet, (outs dataClass:$sdst), (outs)),
                   !if(isImm,
                       (ins dataClass:$sdata, baseClass:$sbase, smrd_offset_20:$offset),
                       (ins dataClass:$sdata, baseClass:$sbase, SReg_32:$offset)),
                   !if(isRet, " $sdst", " $sdata") # ", $sbase, $offset" # !if(isRet, " glc", ""),
                   isRet> {
  let offset_is_imm = isImm;
  let PseudoInstr = opName # !if(isImm,
                                 !if(isRet, "_IMM_RTN", "_IMM"),
                                 !if(isRet, "_SGPR_RTN", "_SGPR"));

  let Constraints = !if(isRet, "$sdst = $sdata", "");
  let DisableEncoding = !if(isRet, "$sdata", "");
}
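
// For the _RTN variants the returned value is tied to the data operand
// ("$sdst = $sdata") and $sdata is removed from the encoding, so a single
// register field serves as both the data source and the destination.
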
multiclass SM_Pseudo_Atomics<string opName,
                             RegisterClass baseClass,
                             RegisterClass dataClass> {
  def _IMM      : SM_Pseudo_Atomic <opName, baseClass, dataClass, 1, 0>;
  def _SGPR     : SM_Pseudo_Atomic <opName, baseClass, dataClass, 0, 0>;
  def _IMM_RTN  : SM_Pseudo_Atomic <opName, baseClass, dataClass, 1, 1>;
  def _SGPR_RTN : SM_Pseudo_Atomic <opName, baseClass, dataClass, 0, 1>;
}

//===----------------------------------------------------------------------===//
// Scalar Memory Instructions
//===----------------------------------------------------------------------===//

// We are using the SReg_32_XM0 and not the SReg_32 register class for 32-bit
// SMRD instructions, because the SReg_32_XM0 register class does not include
// M0 and writing to M0 from an SMRD instruction will hang the GPU.

// XXX - SMEM instructions do not allow exec for the data operand, but is it
// allowed as sdst for SMRD on SI/CI?
defm S_LOAD_DWORD    : SM_Pseudo_Loads <"s_load_dword", SReg_64, SReg_32_XM0_XEXEC>;
defm S_LOAD_DWORDX2  : SM_Pseudo_Loads <"s_load_dwordx2", SReg_64, SReg_64_XEXEC>;
defm S_LOAD_DWORDX4  : SM_Pseudo_Loads <"s_load_dwordx4", SReg_64, SReg_128>;
defm S_LOAD_DWORDX8  : SM_Pseudo_Loads <"s_load_dwordx8", SReg_64, SReg_256>;
defm S_LOAD_DWORDX16 : SM_Pseudo_Loads <"s_load_dwordx16", SReg_64, SReg_512>;

defm S_BUFFER_LOAD_DWORD : SM_Pseudo_Loads <
  "s_buffer_load_dword", SReg_128, SReg_32_XM0_XEXEC
>;

// FIXME: exec_lo/exec_hi appear to be allowed for SMRD loads on
// SI/CI, but disallowed for SMEM on VI.
defm S_BUFFER_LOAD_DWORDX2 : SM_Pseudo_Loads <
  "s_buffer_load_dwordx2", SReg_128, SReg_64_XEXEC
>;

defm S_BUFFER_LOAD_DWORDX4 : SM_Pseudo_Loads <
  "s_buffer_load_dwordx4", SReg_128, SReg_128
>;

defm S_BUFFER_LOAD_DWORDX8 : SM_Pseudo_Loads <
  "s_buffer_load_dwordx8", SReg_128, SReg_256
>;

defm S_BUFFER_LOAD_DWORDX16 : SM_Pseudo_Loads <
  "s_buffer_load_dwordx16", SReg_128, SReg_512
>;

defm S_STORE_DWORD   : SM_Pseudo_Stores <"s_store_dword", SReg_64, SReg_32_XM0_XEXEC>;
defm S_STORE_DWORDX2 : SM_Pseudo_Stores <"s_store_dwordx2", SReg_64, SReg_64_XEXEC>;
defm S_STORE_DWORDX4 : SM_Pseudo_Stores <"s_store_dwordx4", SReg_64, SReg_128>;

defm S_BUFFER_STORE_DWORD : SM_Pseudo_Stores <
  "s_buffer_store_dword", SReg_128, SReg_32_XM0_XEXEC
>;

defm S_BUFFER_STORE_DWORDX2 : SM_Pseudo_Stores <
  "s_buffer_store_dwordx2", SReg_128, SReg_64_XEXEC
>;

defm S_BUFFER_STORE_DWORDX4 : SM_Pseudo_Stores <
  "s_buffer_store_dwordx4", SReg_128, SReg_128
>;

def S_MEMTIME    : SM_Time_Pseudo <"s_memtime", int_amdgcn_s_memtime>;
def S_DCACHE_INV : SM_Inval_Pseudo <"s_dcache_inv", int_amdgcn_s_dcache_inv>;

let SubtargetPredicate = isCIVI in {
def S_DCACHE_INV_VOL : SM_Inval_Pseudo <"s_dcache_inv_vol", int_amdgcn_s_dcache_inv_vol>;
} // let SubtargetPredicate = isCIVI

let SubtargetPredicate = isVI in {
def S_DCACHE_WB     : SM_Inval_Pseudo <"s_dcache_wb", int_amdgcn_s_dcache_wb>;
def S_DCACHE_WB_VOL : SM_Inval_Pseudo <"s_dcache_wb_vol", int_amdgcn_s_dcache_wb_vol>;
def S_MEMREALTIME   : SM_Time_Pseudo <"s_memrealtime", int_amdgcn_s_memrealtime>;

defm S_ATC_PROBE        : SM_Pseudo_Probe <"s_atc_probe", SReg_64>;
defm S_ATC_PROBE_BUFFER : SM_Pseudo_Probe <"s_atc_probe_buffer", SReg_128>;
} // SubtargetPredicate = isVI

let SubtargetPredicate = HasFlatScratchInsts, Uses = [FLAT_SCR] in {
defm S_SCRATCH_LOAD_DWORD   : SM_Pseudo_Loads <"s_scratch_load_dword", SReg_64, SReg_32_XM0_XEXEC>;
defm S_SCRATCH_LOAD_DWORDX2 : SM_Pseudo_Loads <"s_scratch_load_dwordx2", SReg_64, SReg_64_XEXEC>;
defm S_SCRATCH_LOAD_DWORDX4 : SM_Pseudo_Loads <"s_scratch_load_dwordx4", SReg_64, SReg_128>;

defm S_SCRATCH_STORE_DWORD   : SM_Pseudo_Stores <"s_scratch_store_dword", SReg_64, SReg_32_XM0_XEXEC>;
defm S_SCRATCH_STORE_DWORDX2 : SM_Pseudo_Stores <"s_scratch_store_dwordx2", SReg_64, SReg_64_XEXEC>;
defm S_SCRATCH_STORE_DWORDX4 : SM_Pseudo_Stores <"s_scratch_store_dwordx4", SReg_64, SReg_128>;
} // SubtargetPredicate = HasFlatScratchInsts

let SubtargetPredicate = HasScalarAtomics in {

defm S_BUFFER_ATOMIC_SWAP    : SM_Pseudo_Atomics <"s_buffer_atomic_swap", SReg_128, SReg_32_XM0_XEXEC>;
defm S_BUFFER_ATOMIC_CMPSWAP : SM_Pseudo_Atomics <"s_buffer_atomic_cmpswap", SReg_128, SReg_64_XEXEC>;
defm S_BUFFER_ATOMIC_ADD     : SM_Pseudo_Atomics <"s_buffer_atomic_add", SReg_128, SReg_32_XM0_XEXEC>;
defm S_BUFFER_ATOMIC_SUB     : SM_Pseudo_Atomics <"s_buffer_atomic_sub", SReg_128, SReg_32_XM0_XEXEC>;
defm S_BUFFER_ATOMIC_SMIN    : SM_Pseudo_Atomics <"s_buffer_atomic_smin", SReg_128, SReg_32_XM0_XEXEC>;
defm S_BUFFER_ATOMIC_UMIN    : SM_Pseudo_Atomics <"s_buffer_atomic_umin", SReg_128, SReg_32_XM0_XEXEC>;
defm S_BUFFER_ATOMIC_SMAX    : SM_Pseudo_Atomics <"s_buffer_atomic_smax", SReg_128, SReg_32_XM0_XEXEC>;
defm S_BUFFER_ATOMIC_UMAX    : SM_Pseudo_Atomics <"s_buffer_atomic_umax", SReg_128, SReg_32_XM0_XEXEC>;
defm S_BUFFER_ATOMIC_AND     : SM_Pseudo_Atomics <"s_buffer_atomic_and", SReg_128, SReg_32_XM0_XEXEC>;
defm S_BUFFER_ATOMIC_OR      : SM_Pseudo_Atomics <"s_buffer_atomic_or", SReg_128, SReg_32_XM0_XEXEC>;
defm S_BUFFER_ATOMIC_XOR     : SM_Pseudo_Atomics <"s_buffer_atomic_xor", SReg_128, SReg_32_XM0_XEXEC>;
defm S_BUFFER_ATOMIC_INC     : SM_Pseudo_Atomics <"s_buffer_atomic_inc", SReg_128, SReg_32_XM0_XEXEC>;
defm S_BUFFER_ATOMIC_DEC     : SM_Pseudo_Atomics <"s_buffer_atomic_dec", SReg_128, SReg_32_XM0_XEXEC>;

defm S_BUFFER_ATOMIC_SWAP_X2    : SM_Pseudo_Atomics <"s_buffer_atomic_swap_x2", SReg_128, SReg_64_XEXEC>;
defm S_BUFFER_ATOMIC_CMPSWAP_X2 : SM_Pseudo_Atomics <"s_buffer_atomic_cmpswap_x2", SReg_128, SReg_128>;
defm S_BUFFER_ATOMIC_ADD_X2     : SM_Pseudo_Atomics <"s_buffer_atomic_add_x2", SReg_128, SReg_64_XEXEC>;
defm S_BUFFER_ATOMIC_SUB_X2     : SM_Pseudo_Atomics <"s_buffer_atomic_sub_x2", SReg_128, SReg_64_XEXEC>;
defm S_BUFFER_ATOMIC_SMIN_X2    : SM_Pseudo_Atomics <"s_buffer_atomic_smin_x2", SReg_128, SReg_64_XEXEC>;
defm S_BUFFER_ATOMIC_UMIN_X2    : SM_Pseudo_Atomics <"s_buffer_atomic_umin_x2", SReg_128, SReg_64_XEXEC>;
defm S_BUFFER_ATOMIC_SMAX_X2    : SM_Pseudo_Atomics <"s_buffer_atomic_smax_x2", SReg_128, SReg_64_XEXEC>;
defm S_BUFFER_ATOMIC_UMAX_X2    : SM_Pseudo_Atomics <"s_buffer_atomic_umax_x2", SReg_128, SReg_64_XEXEC>;
defm S_BUFFER_ATOMIC_AND_X2     : SM_Pseudo_Atomics <"s_buffer_atomic_and_x2", SReg_128, SReg_64_XEXEC>;
defm S_BUFFER_ATOMIC_OR_X2      : SM_Pseudo_Atomics <"s_buffer_atomic_or_x2", SReg_128, SReg_64_XEXEC>;
defm S_BUFFER_ATOMIC_XOR_X2     : SM_Pseudo_Atomics <"s_buffer_atomic_xor_x2", SReg_128, SReg_64_XEXEC>;
defm S_BUFFER_ATOMIC_INC_X2     : SM_Pseudo_Atomics <"s_buffer_atomic_inc_x2", SReg_128, SReg_64_XEXEC>;
defm S_BUFFER_ATOMIC_DEC_X2     : SM_Pseudo_Atomics <"s_buffer_atomic_dec_x2", SReg_128, SReg_64_XEXEC>;

defm S_ATOMIC_SWAP    : SM_Pseudo_Atomics <"s_atomic_swap", SReg_64, SReg_32_XM0_XEXEC>;
defm S_ATOMIC_CMPSWAP : SM_Pseudo_Atomics <"s_atomic_cmpswap", SReg_64, SReg_64_XEXEC>;
defm S_ATOMIC_ADD     : SM_Pseudo_Atomics <"s_atomic_add", SReg_64, SReg_32_XM0_XEXEC>;
defm S_ATOMIC_SUB     : SM_Pseudo_Atomics <"s_atomic_sub", SReg_64, SReg_32_XM0_XEXEC>;
defm S_ATOMIC_SMIN    : SM_Pseudo_Atomics <"s_atomic_smin", SReg_64, SReg_32_XM0_XEXEC>;
defm S_ATOMIC_UMIN    : SM_Pseudo_Atomics <"s_atomic_umin", SReg_64, SReg_32_XM0_XEXEC>;
defm S_ATOMIC_SMAX    : SM_Pseudo_Atomics <"s_atomic_smax", SReg_64, SReg_32_XM0_XEXEC>;
defm S_ATOMIC_UMAX    : SM_Pseudo_Atomics <"s_atomic_umax", SReg_64, SReg_32_XM0_XEXEC>;
defm S_ATOMIC_AND     : SM_Pseudo_Atomics <"s_atomic_and", SReg_64, SReg_32_XM0_XEXEC>;
defm S_ATOMIC_OR      : SM_Pseudo_Atomics <"s_atomic_or", SReg_64, SReg_32_XM0_XEXEC>;
defm S_ATOMIC_XOR     : SM_Pseudo_Atomics <"s_atomic_xor", SReg_64, SReg_32_XM0_XEXEC>;
defm S_ATOMIC_INC     : SM_Pseudo_Atomics <"s_atomic_inc", SReg_64, SReg_32_XM0_XEXEC>;
defm S_ATOMIC_DEC     : SM_Pseudo_Atomics <"s_atomic_dec", SReg_64, SReg_32_XM0_XEXEC>;

defm S_ATOMIC_SWAP_X2    : SM_Pseudo_Atomics <"s_atomic_swap_x2", SReg_64, SReg_64_XEXEC>;
defm S_ATOMIC_CMPSWAP_X2 : SM_Pseudo_Atomics <"s_atomic_cmpswap_x2", SReg_64, SReg_128>;
defm S_ATOMIC_ADD_X2     : SM_Pseudo_Atomics <"s_atomic_add_x2", SReg_64, SReg_64_XEXEC>;
defm S_ATOMIC_SUB_X2     : SM_Pseudo_Atomics <"s_atomic_sub_x2", SReg_64, SReg_64_XEXEC>;
defm S_ATOMIC_SMIN_X2    : SM_Pseudo_Atomics <"s_atomic_smin_x2", SReg_64, SReg_64_XEXEC>;
defm S_ATOMIC_UMIN_X2    : SM_Pseudo_Atomics <"s_atomic_umin_x2", SReg_64, SReg_64_XEXEC>;
defm S_ATOMIC_SMAX_X2    : SM_Pseudo_Atomics <"s_atomic_smax_x2", SReg_64, SReg_64_XEXEC>;
defm S_ATOMIC_UMAX_X2    : SM_Pseudo_Atomics <"s_atomic_umax_x2", SReg_64, SReg_64_XEXEC>;
defm S_ATOMIC_AND_X2     : SM_Pseudo_Atomics <"s_atomic_and_x2", SReg_64, SReg_64_XEXEC>;
defm S_ATOMIC_OR_X2      : SM_Pseudo_Atomics <"s_atomic_or_x2", SReg_64, SReg_64_XEXEC>;
defm S_ATOMIC_XOR_X2     : SM_Pseudo_Atomics <"s_atomic_xor_x2", SReg_64, SReg_64_XEXEC>;
defm S_ATOMIC_INC_X2     : SM_Pseudo_Atomics <"s_atomic_inc_x2", SReg_64, SReg_64_XEXEC>;
defm S_ATOMIC_DEC_X2     : SM_Pseudo_Atomics <"s_atomic_dec_x2", SReg_64, SReg_64_XEXEC>;

} // let SubtargetPredicate = HasScalarAtomics

let SubtargetPredicate = isGFX9 in {
defm S_DCACHE_DISCARD    : SM_Pseudo_Discards <"s_dcache_discard">;
defm S_DCACHE_DISCARD_X2 : SM_Pseudo_Discards <"s_dcache_discard_x2">;
} // SubtargetPredicate = isGFX9

//===----------------------------------------------------------------------===//
// Scalar Memory Patterns
//===----------------------------------------------------------------------===//
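
// smrd_load matches loads that may be selected to scalar memory loads:
// dword-aligned, non-divergent loads from the constant address spaces, or,
// when the subtarget scalarizes global accesses, non-volatile non-divergent
// global loads whose memory operand is known not to be clobbered.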
def smrd_load : PatFrag <(ops node:$ptr), (load node:$ptr), [{
  auto Ld = cast<LoadSDNode>(N);
  return Ld->getAlignment() >= 4 &&
    ((((Ld->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS) ||
       (Ld->getAddressSpace() == AMDGPUASI.CONSTANT_ADDRESS_32BIT)) && !N->isDivergent()) ||
     (Subtarget->getScalarizeGlobalBehavior() && Ld->getAddressSpace() == AMDGPUASI.GLOBAL_ADDRESS &&
      !Ld->isVolatile() && !N->isDivergent() &&
      static_cast<const SITargetLowering *>(getTargetLowering())->isMemOpHasNoClobberedMemOperand(N)));
}]>;

def SMRDImm         : ComplexPattern<i64, 2, "SelectSMRDImm">;
def SMRDImm32       : ComplexPattern<i64, 2, "SelectSMRDImm32">;
def SMRDSgpr        : ComplexPattern<i64, 2, "SelectSMRDSgpr">;
def SMRDBufferImm   : ComplexPattern<i32, 1, "SelectSMRDBufferImm">;
def SMRDBufferImm32 : ComplexPattern<i32, 1, "SelectSMRDBufferImm32">;

multiclass SMRD_Pattern <string Instr, ValueType vt> {

  def : GCNPat <
    (smrd_load (SMRDImm i64:$sbase, i32:$offset)),
    (vt (!cast<SM_Pseudo>(Instr#"_IMM") $sbase, $offset, 0))
  >;

  def : GCNPat <
    (smrd_load (SMRDSgpr i64:$sbase, i32:$offset)),
    (vt (!cast<SM_Pseudo>(Instr#"_SGPR") $sbase, $offset, 0))
  >;
}

let OtherPredicates = [isSICI] in {
def : GCNPat <
  (i64 (readcyclecounter)),
  (S_MEMTIME)
>;
}

// Global and constant loads can be selected to either MUBUF or SMRD
// instructions, but SMRD instructions are faster so we want the instruction
// selector to prefer those.
let AddedComplexity = 100 in {
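
// AddedComplexity raises the priority of the patterns below in the generated
// instruction selector, so they are tried before the equivalent MUBUF
// patterns.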
defm : SMRD_Pattern <"S_LOAD_DWORD",    i32>;
defm : SMRD_Pattern <"S_LOAD_DWORDX2",  v2i32>;
defm : SMRD_Pattern <"S_LOAD_DWORDX4",  v4i32>;
defm : SMRD_Pattern <"S_LOAD_DWORDX8",  v8i32>;
defm : SMRD_Pattern <"S_LOAD_DWORDX16", v16i32>;

// 1. Offset as an immediate
def SM_LOAD_PATTERN : GCNPat <  // name this pattern to reuse AddedComplexity on CI
  (SIload_constant v4i32:$sbase, (SMRDBufferImm i32:$offset)),
  (S_BUFFER_LOAD_DWORD_IMM $sbase, $offset, 0)
>;

// 2. Offset loaded in a 32-bit SGPR
def : GCNPat <
  (SIload_constant v4i32:$sbase, i32:$offset),
  (S_BUFFER_LOAD_DWORD_SGPR $sbase, $offset, 0)
>;

} // End let AddedComplexity = 100

let OtherPredicates = [isVI] in {

def : GCNPat <
  (i64 (readcyclecounter)),
  (S_MEMREALTIME)
>;

} // let OtherPredicates = [isVI]

//===----------------------------------------------------------------------===//
// Real Instructions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// SI
//===----------------------------------------------------------------------===//

class SMRD_Real_si <bits<5> op, SM_Pseudo ps>
  : SM_Real<ps>
  , SIMCInstr<ps.PseudoInstr, SIEncodingFamily.SI>
  , Enc32 {

  let AssemblerPredicates = [isSICI];
  let DecoderNamespace = "SICI";

  let Inst{7-0}   = !if(ps.has_offset, offset{7-0}, ?);
  let Inst{8}     = imm;
  let Inst{14-9}  = !if(ps.has_sbase, sbase{6-1}, ?);
  let Inst{21-15} = !if(ps.has_sdst, sdst{6-0}, ?);
  let Inst{26-22} = op;
  let Inst{31-27} = 0x18; //encoding
}
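
// Summary of the SI/CI SMRD encoding above: a single 32-bit word where bit 8
// (imm) selects whether the 8-bit offset field carries an immediate dword
// offset or the number of the SGPR holding the offset.
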
// FIXME: Assembler should reject trying to use glc on SMRD
// instructions on SI.
multiclass SM_Real_Loads_si<bits<5> op, string ps,
                            SM_Load_Pseudo immPs = !cast<SM_Load_Pseudo>(ps#_IMM),
                            SM_Load_Pseudo sgprPs = !cast<SM_Load_Pseudo>(ps#_SGPR)> {

  def _IMM_si : SMRD_Real_si <op, immPs> {
    let InOperandList = (ins immPs.BaseClass:$sbase, smrd_offset_8:$offset, GLC:$glc);
  }

  // FIXME: The operand name $offset is inconsistent with $soff used
  // in the pseudo.
  def _SGPR_si : SMRD_Real_si <op, sgprPs> {
    let InOperandList = (ins sgprPs.BaseClass:$sbase, SReg_32:$offset, GLC:$glc);
  }
}

defm S_LOAD_DWORD           : SM_Real_Loads_si <0x00, "S_LOAD_DWORD">;
defm S_LOAD_DWORDX2         : SM_Real_Loads_si <0x01, "S_LOAD_DWORDX2">;
defm S_LOAD_DWORDX4         : SM_Real_Loads_si <0x02, "S_LOAD_DWORDX4">;
defm S_LOAD_DWORDX8         : SM_Real_Loads_si <0x03, "S_LOAD_DWORDX8">;
defm S_LOAD_DWORDX16        : SM_Real_Loads_si <0x04, "S_LOAD_DWORDX16">;
defm S_BUFFER_LOAD_DWORD    : SM_Real_Loads_si <0x08, "S_BUFFER_LOAD_DWORD">;
defm S_BUFFER_LOAD_DWORDX2  : SM_Real_Loads_si <0x09, "S_BUFFER_LOAD_DWORDX2">;
defm S_BUFFER_LOAD_DWORDX4  : SM_Real_Loads_si <0x0a, "S_BUFFER_LOAD_DWORDX4">;
defm S_BUFFER_LOAD_DWORDX8  : SM_Real_Loads_si <0x0b, "S_BUFFER_LOAD_DWORDX8">;
defm S_BUFFER_LOAD_DWORDX16 : SM_Real_Loads_si <0x0c, "S_BUFFER_LOAD_DWORDX16">;

def S_MEMTIME_si    : SMRD_Real_si <0x1e, S_MEMTIME>;
def S_DCACHE_INV_si : SMRD_Real_si <0x1f, S_DCACHE_INV>;

//===----------------------------------------------------------------------===//
// VI
//===----------------------------------------------------------------------===//

class SMEM_Real_vi <bits<8> op, SM_Pseudo ps>
  : SM_Real<ps>
  , SIMCInstr<ps.PseudoInstr, SIEncodingFamily.VI>
  , Enc64 {
  bit glc;

  let AssemblerPredicates = [isVI];
  let DecoderNamespace = "VI";

  let Inst{5-0}   = !if(ps.has_sbase, sbase{6-1}, ?);
  let Inst{12-6}  = !if(ps.has_sdst, sdst{6-0}, ?);
  let Inst{16}    = !if(ps.has_glc, glc, ?);
  let Inst{17}    = imm;
  let Inst{25-18} = op;
  let Inst{31-26} = 0x30; //encoding
  let Inst{51-32} = !if(ps.has_offset, offset{19-0}, ?);
}
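
// Summary of the VI SMEM encoding above: a 64-bit encoding where bit 17 (imm)
// selects whether the 20-bit offset field in the second word carries an
// immediate byte offset or the number of the SGPR holding the offset.
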
multiclass SM_Real_Loads_vi<bits<8> op, string ps,
                            SM_Load_Pseudo immPs = !cast<SM_Load_Pseudo>(ps#_IMM),
                            SM_Load_Pseudo sgprPs = !cast<SM_Load_Pseudo>(ps#_SGPR)> {
  def _IMM_vi : SMEM_Real_vi <op, immPs> {
    let InOperandList = (ins immPs.BaseClass:$sbase, smrd_offset_20:$offset, GLC:$glc);
  }
  def _SGPR_vi : SMEM_Real_vi <op, sgprPs> {
    let InOperandList = (ins sgprPs.BaseClass:$sbase, SReg_32:$offset, GLC:$glc);
  }
}

class SMEM_Real_Store_vi <bits<8> op, SM_Pseudo ps> : SMEM_Real_vi <op, ps> {
  // encoding
  bits<7> sdata;

  let sdst = ?;
  let Inst{12-6} = !if(ps.has_sdst, sdata{6-0}, ?);
}

multiclass SM_Real_Stores_vi<bits<8> op, string ps,
                             SM_Store_Pseudo immPs = !cast<SM_Store_Pseudo>(ps#_IMM),
                             SM_Store_Pseudo sgprPs = !cast<SM_Store_Pseudo>(ps#_SGPR)> {
  // FIXME: The operand name $offset is inconsistent with $soff used
  // in the pseudo.
  def _IMM_vi : SMEM_Real_Store_vi <op, immPs> {
    let InOperandList = (ins immPs.SrcClass:$sdata, immPs.BaseClass:$sbase, smrd_offset_20:$offset, GLC:$glc);
  }

  def _SGPR_vi : SMEM_Real_Store_vi <op, sgprPs> {
    let InOperandList = (ins sgprPs.SrcClass:$sdata, sgprPs.BaseClass:$sbase, SReg_32:$offset, GLC:$glc);
  }
}

multiclass SM_Real_Probe_vi<bits<8> op, string ps> {
  def _IMM_vi  : SMEM_Real_Store_vi <op, !cast<SM_Probe_Pseudo>(ps#_IMM)>;
  def _SGPR_vi : SMEM_Real_Store_vi <op, !cast<SM_Probe_Pseudo>(ps#_SGPR)>;
}

defm S_LOAD_DWORD           : SM_Real_Loads_vi <0x00, "S_LOAD_DWORD">;
defm S_LOAD_DWORDX2         : SM_Real_Loads_vi <0x01, "S_LOAD_DWORDX2">;
defm S_LOAD_DWORDX4         : SM_Real_Loads_vi <0x02, "S_LOAD_DWORDX4">;
defm S_LOAD_DWORDX8         : SM_Real_Loads_vi <0x03, "S_LOAD_DWORDX8">;
defm S_LOAD_DWORDX16        : SM_Real_Loads_vi <0x04, "S_LOAD_DWORDX16">;
defm S_BUFFER_LOAD_DWORD    : SM_Real_Loads_vi <0x08, "S_BUFFER_LOAD_DWORD">;
defm S_BUFFER_LOAD_DWORDX2  : SM_Real_Loads_vi <0x09, "S_BUFFER_LOAD_DWORDX2">;
defm S_BUFFER_LOAD_DWORDX4  : SM_Real_Loads_vi <0x0a, "S_BUFFER_LOAD_DWORDX4">;
defm S_BUFFER_LOAD_DWORDX8  : SM_Real_Loads_vi <0x0b, "S_BUFFER_LOAD_DWORDX8">;
defm S_BUFFER_LOAD_DWORDX16 : SM_Real_Loads_vi <0x0c, "S_BUFFER_LOAD_DWORDX16">;

defm S_STORE_DWORD   : SM_Real_Stores_vi <0x10, "S_STORE_DWORD">;
defm S_STORE_DWORDX2 : SM_Real_Stores_vi <0x11, "S_STORE_DWORDX2">;
defm S_STORE_DWORDX4 : SM_Real_Stores_vi <0x12, "S_STORE_DWORDX4">;

defm S_BUFFER_STORE_DWORD   : SM_Real_Stores_vi <0x18, "S_BUFFER_STORE_DWORD">;
defm S_BUFFER_STORE_DWORDX2 : SM_Real_Stores_vi <0x19, "S_BUFFER_STORE_DWORDX2">;
defm S_BUFFER_STORE_DWORDX4 : SM_Real_Stores_vi <0x1a, "S_BUFFER_STORE_DWORDX4">;

// These instructions use the same encoding.
def S_DCACHE_INV_vi     : SMEM_Real_vi <0x20, S_DCACHE_INV>;
def S_DCACHE_WB_vi      : SMEM_Real_vi <0x21, S_DCACHE_WB>;
def S_DCACHE_INV_VOL_vi : SMEM_Real_vi <0x22, S_DCACHE_INV_VOL>;
def S_DCACHE_WB_VOL_vi  : SMEM_Real_vi <0x23, S_DCACHE_WB_VOL>;
def S_MEMTIME_vi        : SMEM_Real_vi <0x24, S_MEMTIME>;
def S_MEMREALTIME_vi    : SMEM_Real_vi <0x25, S_MEMREALTIME>;

defm S_SCRATCH_LOAD_DWORD   : SM_Real_Loads_vi <0x05, "S_SCRATCH_LOAD_DWORD">;
defm S_SCRATCH_LOAD_DWORDX2 : SM_Real_Loads_vi <0x06, "S_SCRATCH_LOAD_DWORDX2">;
defm S_SCRATCH_LOAD_DWORDX4 : SM_Real_Loads_vi <0x07, "S_SCRATCH_LOAD_DWORDX4">;

defm S_SCRATCH_STORE_DWORD   : SM_Real_Stores_vi <0x15, "S_SCRATCH_STORE_DWORD">;
defm S_SCRATCH_STORE_DWORDX2 : SM_Real_Stores_vi <0x16, "S_SCRATCH_STORE_DWORDX2">;
defm S_SCRATCH_STORE_DWORDX4 : SM_Real_Stores_vi <0x17, "S_SCRATCH_STORE_DWORDX4">;

defm S_ATC_PROBE        : SM_Real_Probe_vi <0x26, "S_ATC_PROBE">;
defm S_ATC_PROBE_BUFFER : SM_Real_Probe_vi <0x27, "S_ATC_PROBE_BUFFER">;

//===----------------------------------------------------------------------===//
// Scalar Memory Atomics
//===----------------------------------------------------------------------===//

class SMEM_Atomic_Real_vi <bits<8> op, SM_Atomic_Pseudo ps>
  : SMEM_Real_vi <op, ps> {

  bits<7> sdata;

  let Constraints = ps.Constraints;
  let DisableEncoding = ps.DisableEncoding;

  let Inst{12-6} = !if(glc, sdst{6-0}, sdata{6-0});
}

multiclass SM_Real_Atomics_vi<bits<8> op, string ps> {
  def _IMM_vi      : SMEM_Atomic_Real_vi <op, !cast<SM_Atomic_Pseudo>(ps#_IMM)>;
  def _SGPR_vi     : SMEM_Atomic_Real_vi <op, !cast<SM_Atomic_Pseudo>(ps#_SGPR)>;
  def _IMM_RTN_vi  : SMEM_Atomic_Real_vi <op, !cast<SM_Atomic_Pseudo>(ps#_IMM_RTN)>;
  def _SGPR_RTN_vi : SMEM_Atomic_Real_vi <op, !cast<SM_Atomic_Pseudo>(ps#_SGPR_RTN)>;
}

defm S_BUFFER_ATOMIC_SWAP    : SM_Real_Atomics_vi <0x40, "S_BUFFER_ATOMIC_SWAP">;
defm S_BUFFER_ATOMIC_CMPSWAP : SM_Real_Atomics_vi <0x41, "S_BUFFER_ATOMIC_CMPSWAP">;
defm S_BUFFER_ATOMIC_ADD     : SM_Real_Atomics_vi <0x42, "S_BUFFER_ATOMIC_ADD">;
defm S_BUFFER_ATOMIC_SUB     : SM_Real_Atomics_vi <0x43, "S_BUFFER_ATOMIC_SUB">;
defm S_BUFFER_ATOMIC_SMIN    : SM_Real_Atomics_vi <0x44, "S_BUFFER_ATOMIC_SMIN">;
defm S_BUFFER_ATOMIC_UMIN    : SM_Real_Atomics_vi <0x45, "S_BUFFER_ATOMIC_UMIN">;
defm S_BUFFER_ATOMIC_SMAX    : SM_Real_Atomics_vi <0x46, "S_BUFFER_ATOMIC_SMAX">;
defm S_BUFFER_ATOMIC_UMAX    : SM_Real_Atomics_vi <0x47, "S_BUFFER_ATOMIC_UMAX">;
defm S_BUFFER_ATOMIC_AND     : SM_Real_Atomics_vi <0x48, "S_BUFFER_ATOMIC_AND">;
defm S_BUFFER_ATOMIC_OR      : SM_Real_Atomics_vi <0x49, "S_BUFFER_ATOMIC_OR">;
defm S_BUFFER_ATOMIC_XOR     : SM_Real_Atomics_vi <0x4a, "S_BUFFER_ATOMIC_XOR">;
defm S_BUFFER_ATOMIC_INC     : SM_Real_Atomics_vi <0x4b, "S_BUFFER_ATOMIC_INC">;
defm S_BUFFER_ATOMIC_DEC     : SM_Real_Atomics_vi <0x4c, "S_BUFFER_ATOMIC_DEC">;

defm S_BUFFER_ATOMIC_SWAP_X2    : SM_Real_Atomics_vi <0x60, "S_BUFFER_ATOMIC_SWAP_X2">;
defm S_BUFFER_ATOMIC_CMPSWAP_X2 : SM_Real_Atomics_vi <0x61, "S_BUFFER_ATOMIC_CMPSWAP_X2">;
defm S_BUFFER_ATOMIC_ADD_X2     : SM_Real_Atomics_vi <0x62, "S_BUFFER_ATOMIC_ADD_X2">;
defm S_BUFFER_ATOMIC_SUB_X2     : SM_Real_Atomics_vi <0x63, "S_BUFFER_ATOMIC_SUB_X2">;
defm S_BUFFER_ATOMIC_SMIN_X2    : SM_Real_Atomics_vi <0x64, "S_BUFFER_ATOMIC_SMIN_X2">;
defm S_BUFFER_ATOMIC_UMIN_X2    : SM_Real_Atomics_vi <0x65, "S_BUFFER_ATOMIC_UMIN_X2">;
defm S_BUFFER_ATOMIC_SMAX_X2    : SM_Real_Atomics_vi <0x66, "S_BUFFER_ATOMIC_SMAX_X2">;
defm S_BUFFER_ATOMIC_UMAX_X2    : SM_Real_Atomics_vi <0x67, "S_BUFFER_ATOMIC_UMAX_X2">;
defm S_BUFFER_ATOMIC_AND_X2     : SM_Real_Atomics_vi <0x68, "S_BUFFER_ATOMIC_AND_X2">;
defm S_BUFFER_ATOMIC_OR_X2      : SM_Real_Atomics_vi <0x69, "S_BUFFER_ATOMIC_OR_X2">;
defm S_BUFFER_ATOMIC_XOR_X2     : SM_Real_Atomics_vi <0x6a, "S_BUFFER_ATOMIC_XOR_X2">;
defm S_BUFFER_ATOMIC_INC_X2     : SM_Real_Atomics_vi <0x6b, "S_BUFFER_ATOMIC_INC_X2">;
defm S_BUFFER_ATOMIC_DEC_X2     : SM_Real_Atomics_vi <0x6c, "S_BUFFER_ATOMIC_DEC_X2">;

defm S_ATOMIC_SWAP    : SM_Real_Atomics_vi <0x80, "S_ATOMIC_SWAP">;
defm S_ATOMIC_CMPSWAP : SM_Real_Atomics_vi <0x81, "S_ATOMIC_CMPSWAP">;
defm S_ATOMIC_ADD     : SM_Real_Atomics_vi <0x82, "S_ATOMIC_ADD">;
defm S_ATOMIC_SUB     : SM_Real_Atomics_vi <0x83, "S_ATOMIC_SUB">;
defm S_ATOMIC_SMIN    : SM_Real_Atomics_vi <0x84, "S_ATOMIC_SMIN">;
defm S_ATOMIC_UMIN    : SM_Real_Atomics_vi <0x85, "S_ATOMIC_UMIN">;
defm S_ATOMIC_SMAX    : SM_Real_Atomics_vi <0x86, "S_ATOMIC_SMAX">;
defm S_ATOMIC_UMAX    : SM_Real_Atomics_vi <0x87, "S_ATOMIC_UMAX">;
defm S_ATOMIC_AND     : SM_Real_Atomics_vi <0x88, "S_ATOMIC_AND">;
defm S_ATOMIC_OR      : SM_Real_Atomics_vi <0x89, "S_ATOMIC_OR">;
defm S_ATOMIC_XOR     : SM_Real_Atomics_vi <0x8a, "S_ATOMIC_XOR">;
defm S_ATOMIC_INC     : SM_Real_Atomics_vi <0x8b, "S_ATOMIC_INC">;
defm S_ATOMIC_DEC     : SM_Real_Atomics_vi <0x8c, "S_ATOMIC_DEC">;

defm S_ATOMIC_SWAP_X2    : SM_Real_Atomics_vi <0xa0, "S_ATOMIC_SWAP_X2">;
defm S_ATOMIC_CMPSWAP_X2 : SM_Real_Atomics_vi <0xa1, "S_ATOMIC_CMPSWAP_X2">;
defm S_ATOMIC_ADD_X2     : SM_Real_Atomics_vi <0xa2, "S_ATOMIC_ADD_X2">;
defm S_ATOMIC_SUB_X2     : SM_Real_Atomics_vi <0xa3, "S_ATOMIC_SUB_X2">;
defm S_ATOMIC_SMIN_X2    : SM_Real_Atomics_vi <0xa4, "S_ATOMIC_SMIN_X2">;
defm S_ATOMIC_UMIN_X2    : SM_Real_Atomics_vi <0xa5, "S_ATOMIC_UMIN_X2">;
defm S_ATOMIC_SMAX_X2    : SM_Real_Atomics_vi <0xa6, "S_ATOMIC_SMAX_X2">;
defm S_ATOMIC_UMAX_X2    : SM_Real_Atomics_vi <0xa7, "S_ATOMIC_UMAX_X2">;
defm S_ATOMIC_AND_X2     : SM_Real_Atomics_vi <0xa8, "S_ATOMIC_AND_X2">;
defm S_ATOMIC_OR_X2      : SM_Real_Atomics_vi <0xa9, "S_ATOMIC_OR_X2">;
defm S_ATOMIC_XOR_X2     : SM_Real_Atomics_vi <0xaa, "S_ATOMIC_XOR_X2">;
defm S_ATOMIC_INC_X2     : SM_Real_Atomics_vi <0xab, "S_ATOMIC_INC_X2">;
defm S_ATOMIC_DEC_X2     : SM_Real_Atomics_vi <0xac, "S_ATOMIC_DEC_X2">;

multiclass SM_Real_Discard_vi<bits<8> op, string ps> {
  def _IMM_vi  : SMEM_Real_vi <op, !cast<SM_Discard_Pseudo>(ps#_IMM)>;
  def _SGPR_vi : SMEM_Real_vi <op, !cast<SM_Discard_Pseudo>(ps#_SGPR)>;
}

defm S_DCACHE_DISCARD    : SM_Real_Discard_vi <0x28, "S_DCACHE_DISCARD">;
defm S_DCACHE_DISCARD_X2 : SM_Real_Discard_vi <0x29, "S_DCACHE_DISCARD_X2">;

//===----------------------------------------------------------------------===//
// CI
//===----------------------------------------------------------------------===//

def smrd_literal_offset : NamedOperandU32<"SMRDLiteralOffset",
                                          NamedMatchClass<"SMRDLiteralOffset">> {
  let OperandType = "OPERAND_IMMEDIATE";
}

class SMRD_Real_Load_IMM_ci <bits<5> op, SM_Load_Pseudo ps> :
  SM_Real<ps>,
  Enc64 {

  let AssemblerPredicates = [isCIOnly];
  let DecoderNamespace = "CI";
  let InOperandList = (ins ps.BaseClass:$sbase, smrd_literal_offset:$offset, GLC:$glc);

  let LGKM_CNT = ps.LGKM_CNT;
  let mayLoad = ps.mayLoad;
  let mayStore = ps.mayStore;
  let hasSideEffects = ps.hasSideEffects;
  let SchedRW = ps.SchedRW;
  let UseNamedOperandTable = ps.UseNamedOperandTable;

  let Inst{7-0}   = 0xff;
  let Inst{14-9}  = sbase{6-1};
  let Inst{21-15} = sdst{6-0};
  let Inst{26-22} = op;
  let Inst{31-27} = 0x18; //encoding
  let Inst{63-32} = offset{31-0};
}
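
// On CI the IMM load forms can also encode a 32-bit literal offset: the 8-bit
// offset field is forced to 0xff and the literal is carried in the second
// encoding word (Inst{63-32} above).
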
def S_LOAD_DWORD_IMM_ci           : SMRD_Real_Load_IMM_ci <0x00, S_LOAD_DWORD_IMM>;
def S_LOAD_DWORDX2_IMM_ci         : SMRD_Real_Load_IMM_ci <0x01, S_LOAD_DWORDX2_IMM>;
def S_LOAD_DWORDX4_IMM_ci         : SMRD_Real_Load_IMM_ci <0x02, S_LOAD_DWORDX4_IMM>;
def S_LOAD_DWORDX8_IMM_ci         : SMRD_Real_Load_IMM_ci <0x03, S_LOAD_DWORDX8_IMM>;
def S_LOAD_DWORDX16_IMM_ci        : SMRD_Real_Load_IMM_ci <0x04, S_LOAD_DWORDX16_IMM>;
def S_BUFFER_LOAD_DWORD_IMM_ci    : SMRD_Real_Load_IMM_ci <0x08, S_BUFFER_LOAD_DWORD_IMM>;
def S_BUFFER_LOAD_DWORDX2_IMM_ci  : SMRD_Real_Load_IMM_ci <0x09, S_BUFFER_LOAD_DWORDX2_IMM>;
def S_BUFFER_LOAD_DWORDX4_IMM_ci  : SMRD_Real_Load_IMM_ci <0x0a, S_BUFFER_LOAD_DWORDX4_IMM>;
def S_BUFFER_LOAD_DWORDX8_IMM_ci  : SMRD_Real_Load_IMM_ci <0x0b, S_BUFFER_LOAD_DWORDX8_IMM>;
def S_BUFFER_LOAD_DWORDX16_IMM_ci : SMRD_Real_Load_IMM_ci <0x0c, S_BUFFER_LOAD_DWORDX16_IMM>;

class SMRD_Real_ci <bits<5> op, SM_Pseudo ps>
  : SM_Real<ps>
  , SIMCInstr<ps.PseudoInstr, SIEncodingFamily.SI>
  , Enc32 {

  let AssemblerPredicates = [isCIOnly];
  let DecoderNamespace = "CI";

  let Inst{7-0}   = !if(ps.has_offset, offset{7-0}, ?);
  let Inst{8}     = imm;
  let Inst{14-9}  = !if(ps.has_sbase, sbase{6-1}, ?);
  let Inst{21-15} = !if(ps.has_sdst, sdst{6-0}, ?);
  let Inst{26-22} = op;
  let Inst{31-27} = 0x18; //encoding
}

def S_DCACHE_INV_VOL_ci : SMRD_Real_ci <0x1d, S_DCACHE_INV_VOL>;

let AddedComplexity = SM_LOAD_PATTERN.AddedComplexity in {

class SMRD_Pattern_ci <string Instr, ValueType vt> : GCNPat <
  (smrd_load (SMRDImm32 i64:$sbase, i32:$offset)),
  (vt (!cast<InstSI>(Instr#"_IMM_ci") $sbase, $offset, 0))> {
  let OtherPredicates = [isCIOnly];
}

def : SMRD_Pattern_ci <"S_LOAD_DWORD",    i32>;
def : SMRD_Pattern_ci <"S_LOAD_DWORDX2",  v2i32>;
def : SMRD_Pattern_ci <"S_LOAD_DWORDX4",  v4i32>;
def : SMRD_Pattern_ci <"S_LOAD_DWORDX8",  v8i32>;
def : SMRD_Pattern_ci <"S_LOAD_DWORDX16", v16i32>;

def : GCNPat <
  (SIload_constant v4i32:$sbase, (SMRDBufferImm32 i32:$offset)),
  (S_BUFFER_LOAD_DWORD_IMM_ci $sbase, $offset, 0)> {
  let OtherPredicates = [isCI]; // should this be isCIOnly?
}

} // End let AddedComplexity = SM_LOAD_PATTERN.AddedComplexity