//===- llvm/lib/Target/AMDGPU/AMDGPUGISel.td ------------------------------===//
//===-- AMDGPUGIsel.td - AMDGPU GlobalISel Patterns---------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// This file contains patterns that should only be used by GlobalISel.  For
// example patterns for V_* instructions that have S_* equivalents.
// SelectionDAG does not support selecting V_* instructions.
//===----------------------------------------------------------------------===//
include "AMDGPU.td"
include "AMDGPUCombine.td"

// Placeholder SelectionDAG ComplexPatterns paired (via GIComplexPatternEquiv)
// with the GlobalISel operand matchers used by the GISel-only patterns below.
def sd_vsrc0 : ComplexPattern<i32, 1, "">;
def gi_vsrc0 :
    GIComplexOperandMatcher<s32, "selectVSRC0">,
    GIComplexPatternEquiv<sd_vsrc0>;

def sd_vcsrc : ComplexPattern<i32, 1, "">;
def gi_vcsrc :
    GIComplexOperandMatcher<s32, "selectVCSRC">,
    GIComplexPatternEquiv<sd_vcsrc>;
// GlobalISel equivalents of the VOP3 source/output-modifier ComplexPatterns.
def gi_vop3mods0 :
    GIComplexOperandMatcher<s32, "selectVOP3Mods0">,
    GIComplexPatternEquiv<VOP3Mods0>;

def gi_vop3mods :
    GIComplexOperandMatcher<s32, "selectVOP3Mods">,
    GIComplexPatternEquiv<VOP3Mods>;

def gi_vop3_no_mods :
    GIComplexOperandMatcher<s32, "selectVOP3NoMods">,
    GIComplexPatternEquiv<VOP3NoMods>;

def gi_vop3mods_nnan :
    GIComplexOperandMatcher<s32, "selectVOP3Mods_nnan">,
    GIComplexPatternEquiv<VOP3Mods_nnan>;

def gi_vop3omods :
    GIComplexOperandMatcher<s32, "selectVOP3OMods">,
    GIComplexPatternEquiv<VOP3OMods>;

def gi_vop3pmods :
    GIComplexOperandMatcher<s32, "selectVOP3PMods">,
    GIComplexPatternEquiv<VOP3PMods>;

def gi_vop3opselmods :
    GIComplexOperandMatcher<s32, "selectVOP3OpSelMods">,
    GIComplexPatternEquiv<VOP3OpSelMods>;

// FIXME: Why do we have both VOP3OpSel and VOP3OpSelMods?
// Note both equivalences deliberately route to the same selector function.
def gi_vop3opsel :
    GIComplexOperandMatcher<s32, "selectVOP3OpSelMods">,
    GIComplexPatternEquiv<VOP3OpSel>;
// Scalar memory (SMRD) addressing-mode matchers.
def gi_smrd_imm :
    GIComplexOperandMatcher<s64, "selectSmrdImm">,
    GIComplexPatternEquiv<SMRDImm>;

def gi_smrd_imm32 :
    GIComplexOperandMatcher<s64, "selectSmrdImm32">,
    GIComplexPatternEquiv<SMRDImm32>;

def gi_smrd_sgpr :
    GIComplexOperandMatcher<s64, "selectSmrdSgpr">,
    GIComplexPatternEquiv<SMRDSgpr>;
// FLAT / GLOBAL / scratch addressing-mode matchers.
def gi_flat_offset :
    GIComplexOperandMatcher<s64, "selectFlatOffset">,
    GIComplexPatternEquiv<FlatOffset>;
def gi_global_offset :
    GIComplexOperandMatcher<s64, "selectGlobalOffset">,
    GIComplexPatternEquiv<GlobalOffset>;
def gi_global_saddr :
    GIComplexOperandMatcher<s64, "selectGlobalSAddr">,
    GIComplexPatternEquiv<GlobalSAddr>;

def gi_mubuf_scratch_offset :
    GIComplexOperandMatcher<s32, "selectMUBUFScratchOffset">,
    GIComplexPatternEquiv<MUBUFScratchOffset>;
def gi_mubuf_scratch_offen :
    GIComplexOperandMatcher<s32, "selectMUBUFScratchOffen">,
    GIComplexPatternEquiv<MUBUFScratchOffen>;

def gi_flat_scratch_offset :
    GIComplexOperandMatcher<s32, "selectScratchOffset">,
    GIComplexPatternEquiv<ScratchOffset>;

def gi_flat_scratch_saddr :
    GIComplexOperandMatcher<s32, "selectScratchSAddr">,
    GIComplexPatternEquiv<ScratchSAddr>;
// DS (LDS/GDS), MUBUF and SMRD-buffer addressing-mode matchers.
def gi_ds_1addr_1offset :
    GIComplexOperandMatcher<s32, "selectDS1Addr1Offset">,
    GIComplexPatternEquiv<DS1Addr1Offset>;

def gi_ds_64bit_4byte_aligned :
    GIComplexOperandMatcher<s64, "selectDS64Bit4ByteAligned">,
    GIComplexPatternEquiv<DS64Bit4ByteAligned>;

def gi_ds_128bit_8byte_aligned :
    GIComplexOperandMatcher<s64, "selectDS128Bit8ByteAligned">,
    GIComplexPatternEquiv<DS128Bit8ByteAligned>;

def gi_mubuf_addr64 :
    GIComplexOperandMatcher<s64, "selectMUBUFAddr64">,
    GIComplexPatternEquiv<MUBUFAddr64>;

def gi_mubuf_offset :
    GIComplexOperandMatcher<s64, "selectMUBUFOffset">,
    GIComplexPatternEquiv<MUBUFOffset>;

def gi_smrd_buffer_imm :
    GIComplexOperandMatcher<s64, "selectSMRDBufferImm">,
    GIComplexPatternEquiv<SMRDBufferImm>;

def gi_smrd_buffer_imm32 :
    GIComplexOperandMatcher<s64, "selectSMRDBufferImm32">,
    GIComplexPatternEquiv<SMRDBufferImm32>;
// Separate load nodes are defined to glue m0 initialization in
// SelectionDAG. The GISel selector can just insert m0 initialization
// directly before selecting a glue-less load, so hide this
// distinction.

def : GINodeEquiv<G_LOAD, AMDGPUld_glue> {
  let CheckMMOIsNonAtomic = 1;
  let IfSignExtend = G_SEXTLOAD;
  let IfZeroExtend = G_ZEXTLOAD;
}

def : GINodeEquiv<G_STORE, AMDGPUst_glue> {
  let CheckMMOIsNonAtomic = 1;
}

// NOTE(review): the two defs below use 'bit' where the defs above use 'let';
// confirm against GINodeEquiv's field list that this is intentional.
def : GINodeEquiv<G_LOAD, AMDGPUatomic_ld_glue> {
  bit CheckMMOIsAtomic = 1;
}

def : GINodeEquiv<G_STORE, AMDGPUatomic_st_glue> {
  bit CheckMMOIsAtomic = 1;
}
// Map the glued atomic RMW / cmpxchg SelectionDAG nodes to the corresponding
// generic GlobalISel opcodes.
def : GINodeEquiv<G_ATOMIC_CMPXCHG, atomic_cmp_swap_glue>;
def : GINodeEquiv<G_ATOMICRMW_XCHG, atomic_swap_glue>;
def : GINodeEquiv<G_ATOMICRMW_ADD, atomic_load_add_glue>;
def : GINodeEquiv<G_ATOMICRMW_SUB, atomic_load_sub_glue>;
def : GINodeEquiv<G_ATOMICRMW_AND, atomic_load_and_glue>;
def : GINodeEquiv<G_ATOMICRMW_OR, atomic_load_or_glue>;
def : GINodeEquiv<G_ATOMICRMW_XOR, atomic_load_xor_glue>;
def : GINodeEquiv<G_ATOMICRMW_MIN, atomic_load_min_glue>;
def : GINodeEquiv<G_ATOMICRMW_MAX, atomic_load_max_glue>;
def : GINodeEquiv<G_ATOMICRMW_UMIN, atomic_load_umin_glue>;
def : GINodeEquiv<G_ATOMICRMW_UMAX, atomic_load_umax_glue>;
def : GINodeEquiv<G_ATOMICRMW_FADD, atomic_load_fadd_glue>;
// AMDGPU-specific target nodes and their generic-MI equivalents.
def : GINodeEquiv<G_AMDGPU_FFBH_U32, AMDGPUffbh_u32_impl>;
def : GINodeEquiv<G_AMDGPU_FFBL_B32, AMDGPUffbl_b32_impl>;
def : GINodeEquiv<G_AMDGPU_FMIN_LEGACY, AMDGPUfmin_legacy>;
def : GINodeEquiv<G_AMDGPU_FMAX_LEGACY, AMDGPUfmax_legacy>;
def : GINodeEquiv<G_AMDGPU_RCP_IFLAG, AMDGPUrcp_iflag>;

def : GINodeEquiv<G_AMDGPU_CVT_F32_UBYTE0, AMDGPUcvt_f32_ubyte0>;
def : GINodeEquiv<G_AMDGPU_CVT_F32_UBYTE1, AMDGPUcvt_f32_ubyte1>;
def : GINodeEquiv<G_AMDGPU_CVT_F32_UBYTE2, AMDGPUcvt_f32_ubyte2>;
def : GINodeEquiv<G_AMDGPU_CVT_F32_UBYTE3, AMDGPUcvt_f32_ubyte3>;

def : GINodeEquiv<G_AMDGPU_CVT_PK_I16_I32, AMDGPUpk_i16_i32_impl>;
def : GINodeEquiv<G_AMDGPU_SMED3, AMDGPUsmed3>;
def : GINodeEquiv<G_AMDGPU_UMED3, AMDGPUumed3>;
// Buffer load/store intrinsic nodes and their generic-MI equivalents.
def : GINodeEquiv<G_AMDGPU_ATOMIC_CMPXCHG, AMDGPUatomic_cmp_swap>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD, SIbuffer_load>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_USHORT, SIbuffer_load_ushort>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_UBYTE, SIbuffer_load_ubyte>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_SSHORT, SIbuffer_load_short>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_SBYTE, SIbuffer_load_byte>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_FORMAT, SIbuffer_load_format>;
def : GINodeEquiv<G_AMDGPU_BUFFER_LOAD_FORMAT_D16, SIbuffer_load_format_d16>;
def : GINodeEquiv<G_AMDGPU_TBUFFER_LOAD_FORMAT, SItbuffer_load>;
def : GINodeEquiv<G_AMDGPU_TBUFFER_LOAD_FORMAT_D16, SItbuffer_load_d16>;
def : GINodeEquiv<G_AMDGPU_BUFFER_STORE, SIbuffer_store>;
def : GINodeEquiv<G_AMDGPU_BUFFER_STORE_SHORT, SIbuffer_store_short>;
def : GINodeEquiv<G_AMDGPU_BUFFER_STORE_BYTE, SIbuffer_store_byte>;
def : GINodeEquiv<G_AMDGPU_BUFFER_STORE_FORMAT, SIbuffer_store_format>;
def : GINodeEquiv<G_AMDGPU_BUFFER_STORE_FORMAT_D16, SIbuffer_store_format_d16>;
def : GINodeEquiv<G_AMDGPU_TBUFFER_STORE_FORMAT, SItbuffer_store>;
def : GINodeEquiv<G_AMDGPU_TBUFFER_STORE_FORMAT_D16, SItbuffer_store_d16>;
// FIXME: Check MMO is atomic
def : GINodeEquiv<G_AMDGPU_ATOMIC_INC, SIatomic_inc>;
def : GINodeEquiv<G_AMDGPU_ATOMIC_DEC, SIatomic_dec>;
def : GINodeEquiv<G_AMDGPU_ATOMIC_INC, atomic_inc_glue>;
def : GINodeEquiv<G_AMDGPU_ATOMIC_DEC, atomic_dec_glue>;
def : GINodeEquiv<G_AMDGPU_ATOMIC_FMIN, SIatomic_fmin>;
def : GINodeEquiv<G_AMDGPU_ATOMIC_FMAX, SIatomic_fmax>;
def : GINodeEquiv<G_AMDGPU_ATOMIC_FMIN, atomic_load_fmin_glue>;
def : GINodeEquiv<G_AMDGPU_ATOMIC_FMAX, atomic_load_fmax_glue>;
// Buffer atomic intrinsic nodes and their generic-MI equivalents.
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_SWAP, SIbuffer_atomic_swap>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_ADD, SIbuffer_atomic_add>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_SUB, SIbuffer_atomic_sub>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_SMIN, SIbuffer_atomic_smin>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_UMIN, SIbuffer_atomic_umin>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_SMAX, SIbuffer_atomic_smax>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_UMAX, SIbuffer_atomic_umax>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_AND, SIbuffer_atomic_and>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_OR, SIbuffer_atomic_or>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_XOR, SIbuffer_atomic_xor>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_INC, SIbuffer_atomic_inc>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_DEC, SIbuffer_atomic_dec>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_FADD, SIbuffer_atomic_fadd>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_FMIN, SIbuffer_atomic_fmin>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_FMAX, SIbuffer_atomic_fmax>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_CMPSWAP, SIbuffer_atomic_cmpswap>;
def : GINodeEquiv<G_AMDGPU_S_BUFFER_LOAD, SIsbuffer_load>;
// Two-source SALU pattern: both sources constrained to SReg_32.
class GISelSop2Pat <
  SDPatternOperator node,
  Instruction inst,
  ValueType dst_vt,
  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt>   : GCNPat <

  (dst_vt (node (src0_vt SReg_32:$src0), (src1_vt SReg_32:$src1))),
  (inst src0_vt:$src0, src1_vt:$src1)
>;
// Two-source VALU pattern: src0 goes through the sd_vsrc0 ComplexPattern,
// src1 is constrained to VGPR_32.
class GISelVop2Pat <
  SDPatternOperator node,
  Instruction inst,
  ValueType dst_vt,
  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt>   : GCNPat <

  (dst_vt (node (src0_vt (sd_vsrc0 src0_vt:$src0)), (src1_vt VGPR_32:$src1))),
  (inst src0_vt:$src0, src1_vt:$src1)
>;
// Commuted form of GISelVop2Pat: the source operands are matched in swapped
// order but emitted in the instruction's canonical operand order.
class GISelVop2CommutePat <
  SDPatternOperator node,
  Instruction inst,
  ValueType dst_vt,
  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt>   : GCNPat <

  (dst_vt (node (src1_vt VGPR_32:$src1), (src0_vt (sd_vsrc0 src0_vt:$src0)))),
  (inst src0_vt:$src0, src1_vt:$src1)
>;
// Two-source VOP3 pattern: both sources go through the sd_vcsrc
// ComplexPattern.
class GISelVop3Pat2 <
  SDPatternOperator node,
  Instruction inst,
  ValueType dst_vt,
  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt>   : GCNPat <

  (dst_vt (node (src0_vt (sd_vcsrc src0_vt:$src0)), (src1_vt (sd_vcsrc src1_vt:$src1)))),
  (inst src0_vt:$src0, src1_vt:$src1)
>;
// Commuted form of GISelVop3Pat2: the emitted instruction swaps the two
// matched sources.
class GISelVop3Pat2CommutePat <
  SDPatternOperator node,
  Instruction inst,
  ValueType dst_vt,
  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt>   : GCNPat <

  (dst_vt (node (src0_vt (sd_vcsrc src0_vt:$src0)), (src1_vt (sd_vcsrc src1_vt:$src1)))),
  (inst src0_vt:$src1, src1_vt:$src0)
>;
// Two-source VOP3 pattern with source modifiers, clamp and output modifiers
// captured through VOP3Mods0/VOP3Mods and forwarded to the instruction.
class GISelVop3Pat2ModsPat <
  SDPatternOperator node,
  Instruction inst,
  ValueType dst_vt,
  ValueType src0_vt = dst_vt, ValueType src1_vt = src0_vt> : GCNPat <

  (dst_vt (node (src0_vt (VOP3Mods0 src0_vt:$src0, i32:$src0_modifiers, i1:$clamp, i32:$omods)),
                (src1_vt (VOP3Mods src1_vt:$src1, i32:$src1_modifiers)))),
  (inst i32:$src0_modifiers, src0_vt:$src0,
        i32:$src1_modifiers, src1_vt:$src1, $clamp, $omods)
>;
// Emits both operand orders of a VOP2 intrinsic pattern.
multiclass GISelVop2IntrPat <
  SDPatternOperator node, Instruction inst,
  ValueType dst_vt, ValueType src_vt = dst_vt> {

  def : GISelVop2Pat <node, inst, dst_vt, src_vt>;

  // FIXME: Intrinsics aren't marked as commutable, so we need to add an explicit
  // pattern to handle commuting.  This is another reason why legalizing to a
  // generic machine instruction may be better than matching the intrinsic
  // directly.
  def : GISelVop2CommutePat <node, inst, dst_vt, src_vt>;
}
// Since GlobalISel is more flexible than SelectionDAG, I think we can get
// away with adding patterns for integer types and not legalizing all
// loads and stores to vector types.  This should help simplify the load/store
// legalization.
foreach Ty = [i64, p0, p1, p4] in {
  defm : SMRD_Pattern <"S_LOAD_DWORDX2",  Ty>;
}
// GlobalISel operand renderers paired with the SelectionDAG SDNodeXForms of
// the same names.  The first four all share the renderTruncTImm C++ renderer.
def gi_as_i32timm : GICustomOperandRenderer<"renderTruncTImm">,
  GISDNodeXFormEquiv<as_i32timm>;

def gi_as_i16timm : GICustomOperandRenderer<"renderTruncTImm">,
  GISDNodeXFormEquiv<as_i16timm>;

def gi_as_i8timm : GICustomOperandRenderer<"renderTruncTImm">,
  GISDNodeXFormEquiv<as_i8timm>;

def gi_as_i1timm : GICustomOperandRenderer<"renderTruncTImm">,
  GISDNodeXFormEquiv<as_i1timm>;

def gi_NegateImm : GICustomOperandRenderer<"renderNegateImm">,
  GISDNodeXFormEquiv<NegateImm>;

def gi_bitcast_fpimm_to_i32 : GICustomOperandRenderer<"renderBitcastImm">,
  GISDNodeXFormEquiv<bitcast_fpimm_to_i32>;

def gi_IMMPopCount : GICustomOperandRenderer<"renderPopcntImm">,
  GISDNodeXFormEquiv<IMMPopCount>;

def gi_extract_cpol : GICustomOperandRenderer<"renderExtractCPol">,
  GISDNodeXFormEquiv<extract_cpol>;

def gi_extract_swz : GICustomOperandRenderer<"renderExtractSWZ">,
  GISDNodeXFormEquiv<extract_swz>;

def gi_set_glc : GICustomOperandRenderer<"renderSetGLC">,
  GISDNodeXFormEquiv<set_glc>;

def gi_frameindex_to_targetframeindex : GICustomOperandRenderer<"renderFrameIndex">,
  GISDNodeXFormEquiv<frameindex_to_targetframeindex>;