//===- IntrinsicsAMDGPU.td - Defines AMDGPU intrinsics -----*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the R600-specific intrinsics.
//
//===----------------------------------------------------------------------===//

class AMDGPUReadPreloadRegisterIntrinsic
  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>;

class AMDGPUReadPreloadRegisterIntrinsicNamed<string name>
  : Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>, GCCBuiltin<name>;

// Used to tag image and resource intrinsics with information used to generate
// mem operands.
class AMDGPURsrcIntrinsic<int rsrcarg, bit isimage = 0> {
  int RsrcArg = rsrcarg;
  bit IsImage = isimage;
}

let TargetPrefix = "r600" in {

multiclass AMDGPUReadPreloadRegisterIntrinsic_xyz {
  def _x : AMDGPUReadPreloadRegisterIntrinsic;
  def _y : AMDGPUReadPreloadRegisterIntrinsic;
  def _z : AMDGPUReadPreloadRegisterIntrinsic;
}

multiclass AMDGPUReadPreloadRegisterIntrinsic_xyz_named<string prefix> {
  def _x : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_x")>;
  def _y : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_y")>;
  def _z : AMDGPUReadPreloadRegisterIntrinsicNamed<!strconcat(prefix, "_z")>;
}

defm int_r600_read_global_size : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                                 <"__builtin_r600_read_global_size">;
defm int_r600_read_ngroups : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                             <"__builtin_r600_read_ngroups">;
defm int_r600_read_tgid : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                          <"__builtin_r600_read_tgid">;

defm int_r600_read_local_size : AMDGPUReadPreloadRegisterIntrinsic_xyz;
defm int_r600_read_tidig : AMDGPUReadPreloadRegisterIntrinsic_xyz;

def int_r600_group_barrier : GCCBuiltin<"__builtin_r600_group_barrier">,
  Intrinsic<[], [], [IntrConvergent]>;

// AS 7 is PARAM_I_ADDRESS, used for kernel arguments
def int_r600_implicitarg_ptr :
  GCCBuiltin<"__builtin_r600_implicitarg_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 7>], [],
  [IntrNoMem, IntrSpeculatable]>;

def int_r600_rat_store_typed :
  // 1st parameter: Data
  // 2nd parameter: Index
  // 3rd parameter: Constant RAT ID
  Intrinsic<[], [llvm_v4i32_ty, llvm_v4i32_ty, llvm_i32_ty], []>,
  GCCBuiltin<"__builtin_r600_rat_store_typed">;

def int_r600_recipsqrt_ieee : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_r600_recipsqrt_clamped : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_r600_cube : Intrinsic<
  [llvm_v4f32_ty], [llvm_v4f32_ty], [IntrNoMem, IntrSpeculatable]
>;

def int_r600_store_stream_output : Intrinsic<
  [], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty], []
>;

class TextureIntrinsicFloatInput : Intrinsic<[llvm_v4f32_ty], [
  llvm_v4f32_ty, // Coord
  llvm_i32_ty,   // offset_x
  llvm_i32_ty,   // offset_y,
  llvm_i32_ty,   // offset_z,
  llvm_i32_ty,   // resource_id
  llvm_i32_ty,   // samplerid
  llvm_i32_ty,   // coord_type_x
  llvm_i32_ty,   // coord_type_y
  llvm_i32_ty,   // coord_type_z
  llvm_i32_ty],  // coord_type_w
  [IntrNoMem]
>;

class TextureIntrinsicInt32Input : Intrinsic<[llvm_v4i32_ty], [
  llvm_v4i32_ty, // Coord
  llvm_i32_ty,   // offset_x
  llvm_i32_ty,   // offset_y,
  llvm_i32_ty,   // offset_z,
  llvm_i32_ty,   // resource_id
  llvm_i32_ty,   // samplerid
  llvm_i32_ty,   // coord_type_x
  llvm_i32_ty,   // coord_type_y
  llvm_i32_ty,   // coord_type_z
  llvm_i32_ty],  // coord_type_w
  [IntrNoMem]
>;

def int_r600_store_swizzle :
  Intrinsic<[], [llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty], []
>;

def int_r600_tex : TextureIntrinsicFloatInput;
def int_r600_texc : TextureIntrinsicFloatInput;
def int_r600_txl : TextureIntrinsicFloatInput;
def int_r600_txlc : TextureIntrinsicFloatInput;
def int_r600_txb : TextureIntrinsicFloatInput;
def int_r600_txbc : TextureIntrinsicFloatInput;
def int_r600_txf : TextureIntrinsicInt32Input;
def int_r600_txq : TextureIntrinsicInt32Input;
def int_r600_ddx : TextureIntrinsicFloatInput;
def int_r600_ddy : TextureIntrinsicFloatInput;

def int_r600_dot4 : Intrinsic<[llvm_float_ty],
  [llvm_v4f32_ty, llvm_v4f32_ty], [IntrNoMem, IntrSpeculatable]
>;

def int_r600_kill : Intrinsic<[], [llvm_float_ty], []>;

} // End TargetPrefix = "r600"

let TargetPrefix = "amdgcn" in {

//===----------------------------------------------------------------------===//
// ABI Special Intrinsics
//===----------------------------------------------------------------------===//

defm int_amdgcn_workitem_id : AMDGPUReadPreloadRegisterIntrinsic_xyz;
defm int_amdgcn_workgroup_id : AMDGPUReadPreloadRegisterIntrinsic_xyz_named
                               <"__builtin_amdgcn_workgroup_id">;

def int_amdgcn_dispatch_ptr :
  GCCBuiltin<"__builtin_amdgcn_dispatch_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
  [IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_queue_ptr :
  GCCBuiltin<"__builtin_amdgcn_queue_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
  [IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_kernarg_segment_ptr :
  GCCBuiltin<"__builtin_amdgcn_kernarg_segment_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
  [IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_implicitarg_ptr :
  GCCBuiltin<"__builtin_amdgcn_implicitarg_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
  [IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_groupstaticsize :
  GCCBuiltin<"__builtin_amdgcn_groupstaticsize">,
  Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_dispatch_id :
  GCCBuiltin<"__builtin_amdgcn_dispatch_id">,
  Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_implicit_buffer_ptr :
  GCCBuiltin<"__builtin_amdgcn_implicit_buffer_ptr">,
  Intrinsic<[LLVMQualPointerType<llvm_i8_ty, 4>], [],
  [IntrNoMem, IntrSpeculatable]>;

// Set EXEC to the 64-bit value given.
// This is always moved to the beginning of the basic block.
def int_amdgcn_init_exec : Intrinsic<[],
  [llvm_i64_ty],      // 64-bit literal constant
  [IntrConvergent, ImmArg<0>]>;

// Set EXEC according to a thread count packed in an SGPR input:
//    thread_count = (input >> bitoffset) & 0x7f;
// This is always moved to the beginning of the basic block.
def int_amdgcn_init_exec_from_input : Intrinsic<[],
  [llvm_i32_ty,       // 32-bit SGPR input
   llvm_i32_ty],      // bit offset of the thread count
  [IntrConvergent, ImmArg<1>]>;
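
// Illustration (not from the original file): with input = 0x00001f00 and
// bitoffset = 8, thread_count = (0x00001f00 >> 8) & 0x7f = 31, so the low
// 31 lanes of EXEC are enabled.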

def int_amdgcn_wavefrontsize :
  GCCBuiltin<"__builtin_amdgcn_wavefrontsize">,
  Intrinsic<[llvm_i32_ty], [], [IntrNoMem, IntrSpeculatable]>;


//===----------------------------------------------------------------------===//
// Instruction Intrinsics
//===----------------------------------------------------------------------===//

// The first parameter is s_sendmsg immediate (i16),
// the second one is copied to m0
def int_amdgcn_s_sendmsg : GCCBuiltin<"__builtin_amdgcn_s_sendmsg">,
  Intrinsic <[], [llvm_i32_ty, llvm_i32_ty],
  [ImmArg<0>, IntrNoMem, IntrHasSideEffects]>;
def int_amdgcn_s_sendmsghalt : GCCBuiltin<"__builtin_amdgcn_s_sendmsghalt">,
  Intrinsic <[], [llvm_i32_ty, llvm_i32_ty],
  [ImmArg<0>, IntrNoMem, IntrHasSideEffects]>;

def int_amdgcn_s_barrier : GCCBuiltin<"__builtin_amdgcn_s_barrier">,
  Intrinsic<[], [], [IntrConvergent]>;

def int_amdgcn_wave_barrier : GCCBuiltin<"__builtin_amdgcn_wave_barrier">,
  Intrinsic<[], [], [IntrConvergent]>;

def int_amdgcn_s_waitcnt : GCCBuiltin<"__builtin_amdgcn_s_waitcnt">,
  Intrinsic<[], [llvm_i32_ty], [ImmArg<0>]>;

def int_amdgcn_div_scale : Intrinsic<
  // 1st parameter: Numerator
  // 2nd parameter: Denominator
  // 3rd parameter: Constant to select between the first and
  //                second parameter. (0 = first, 1 = second).
  [llvm_anyfloat_ty, llvm_i1_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
  [IntrNoMem, IntrSpeculatable, ImmArg<2>]
>;

def int_amdgcn_div_fmas : Intrinsic<[llvm_anyfloat_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>, llvm_i1_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_div_fixup : Intrinsic<[llvm_anyfloat_ty],
  [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_trig_preop : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_sin : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cos : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_log_clamp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_fmul_legacy : GCCBuiltin<"__builtin_amdgcn_fmul_legacy">,
  Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_rcp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_rcp_legacy : GCCBuiltin<"__builtin_amdgcn_rcp_legacy">,
  Intrinsic<[llvm_float_ty], [llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_rsq : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_rsq_legacy : GCCBuiltin<"__builtin_amdgcn_rsq_legacy">,
  Intrinsic<
  [llvm_float_ty], [llvm_float_ty], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_rsq_clamp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]>;

def int_amdgcn_ldexp : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_frexp_mant : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_frexp_exp : Intrinsic<
  [llvm_anyint_ty], [llvm_anyfloat_ty], [IntrNoMem, IntrSpeculatable]
>;

// v_fract is buggy on SI/CI. It mishandles infinities, may return 1.0
// and always uses rtz, so is not suitable for implementing the OpenCL
// fract function. It should be ok on VI.
def int_amdgcn_fract : Intrinsic<
  [llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cvt_pkrtz : GCCBuiltin<"__builtin_amdgcn_cvt_pkrtz">,
  Intrinsic<[llvm_v2f16_ty], [llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cvt_pknorm_i16 :
  GCCBuiltin<"__builtin_amdgcn_cvt_pknorm_i16">,
  Intrinsic<[llvm_v2i16_ty], [llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cvt_pknorm_u16 :
  GCCBuiltin<"__builtin_amdgcn_cvt_pknorm_u16">,
  Intrinsic<[llvm_v2i16_ty], [llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cvt_pk_i16 :
  GCCBuiltin<"__builtin_amdgcn_cvt_pk_i16">,
  Intrinsic<
  [llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cvt_pk_u16 : GCCBuiltin<"__builtin_amdgcn_cvt_pk_u16">,
  Intrinsic<[llvm_v2i16_ty], [llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_class : Intrinsic<
  [llvm_i1_ty], [llvm_anyfloat_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_fmed3 : GCCBuiltin<"__builtin_amdgcn_fmed3">,
  Intrinsic<[llvm_anyfloat_ty],
    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
    [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cubeid : GCCBuiltin<"__builtin_amdgcn_cubeid">,
  Intrinsic<[llvm_float_ty],
    [llvm_float_ty, llvm_float_ty, llvm_float_ty],
    [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cubema : GCCBuiltin<"__builtin_amdgcn_cubema">,
  Intrinsic<[llvm_float_ty],
    [llvm_float_ty, llvm_float_ty, llvm_float_ty],
    [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cubesc : GCCBuiltin<"__builtin_amdgcn_cubesc">,
  Intrinsic<[llvm_float_ty],
    [llvm_float_ty, llvm_float_ty, llvm_float_ty],
    [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cubetc : GCCBuiltin<"__builtin_amdgcn_cubetc">,
  Intrinsic<[llvm_float_ty],
    [llvm_float_ty, llvm_float_ty, llvm_float_ty],
    [IntrNoMem, IntrSpeculatable]
>;

// v_ffbh_i32, as opposed to v_ffbh_u32. For v_ffbh_u32, llvm.ctlz
// should be used.
def int_amdgcn_sffbh :
  Intrinsic<[llvm_anyint_ty], [LLVMMatchType<0>],
  [IntrNoMem, IntrSpeculatable]
>;

// v_mad_f32|f16/v_mac_f32|f16, selected regardless of denorm support.
def int_amdgcn_fmad_ftz :
  Intrinsic<[llvm_anyfloat_ty],
    [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
    [IntrNoMem, IntrSpeculatable]
>;

// Fields should mirror atomicrmw
class AMDGPUAtomicIncIntrin : Intrinsic<[llvm_anyint_ty],
  [llvm_anyptr_ty,   // ptr
   LLVMMatchType<0>, // value
   llvm_i32_ty,      // ordering
   llvm_i32_ty,      // scope
   llvm_i1_ty],      // isVolatile
  [IntrArgMemOnly, NoCapture<0>, ImmArg<2>, ImmArg<3>, ImmArg<4>], "",
  [SDNPMemOperand]
>;

def int_amdgcn_atomic_inc : AMDGPUAtomicIncIntrin;
def int_amdgcn_atomic_dec : AMDGPUAtomicIncIntrin;

class AMDGPULDSF32Intrin<string clang_builtin> :
  GCCBuiltin<clang_builtin>,
  Intrinsic<[llvm_float_ty],
    [LLVMQualPointerType<llvm_float_ty, 3>, // LDS pointer
     llvm_float_ty, // value
     llvm_i32_ty,   // ordering
     llvm_i32_ty,   // scope
     llvm_i1_ty],   // isVolatile
    [IntrArgMemOnly, NoCapture<0>, ImmArg<2>, ImmArg<3>, ImmArg<4>]
>;

// FIXME: The m0 argument should be moved after the normal arguments
class AMDGPUDSOrderedIntrinsic : Intrinsic<
  [llvm_i32_ty],
  // M0 = {hi16:address, lo16:waveID}. Allow passing M0 as a pointer, so that
  // the bit packing can be optimized at the IR level.
  [LLVMQualPointerType<llvm_i32_ty, 2>, // IntToPtr(M0)
   llvm_i32_ty, // value to add or swap
   llvm_i32_ty, // ordering
   llvm_i32_ty, // scope
   llvm_i1_ty,  // isVolatile
   llvm_i32_ty, // ordered count index (OA index), also added to the address
                // gfx10: bits 24-27 indicate the number of active threads/dwords
   llvm_i1_ty,  // wave release, usually set to 1
   llvm_i1_ty], // wave done, set to 1 for the last ordered instruction
  [NoCapture<0>,
   ImmArg<2>, ImmArg<3>, ImmArg<4>,
   ImmArg<5>, ImmArg<6>, ImmArg<7>
  ]
>;

class AMDGPUDSAppendConsumedIntrinsic : Intrinsic<
  [llvm_i32_ty],
  [llvm_anyptr_ty, // LDS or GDS ptr
   llvm_i1_ty],    // isVolatile
  [IntrConvergent, IntrArgMemOnly, NoCapture<0>, ImmArg<1>],
  "",
  [SDNPMemOperand]
>;

def int_amdgcn_ds_ordered_add : AMDGPUDSOrderedIntrinsic;
def int_amdgcn_ds_ordered_swap : AMDGPUDSOrderedIntrinsic;

// The pointer argument is assumed to be dynamically uniform if a VGPR.
def int_amdgcn_ds_append : AMDGPUDSAppendConsumedIntrinsic;
def int_amdgcn_ds_consume : AMDGPUDSAppendConsumedIntrinsic;

def int_amdgcn_ds_fadd : AMDGPULDSF32Intrin<"__builtin_amdgcn_ds_faddf">;
def int_amdgcn_ds_fmin : AMDGPULDSF32Intrin<"__builtin_amdgcn_ds_fminf">;
def int_amdgcn_ds_fmax : AMDGPULDSF32Intrin<"__builtin_amdgcn_ds_fmaxf">;

} // TargetPrefix = "amdgcn"

// New-style image intrinsics

//////////////////////////////////////////////////////////////////////////
// Dimension-aware image intrinsics framework
//////////////////////////////////////////////////////////////////////////

// Helper class to represent (type, name) combinations of arguments. The
// argument names are explanatory and used as DAG operand names for codegen
// pattern matching.
class AMDGPUArg<LLVMType ty, string name> {
  LLVMType Type = ty;
  string Name = name;
}

// Return [AMDGPUArg<basety, names[0]>, AMDGPUArg<LLVMMatchType<0>, names[1]>, ...]
class makeArgList<list<string> names, LLVMType basety> {
  list<AMDGPUArg> ret =
    !listconcat([AMDGPUArg<basety, names[0]>],
                !foreach(name, !tail(names), AMDGPUArg<LLVMMatchType<0>, name>));
}
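
// Illustration (not from the original file):
//   makeArgList<["s", "t"], llvm_anyfloat_ty>.ret
//     = [AMDGPUArg<llvm_anyfloat_ty, "s">, AMDGPUArg<LLVMMatchType<0>, "t">]
// i.e. the first name introduces the overloaded type and the rest match it.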

// Return arglist, with LLVMMatchType's references shifted by 'shift'.
class arglistmatchshift<list<AMDGPUArg> arglist, int shift> {
  list<AMDGPUArg> ret =
    !foreach(arg, arglist,
             !if(!isa<LLVMMatchType>(arg.Type),
                 AMDGPUArg<LLVMMatchType<!add(!cast<LLVMMatchType>(arg.Type).Number, shift)>,
                           arg.Name>,
                 arg));
}

// Return the concatenation of the given arglists. LLVMMatchType's are adjusted
// accordingly, and shifted by an additional 'shift'.
class arglistconcat<list<list<AMDGPUArg>> arglists, int shift = 0> {
  list<AMDGPUArg> ret =
    !foldl([]<AMDGPUArg>, arglists, lhs, rhs,
           !listconcat(
             lhs,
             arglistmatchshift<rhs,
                               !add(shift, !foldl(0, lhs, a, b,
                                                  !add(a, b.Type.isAny)))>.ret));
}
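
// Illustration (not from the original file): concatenating
//   [AMDGPUArg<llvm_anyfloat_ty, "bias">]
// with
//   [AMDGPUArg<llvm_anyfloat_ty, "s">, AMDGPUArg<LLVMMatchType<0>, "t">]
// at shift = 0 rewrites "t" to LLVMMatchType<1>, since "bias" contributes one
// overloaded type ahead of "s" in the combined signature.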

// Represent texture/image types / dimensionality.
class AMDGPUDimProps<bits<3> enc, string name, string asmsuffix,
                     list<string> coord_names, list<string> slice_names> {
  AMDGPUDimProps Dim = !cast<AMDGPUDimProps>(NAME);
  string Name = name; // e.g. "2darraymsaa"
  string AsmSuffix = asmsuffix; // e.g. 2D_MSAA_ARRAY (used in assembly strings)
  bits<3> Encoding = enc;
  bit DA = 0; // DA bit in MIMG encoding

  list<AMDGPUArg> CoordSliceArgs =
    makeArgList<!listconcat(coord_names, slice_names), llvm_anyfloat_ty>.ret;
  list<AMDGPUArg> CoordSliceIntArgs =
    makeArgList<!listconcat(coord_names, slice_names), llvm_anyint_ty>.ret;
  list<AMDGPUArg> GradientArgs =
    makeArgList<!listconcat(!foreach(name, coord_names, "d" # name # "dh"),
                            !foreach(name, coord_names, "d" # name # "dv")),
                llvm_anyfloat_ty>.ret;

  bits<8> NumCoords = !size(CoordSliceArgs);
  bits<8> NumGradients = !size(GradientArgs);
}

def AMDGPUDim1D : AMDGPUDimProps<0x0, "1d", "1D", ["s"], []>;
def AMDGPUDim2D : AMDGPUDimProps<0x1, "2d", "2D", ["s", "t"], []>;
def AMDGPUDim3D : AMDGPUDimProps<0x2, "3d", "3D", ["s", "t", "r"], []>;
let DA = 1 in {
  def AMDGPUDimCube : AMDGPUDimProps<0x3, "cube", "CUBE", ["s", "t"], ["face"]>;
  def AMDGPUDim1DArray : AMDGPUDimProps<0x4, "1darray", "1D_ARRAY", ["s"], ["slice"]>;
  def AMDGPUDim2DArray : AMDGPUDimProps<0x5, "2darray", "2D_ARRAY", ["s", "t"], ["slice"]>;
}
def AMDGPUDim2DMsaa : AMDGPUDimProps<0x6, "2dmsaa", "2D_MSAA", ["s", "t"], ["fragid"]>;
let DA = 1 in {
  def AMDGPUDim2DArrayMsaa : AMDGPUDimProps<0x7, "2darraymsaa", "2D_MSAA_ARRAY", ["s", "t"], ["slice", "fragid"]>;
}
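
// Illustration (not from the original file): for AMDGPUDim2DArrayMsaa,
// CoordSliceArgs expands to [s, t, slice, fragid] and GradientArgs to
// [dsdh, dtdh, dsdv, dtdv], so NumCoords = 4 and NumGradients = 4.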

def AMDGPUDims {
  list<AMDGPUDimProps> NoMsaa = [AMDGPUDim1D, AMDGPUDim2D, AMDGPUDim3D,
                                 AMDGPUDimCube, AMDGPUDim1DArray,
                                 AMDGPUDim2DArray];
  list<AMDGPUDimProps> Msaa = [AMDGPUDim2DMsaa, AMDGPUDim2DArrayMsaa];
  list<AMDGPUDimProps> All = !listconcat(NoMsaa, Msaa);
}

// Represent sample variants, i.e. _C, _O, _B, ... and combinations thereof.
class AMDGPUSampleVariant<string ucmod, string lcmod, list<AMDGPUArg> extra_addr> {
  string UpperCaseMod = ucmod;
  string LowerCaseMod = lcmod;

  // {offset} {bias} {z-compare}
  list<AMDGPUArg> ExtraAddrArgs = extra_addr;
  bit Gradients = 0;

  // Name of the {lod} or {clamp} argument that is appended to the coordinates,
  // if any.
  string LodOrClamp = "";
}

// AMDGPUSampleVariants: all variants supported by IMAGE_SAMPLE
// AMDGPUSampleVariantsNoGradients: variants supported by IMAGE_GATHER4
defset list<AMDGPUSampleVariant> AMDGPUSampleVariants = {
  multiclass AMDGPUSampleHelper_Offset<string ucmod, string lcmod,
                                       list<AMDGPUArg> extra_addr> {
    def NAME#lcmod : AMDGPUSampleVariant<ucmod, lcmod, extra_addr>;
    def NAME#lcmod#_o : AMDGPUSampleVariant<
        ucmod#"_O", lcmod#"_o", !listconcat([AMDGPUArg<llvm_i32_ty, "offset">], extra_addr)>;
  }

  multiclass AMDGPUSampleHelper_Compare<string ucmod, string lcmod,
                                        list<AMDGPUArg> extra_addr> {
    defm NAME : AMDGPUSampleHelper_Offset<ucmod, lcmod, extra_addr>;
    defm NAME : AMDGPUSampleHelper_Offset<
        "_C"#ucmod, "_c"#lcmod, !listconcat(extra_addr, [AMDGPUArg<llvm_float_ty, "zcompare">])>;
  }

  multiclass AMDGPUSampleHelper_Clamp<string ucmod, string lcmod,
                                      list<AMDGPUArg> extra_addr> {
    defm NAME : AMDGPUSampleHelper_Compare<ucmod, lcmod, extra_addr>;
    let LodOrClamp = "clamp" in
    defm NAME : AMDGPUSampleHelper_Compare<ucmod#"_CL", lcmod#"_cl", extra_addr>;
  }

  defset list<AMDGPUSampleVariant> AMDGPUSampleVariantsNoGradients = {
    defm AMDGPUSample : AMDGPUSampleHelper_Clamp<"", "", []>;
    defm AMDGPUSample : AMDGPUSampleHelper_Clamp<
        "_B", "_b", [AMDGPUArg<llvm_anyfloat_ty, "bias">]>;
    let LodOrClamp = "lod" in
    defm AMDGPUSample : AMDGPUSampleHelper_Compare<"_L", "_l", []>;
    defm AMDGPUSample : AMDGPUSampleHelper_Compare<"_LZ", "_lz", []>;
  }

  let Gradients = 1 in {
    defm AMDGPUSample : AMDGPUSampleHelper_Clamp<"_D", "_d", []>;
    defm AMDGPUSample : AMDGPUSampleHelper_Clamp<"_CD", "_cd", []>;
  }
}
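
// Illustration (not from the original file): the helpers expand
// combinatorially; the base family alone yields the modifiers
// "", _o, _c, _c_o, _cl, _cl_o, _c_cl and _c_cl_o, which surface as
// intrinsic names such as llvm.amdgcn.image.sample.c.cl.o.2d.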

// Helper class to capture the profile of a dimension-aware image intrinsic.
// This information is used to generate the intrinsic's type and to inform
// codegen pattern matching.
class AMDGPUDimProfile<string opmod,
                       AMDGPUDimProps dim> {
  AMDGPUDimProps Dim = dim;
  string OpMod = opmod; // the corresponding instruction is named IMAGE_OpMod

  // These are intended to be overwritten by subclasses
  bit IsSample = 0;
  bit IsAtomic = 0;
  list<LLVMType> RetTypes = [];
  list<AMDGPUArg> DataArgs = [];
  list<AMDGPUArg> ExtraAddrArgs = [];
  bit Gradients = 0;
  string LodClampMip = "";

  int NumRetAndDataAnyTypes =
    !foldl(0, !listconcat(RetTypes, !foreach(arg, DataArgs, arg.Type)), a, b,
           !add(a, b.isAny));

  list<AMDGPUArg> AddrArgs =
    arglistconcat<[ExtraAddrArgs,
                   !if(Gradients, dim.GradientArgs, []),
                   !listconcat(!if(IsSample, dim.CoordSliceArgs, dim.CoordSliceIntArgs),
                               !if(!eq(LodClampMip, ""),
                                   []<AMDGPUArg>,
                                   [AMDGPUArg<LLVMMatchType<0>, LodClampMip>]))],
                  NumRetAndDataAnyTypes>.ret;
  list<LLVMType> AddrTypes = !foreach(arg, AddrArgs, arg.Type);
  list<AMDGPUArg> AddrDefaultArgs =
    !foreach(arg, AddrArgs,
             AMDGPUArg<!if(!or(arg.Type.isAny, !isa<LLVMMatchType>(arg.Type)),
                           !if(IsSample, llvm_float_ty, llvm_i32_ty), arg.Type),
                       arg.Name>);
  list<AMDGPUArg> AddrA16Args =
    !foreach(arg, AddrArgs,
             AMDGPUArg<!if(!or(arg.Type.isAny, !isa<LLVMMatchType>(arg.Type)),
                           !if(IsSample, llvm_half_ty, llvm_i16_ty), arg.Type),
                       arg.Name>);
}

class AMDGPUDimProfileCopy<AMDGPUDimProfile base> : AMDGPUDimProfile<base.OpMod, base.Dim> {
  let IsSample = base.IsSample;
  let IsAtomic = base.IsAtomic;
  let RetTypes = base.RetTypes;
  let DataArgs = base.DataArgs;
  let ExtraAddrArgs = base.ExtraAddrArgs;
  let Gradients = base.Gradients;
  let LodClampMip = base.LodClampMip;
}

class AMDGPUDimSampleProfile<string opmod,
                             AMDGPUDimProps dim,
                             AMDGPUSampleVariant sample> : AMDGPUDimProfile<opmod, dim> {
  let IsSample = 1;
  let RetTypes = [llvm_any_ty];
  let ExtraAddrArgs = sample.ExtraAddrArgs;
  let Gradients = sample.Gradients;
  let LodClampMip = sample.LodOrClamp;
}

class AMDGPUDimNoSampleProfile<string opmod,
                               AMDGPUDimProps dim,
                               list<LLVMType> retty,
                               list<AMDGPUArg> dataargs,
                               bit Mip = 0> : AMDGPUDimProfile<opmod, dim> {
  let RetTypes = retty;
  let DataArgs = dataargs;
  let LodClampMip = !if(Mip, "mip", "");
}

class AMDGPUDimAtomicProfile<string opmod,
                             AMDGPUDimProps dim,
                             list<AMDGPUArg> dataargs> : AMDGPUDimProfile<opmod, dim> {
  let RetTypes = [llvm_anyint_ty];
  let DataArgs = dataargs;
  let IsAtomic = 1;
}

class AMDGPUDimGetResInfoProfile<AMDGPUDimProps dim> : AMDGPUDimProfile<"GET_RESINFO", dim> {
  let RetTypes = [llvm_anyfloat_ty];
  let DataArgs = [];
  let AddrArgs = [AMDGPUArg<llvm_anyint_ty, "mip">];
  let LodClampMip = "mip";
}

// Helper class for figuring out image intrinsic argument indexes.
class AMDGPUImageDimIntrinsicEval<AMDGPUDimProfile P_> {
  int NumDataArgs = !size(P_.DataArgs);
  int NumDmaskArgs = !if(P_.IsAtomic, 0, 1);
  int NumVAddrArgs = !size(P_.AddrArgs);
  int NumRSrcArgs = 1;
  int NumSampArgs = !if(P_.IsSample, 2, 0);
  int DmaskArgIndex = NumDataArgs;
  int UnormArgIndex = !add(NumDataArgs, NumDmaskArgs, NumVAddrArgs, NumRSrcArgs, 1);
  int TexFailCtrlArgIndex = !add(NumDataArgs, NumDmaskArgs, NumVAddrArgs, NumRSrcArgs, NumSampArgs);
  int CachePolicyArgIndex = !add(TexFailCtrlArgIndex, 1);
}
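
// Illustration (not from the original file): for llvm.amdgcn.image.sample.2d
// (no data args, IsSample = 1, AddrArgs = [s, t]) the operand order is
//   (dmask, s, t, rsrc, samp, unorm, texfailctrl, cachepolicy)
// so DmaskArgIndex = 0, UnormArgIndex = 5, TexFailCtrlArgIndex = 6 and
// CachePolicyArgIndex = 7, matching the formulas above.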

// All dimension-aware intrinsics are derived from this class.
class AMDGPUImageDimIntrinsic<AMDGPUDimProfile P_,
                              list<IntrinsicProperty> props,
                              list<SDNodeProperty> sdnodeprops> : Intrinsic<
    P_.RetTypes,        // vdata(VGPR) -- for load/atomic-with-return
    !listconcat(
      !foreach(arg, P_.DataArgs, arg.Type),      // vdata(VGPR) -- for store/atomic
      !if(P_.IsAtomic, [], [llvm_i32_ty]),       // dmask(imm)
      P_.AddrTypes,                              // vaddr(VGPR)
      [llvm_v8i32_ty],                           // rsrc(SGPR)
      !if(P_.IsSample, [llvm_v4i32_ty,           // samp(SGPR)
                        llvm_i1_ty], []),        // unorm(imm)
      [llvm_i32_ty,                              // texfailctrl(imm; bit 0 = tfe, bit 1 = lwe)
       llvm_i32_ty]),                            // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc)
    !listconcat(props,
      !if(P_.IsAtomic, [], [ImmArg<AMDGPUImageDimIntrinsicEval<P_>.DmaskArgIndex>]),
      !if(P_.IsSample, [ImmArg<AMDGPUImageDimIntrinsicEval<P_>.UnormArgIndex>], []),
      [ImmArg<AMDGPUImageDimIntrinsicEval<P_>.TexFailCtrlArgIndex>,
       ImmArg<AMDGPUImageDimIntrinsicEval<P_>.CachePolicyArgIndex>]),
    "", sdnodeprops>,
  AMDGPURsrcIntrinsic<!add(!size(P_.DataArgs), !size(P_.AddrTypes),
                           !if(P_.IsAtomic, 0, 1)), 1> {
  AMDGPUDimProfile P = P_;

  AMDGPUImageDimIntrinsic Intr = !cast<AMDGPUImageDimIntrinsic>(NAME);

  let TargetPrefix = "amdgcn";
}

// Marker class for intrinsics with a DMask that determines the returned
// data.
class AMDGPUImageDMaskIntrinsic;

defset list<AMDGPUImageDimIntrinsic> AMDGPUImageDimIntrinsics = {

  //////////////////////////////////////////////////////////////////////////
  // Load and store intrinsics
  //////////////////////////////////////////////////////////////////////////
  multiclass AMDGPUImageDimIntrinsicsNoMsaa<string opmod,
                                            list<LLVMType> retty,
                                            list<AMDGPUArg> dataargs,
                                            list<IntrinsicProperty> props,
                                            list<SDNodeProperty> sdnodeprops,
                                            bit Mip = 0> {
    foreach dim = AMDGPUDims.NoMsaa in {
      def !strconcat(NAME, "_", dim.Name)
        : AMDGPUImageDimIntrinsic<
            AMDGPUDimNoSampleProfile<opmod, dim, retty, dataargs, Mip>,
            props, sdnodeprops>;
    }
  }

  multiclass AMDGPUImageDimIntrinsicsAll<string opmod,
                                         list<LLVMType> retty,
                                         list<AMDGPUArg> dataargs,
                                         list<IntrinsicProperty> props,
                                         list<SDNodeProperty> sdnodeprops,
                                         bit Mip = 0> {
    foreach dim = AMDGPUDims.All in {
      def !strconcat(NAME, "_", dim.Name)
        : AMDGPUImageDimIntrinsic<
            AMDGPUDimNoSampleProfile<opmod, dim, retty, dataargs, Mip>,
            props, sdnodeprops>;
    }
  }

  defm int_amdgcn_image_load
    : AMDGPUImageDimIntrinsicsAll<"LOAD", [llvm_any_ty], [], [IntrReadMem],
                                  [SDNPMemOperand]>,
      AMDGPUImageDMaskIntrinsic;
  defm int_amdgcn_image_load_mip
    : AMDGPUImageDimIntrinsicsNoMsaa<"LOAD_MIP", [llvm_any_ty], [],
                                     [IntrReadMem], [SDNPMemOperand], 1>,
      AMDGPUImageDMaskIntrinsic;

  defm int_amdgcn_image_store : AMDGPUImageDimIntrinsicsAll<
              "STORE", [], [AMDGPUArg<llvm_anyfloat_ty, "vdata">],
              [IntrWriteMem], [SDNPMemOperand]>;
  defm int_amdgcn_image_store_mip : AMDGPUImageDimIntrinsicsNoMsaa<
              "STORE_MIP", [], [AMDGPUArg<llvm_anyfloat_ty, "vdata">],
              [IntrWriteMem], [SDNPMemOperand], 1>;

  //////////////////////////////////////////////////////////////////////////
  // sample and getlod intrinsics
  //////////////////////////////////////////////////////////////////////////
  multiclass AMDGPUImageDimSampleDims<string opmod,
                                      AMDGPUSampleVariant sample,
                                      bit NoMem = 0> {
    foreach dim = AMDGPUDims.NoMsaa in {
      def !strconcat(NAME, "_", dim.Name) : AMDGPUImageDimIntrinsic<
          AMDGPUDimSampleProfile<opmod, dim, sample>,
          !if(NoMem, [IntrNoMem], [IntrReadMem]),
          !if(NoMem, [], [SDNPMemOperand])>;
    }
  }

  foreach sample = AMDGPUSampleVariants in {
    defm int_amdgcn_image_sample # sample.LowerCaseMod
      : AMDGPUImageDimSampleDims<"SAMPLE" # sample.UpperCaseMod, sample>,
        AMDGPUImageDMaskIntrinsic;
  }

  defm int_amdgcn_image_getlod
    : AMDGPUImageDimSampleDims<"GET_LOD", AMDGPUSample, 1>,
      AMDGPUImageDMaskIntrinsic;

  //////////////////////////////////////////////////////////////////////////
  // getresinfo intrinsics
  //////////////////////////////////////////////////////////////////////////
  foreach dim = AMDGPUDims.All in {
    def !strconcat("int_amdgcn_image_getresinfo_", dim.Name)
      : AMDGPUImageDimIntrinsic<AMDGPUDimGetResInfoProfile<dim>, [IntrNoMem], []>,
        AMDGPUImageDMaskIntrinsic;
  }

  //////////////////////////////////////////////////////////////////////////
  // gather4 intrinsics
  //////////////////////////////////////////////////////////////////////////
  foreach sample = AMDGPUSampleVariantsNoGradients in {
    foreach dim = [AMDGPUDim2D, AMDGPUDimCube, AMDGPUDim2DArray] in {
      def int_amdgcn_image_gather4 # sample.LowerCaseMod # _ # dim.Name:
          AMDGPUImageDimIntrinsic<
              AMDGPUDimSampleProfile<"GATHER4" # sample.UpperCaseMod, dim, sample>,
              [IntrReadMem], [SDNPMemOperand]>;
    }
  }
}

//////////////////////////////////////////////////////////////////////////
// atomic intrinsics
//////////////////////////////////////////////////////////////////////////
defset list<AMDGPUImageDimIntrinsic> AMDGPUImageDimAtomicIntrinsics = {
  multiclass AMDGPUImageDimAtomicX<string opmod, list<AMDGPUArg> dataargs> {
    foreach dim = AMDGPUDims.All in {
      def !strconcat(NAME, "_", dim.Name)
        : AMDGPUImageDimIntrinsic<
            AMDGPUDimAtomicProfile<opmod, dim, dataargs>,
            [], [SDNPMemOperand]>;
    }
  }

  multiclass AMDGPUImageDimAtomic<string opmod> {
    defm "" : AMDGPUImageDimAtomicX<opmod, [AMDGPUArg<LLVMMatchType<0>, "vdata">]>;
  }

  defm int_amdgcn_image_atomic_swap : AMDGPUImageDimAtomic<"ATOMIC_SWAP">;
  defm int_amdgcn_image_atomic_add : AMDGPUImageDimAtomic<"ATOMIC_ADD">;
  defm int_amdgcn_image_atomic_sub : AMDGPUImageDimAtomic<"ATOMIC_SUB">;
  defm int_amdgcn_image_atomic_smin : AMDGPUImageDimAtomic<"ATOMIC_SMIN">;
  defm int_amdgcn_image_atomic_umin : AMDGPUImageDimAtomic<"ATOMIC_UMIN">;
  defm int_amdgcn_image_atomic_smax : AMDGPUImageDimAtomic<"ATOMIC_SMAX">;
  defm int_amdgcn_image_atomic_umax : AMDGPUImageDimAtomic<"ATOMIC_UMAX">;
  defm int_amdgcn_image_atomic_and : AMDGPUImageDimAtomic<"ATOMIC_AND">;
  defm int_amdgcn_image_atomic_or : AMDGPUImageDimAtomic<"ATOMIC_OR">;
  defm int_amdgcn_image_atomic_xor : AMDGPUImageDimAtomic<"ATOMIC_XOR">;
  defm int_amdgcn_image_atomic_inc : AMDGPUImageDimAtomic<"ATOMIC_INC">;
  defm int_amdgcn_image_atomic_dec : AMDGPUImageDimAtomic<"ATOMIC_DEC">;

  defm int_amdgcn_image_atomic_cmpswap :
      AMDGPUImageDimAtomicX<"ATOMIC_CMPSWAP", [AMDGPUArg<LLVMMatchType<0>, "src">,
                                               AMDGPUArg<LLVMMatchType<0>, "cmp">]>;
}

//////////////////////////////////////////////////////////////////////////
// Buffer intrinsics
//////////////////////////////////////////////////////////////////////////

let TargetPrefix = "amdgcn" in {

defset list<AMDGPURsrcIntrinsic> AMDGPUBufferIntrinsics = {

class AMDGPUBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
  [data_ty],
  [llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
   llvm_i1_ty,        // glc(imm)
   llvm_i1_ty],       // slc(imm)
  [IntrReadMem, ImmArg<3>, ImmArg<4>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;
def int_amdgcn_buffer_load_format : AMDGPUBufferLoad<llvm_anyfloat_ty>;
def int_amdgcn_buffer_load : AMDGPUBufferLoad;

def int_amdgcn_s_buffer_load : Intrinsic <
  [llvm_any_ty],
  [llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // byte offset(SGPR/imm)
   llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc, bit 2 = dlc)
  [IntrNoMem, ImmArg<2>]>,
  AMDGPURsrcIntrinsic<0>;

class AMDGPUBufferStore<LLVMType data_ty = llvm_any_ty> : Intrinsic <
  [],
  [data_ty,           // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(SGPR/VGPR/imm)
   llvm_i1_ty,        // glc(imm)
   llvm_i1_ty],       // slc(imm)
  [IntrWriteMem, ImmArg<4>, ImmArg<5>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;
def int_amdgcn_buffer_store_format : AMDGPUBufferStore<llvm_anyfloat_ty>;
def int_amdgcn_buffer_store : AMDGPUBufferStore;

// New buffer intrinsics with separate raw and struct variants. The raw
// variant never has an index. The struct variant always has an index, even if
// it is const 0. A struct intrinsic with constant 0 index is different to the
// corresponding raw intrinsic on gfx9+ because the behavior of bound checking
// and swizzling changes depending on whether idxen is set in the instruction.
// These new intrinsics also keep the offset and soffset arguments separate as
// they behave differently in bounds checking and swizzling.
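// Illustration (assumed IR, not from this file):
//   %v = call <4 x float> @llvm.amdgcn.raw.buffer.load.v4f32(
//            <4 x i32> %rsrc, i32 %offset, i32 %soffset, i32 0)
//   %e = call float @llvm.amdgcn.struct.buffer.load.f32(
//            <4 x i32> %rsrc, i32 %vindex, i32 %offset, i32 %soffset, i32 0)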
class AMDGPURawBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
  [data_ty],
  [llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc on gfx10+)
  [IntrReadMem, ImmArg<3>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;
def int_amdgcn_raw_buffer_load_format : AMDGPURawBufferLoad<llvm_anyfloat_ty>;
def int_amdgcn_raw_buffer_load : AMDGPURawBufferLoad;

class AMDGPUStructBufferLoad<LLVMType data_ty = llvm_any_ty> : Intrinsic <
  [data_ty],
  [llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc on gfx10+)
  [IntrReadMem, ImmArg<4>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;
def int_amdgcn_struct_buffer_load_format : AMDGPUStructBufferLoad<llvm_anyfloat_ty>;
def int_amdgcn_struct_buffer_load : AMDGPUStructBufferLoad;

class AMDGPURawBufferStore<LLVMType data_ty = llvm_any_ty> : Intrinsic <
  [],
  [data_ty,           // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc on gfx10+)
  [IntrWriteMem, ImmArg<4>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;
def int_amdgcn_raw_buffer_store_format : AMDGPURawBufferStore<llvm_anyfloat_ty>;
def int_amdgcn_raw_buffer_store : AMDGPURawBufferStore;

class AMDGPUStructBufferStore<LLVMType data_ty = llvm_any_ty> : Intrinsic <
  [],
  [data_ty,           // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc on gfx10+)
  [IntrWriteMem, ImmArg<5>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;
def int_amdgcn_struct_buffer_store_format : AMDGPUStructBufferStore<llvm_anyfloat_ty>;
def int_amdgcn_struct_buffer_store : AMDGPUStructBufferStore;

class AMDGPURawBufferAtomic<LLVMType data_ty = llvm_any_ty> : Intrinsic <
  [data_ty],
  [LLVMMatchType<0>,  // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
  [ImmArg<4>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1, 0>;
def int_amdgcn_raw_buffer_atomic_swap : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_add : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_sub : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_smin : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_umin : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_smax : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_umax : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_and : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_or : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_xor : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_inc : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_dec : AMDGPURawBufferAtomic;
def int_amdgcn_raw_buffer_atomic_cmpswap : Intrinsic<
  [llvm_anyint_ty],
  [LLVMMatchType<0>,  // src(VGPR)
   LLVMMatchType<0>,  // cmp(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
  [ImmArg<5>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<2, 0>;

class AMDGPUStructBufferAtomic<LLVMType data_ty = llvm_any_ty> : Intrinsic <
  [data_ty],
  [LLVMMatchType<0>,  // vdata(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
  [ImmArg<5>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1, 0>;
def int_amdgcn_struct_buffer_atomic_swap : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_add : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_sub : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_smin : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_umin : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_smax : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_umax : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_and : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_or : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_xor : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_inc : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_dec : AMDGPUStructBufferAtomic;
def int_amdgcn_struct_buffer_atomic_cmpswap : Intrinsic<
  [llvm_anyint_ty],
  [LLVMMatchType<0>,  // src(VGPR)
   LLVMMatchType<0>,  // cmp(VGPR)
   llvm_v4i32_ty,     // rsrc(SGPR)
   llvm_i32_ty,       // vindex(VGPR)
   llvm_i32_ty,       // offset(VGPR/imm, included in bounds checking and swizzling)
   llvm_i32_ty,       // soffset(SGPR/imm, excluded from bounds checking and swizzling)
   llvm_i32_ty],      // cachepolicy(imm; bit 1 = slc)
  [ImmArg<6>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<2, 0>;

// Obsolescent tbuffer intrinsics.
def int_amdgcn_tbuffer_load : Intrinsic <
    [llvm_any_ty],    // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
    [llvm_v4i32_ty,   // rsrc(SGPR)
     llvm_i32_ty,     // vindex(VGPR)
     llvm_i32_ty,     // voffset(VGPR)
     llvm_i32_ty,     // soffset(SGPR)
     llvm_i32_ty,     // offset(imm)
     llvm_i32_ty,     // dfmt(imm)
     llvm_i32_ty,     // nfmt(imm)
     llvm_i1_ty,      // glc(imm)
     llvm_i1_ty],     // slc(imm)
    [IntrReadMem, ImmArg<4>, ImmArg<5>, ImmArg<6>,
     ImmArg<7>, ImmArg<8>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;

def int_amdgcn_tbuffer_store : Intrinsic <
    [],
    [llvm_any_ty,    // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
     llvm_v4i32_ty,  // rsrc(SGPR)
     llvm_i32_ty,    // vindex(VGPR)
     llvm_i32_ty,    // voffset(VGPR)
     llvm_i32_ty,    // soffset(SGPR)
     llvm_i32_ty,    // offset(imm)
     llvm_i32_ty,    // dfmt(imm)
     llvm_i32_ty,    // nfmt(imm)
     llvm_i1_ty,     // glc(imm)
     llvm_i1_ty],    // slc(imm)
    [IntrWriteMem, ImmArg<5>, ImmArg<6>, ImmArg<7>,
     ImmArg<8>, ImmArg<9>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;

// New tbuffer intrinsics, with:
// - raw and struct variants
// - joint format field
// - joint cachepolicy field
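// Illustration (not from the original file): the joint format immediate is
// (nfmt << 4) | dfmt; e.g. dfmt = 4 with nfmt = 4 packs to 0x44.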
def int_amdgcn_raw_tbuffer_load : Intrinsic <
    [llvm_any_ty],    // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
    [llvm_v4i32_ty,   // rsrc(SGPR)
     llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
     llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
     llvm_i32_ty,     // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
     llvm_i32_ty],    // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc on gfx10+)
    [IntrReadMem, ImmArg<3>, ImmArg<4>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;

def int_amdgcn_raw_tbuffer_store : Intrinsic <
    [],
    [llvm_any_ty,    // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
     llvm_v4i32_ty,  // rsrc(SGPR)
     llvm_i32_ty,    // offset(VGPR/imm, included in bounds checking and swizzling)
     llvm_i32_ty,    // soffset(SGPR/imm, excluded from bounds checking and swizzling)
     llvm_i32_ty,    // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
     llvm_i32_ty],   // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc on gfx10+)
    [IntrWriteMem, ImmArg<4>, ImmArg<5>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;

def int_amdgcn_struct_tbuffer_load : Intrinsic <
    [llvm_any_ty],    // overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
    [llvm_v4i32_ty,   // rsrc(SGPR)
     llvm_i32_ty,     // vindex(VGPR)
     llvm_i32_ty,     // offset(VGPR/imm, included in bounds checking and swizzling)
     llvm_i32_ty,     // soffset(SGPR/imm, excluded from bounds checking and swizzling)
     llvm_i32_ty,     // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
     llvm_i32_ty],    // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc on gfx10+)
    [IntrReadMem, ImmArg<4>, ImmArg<5>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<0>;

def int_amdgcn_struct_tbuffer_store : Intrinsic <
    [],
    [llvm_any_ty,    // vdata(VGPR), overloaded for types f32/i32, v2f32/v2i32, v4f32/v4i32
     llvm_v4i32_ty,  // rsrc(SGPR)
     llvm_i32_ty,    // vindex(VGPR)
     llvm_i32_ty,    // offset(VGPR/imm, included in bounds checking and swizzling)
     llvm_i32_ty,    // soffset(SGPR/imm, excluded from bounds checking and swizzling)
     llvm_i32_ty,    // format(imm; bits 3..0 = dfmt, bits 6..4 = nfmt)
     llvm_i32_ty],   // cachepolicy(imm; bit 0 = glc, bit 1 = slc, bit 2 = dlc on gfx10+)
    [IntrWriteMem, ImmArg<5>, ImmArg<6>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1>;

class AMDGPUBufferAtomic : Intrinsic <
  [llvm_anyint_ty],
  [LLVMMatchType<0>,       // vdata(VGPR)
   llvm_v4i32_ty,          // rsrc(SGPR)
   llvm_i32_ty,            // vindex(VGPR)
   llvm_i32_ty,            // offset(SGPR/VGPR/imm)
   llvm_i1_ty],            // slc(imm)
  [ImmArg<4>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1, 0>;
def int_amdgcn_buffer_atomic_swap : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_add : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_sub : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_smin : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_umin : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_smax : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_umax : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_and : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_or : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_xor : AMDGPUBufferAtomic;
def int_amdgcn_buffer_atomic_cmpswap : Intrinsic<
  [llvm_i32_ty],
  [llvm_i32_ty,            // src(VGPR)
   llvm_i32_ty,            // cmp(VGPR)
   llvm_v4i32_ty,          // rsrc(SGPR)
   llvm_i32_ty,            // vindex(VGPR)
   llvm_i32_ty,            // offset(SGPR/VGPR/imm)
   llvm_i1_ty],            // slc(imm)
  [ImmArg<5>], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<2, 0>;

} // defset AMDGPUBufferIntrinsics

// Uses that do not set the done bit should set IntrWriteMem on the
// call site.
def int_amdgcn_exp : Intrinsic <[], [
  llvm_i32_ty,       // tgt,
  llvm_i32_ty,       // en
  llvm_any_ty,       // src0 (f32 or i32)
  LLVMMatchType<0>,  // src1
  LLVMMatchType<0>,  // src2
  LLVMMatchType<0>,  // src3
  llvm_i1_ty,        // done
  llvm_i1_ty         // vm
  ],
  [ImmArg<0>, ImmArg<1>, ImmArg<6>, ImmArg<7>, IntrInaccessibleMemOnly]
>;

// exp with compr bit set.
def int_amdgcn_exp_compr : Intrinsic <[], [
  llvm_i32_ty,       // tgt,
  llvm_i32_ty,       // en
  llvm_anyvector_ty, // src0 (v2f16 or v2i16)
  LLVMMatchType<0>,  // src1
  llvm_i1_ty,        // done
  llvm_i1_ty         // vm
  ],
  [ImmArg<0>, ImmArg<1>, ImmArg<4>, ImmArg<5>, IntrInaccessibleMemOnly]
>;

def int_amdgcn_buffer_wbinvl1_sc :
  GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1_sc">,
  Intrinsic<[], [], []>;

def int_amdgcn_buffer_wbinvl1 :
  GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1">,
  Intrinsic<[], [], []>;

def int_amdgcn_s_dcache_inv :
  GCCBuiltin<"__builtin_amdgcn_s_dcache_inv">,
  Intrinsic<[], [], []>;

def int_amdgcn_s_memtime :
  GCCBuiltin<"__builtin_amdgcn_s_memtime">,
  Intrinsic<[llvm_i64_ty], []>;

def int_amdgcn_s_sleep :
  GCCBuiltin<"__builtin_amdgcn_s_sleep">,
  Intrinsic<[], [llvm_i32_ty], [ImmArg<0>]> {
}

def int_amdgcn_s_incperflevel :
  GCCBuiltin<"__builtin_amdgcn_s_incperflevel">,
  Intrinsic<[], [llvm_i32_ty], [ImmArg<0>]> {
}

def int_amdgcn_s_decperflevel :
  GCCBuiltin<"__builtin_amdgcn_s_decperflevel">,
  Intrinsic<[], [llvm_i32_ty], [ImmArg<0>]> {
}

def int_amdgcn_s_getreg :
  GCCBuiltin<"__builtin_amdgcn_s_getreg">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty],
  [IntrInaccessibleMemOnly, IntrReadMem, IntrSpeculatable, ImmArg<0>]
>;

// int_amdgcn_s_getpc is provided to allow a specific style of position
// independent code to determine the high part of its address when it is
// known (through convention) that the code and any data of interest does
// not cross a 4Gb address boundary. Use for any other purpose may not
// produce the desired results as optimizations may cause code movement,
// especially as we explicitly use IntrNoMem to allow optimizations.
def int_amdgcn_s_getpc :
  GCCBuiltin<"__builtin_amdgcn_s_getpc">,
  Intrinsic<[llvm_i64_ty], [], [IntrNoMem, IntrSpeculatable]>;

// __builtin_amdgcn_interp_mov <param>, <attr_chan>, <attr>, <m0>
// param values: 0 = P10, 1 = P20, 2 = P0
def int_amdgcn_interp_mov :
  GCCBuiltin<"__builtin_amdgcn_interp_mov">,
  Intrinsic<[llvm_float_ty],
            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable, ImmArg<1>, ImmArg<2>]>;

// __builtin_amdgcn_interp_p1 <i>, <attr_chan>, <attr>, <m0>
// This intrinsic reads from lds, but the memory values are constant,
// so it behaves like IntrNoMem.
def int_amdgcn_interp_p1 :
  GCCBuiltin<"__builtin_amdgcn_interp_p1">,
  Intrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable, ImmArg<1>, ImmArg<2>]>;

// __builtin_amdgcn_interp_p2 <p1>, <j>, <attr_chan>, <attr>, <m0>
def int_amdgcn_interp_p2 :
  GCCBuiltin<"__builtin_amdgcn_interp_p2">,
  Intrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable, ImmArg<2>, ImmArg<3>]>;
// See int_amdgcn_v_interp_p1 for why this is IntrNoMem.

// __builtin_amdgcn_interp_p1_f16 <i>, <attr_chan>, <attr>, <high>, <m0>
def int_amdgcn_interp_p1_f16 :
  GCCBuiltin<"__builtin_amdgcn_interp_p1_f16">,
  Intrinsic<[llvm_float_ty],
            [llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable, ImmArg<1>, ImmArg<2>, ImmArg<3>]>;

// __builtin_amdgcn_interp_p2_f16 <p1>, <j>, <attr_chan>, <attr>, <high>, <m0>
def int_amdgcn_interp_p2_f16 :
  GCCBuiltin<"__builtin_amdgcn_interp_p2_f16">,
  Intrinsic<[llvm_half_ty],
            [llvm_float_ty, llvm_float_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i32_ty],
            [IntrNoMem, IntrSpeculatable, ImmArg<2>, ImmArg<3>, ImmArg<4>]>;

// Pixel shaders only: whether the current pixel is live (i.e. not a helper
// invocation for derivative computation).
def int_amdgcn_ps_live : Intrinsic <
  [llvm_i1_ty],
  [],
  [IntrNoMem]>;

def int_amdgcn_mbcnt_lo :
  GCCBuiltin<"__builtin_amdgcn_mbcnt_lo">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;

def int_amdgcn_mbcnt_hi :
  GCCBuiltin<"__builtin_amdgcn_mbcnt_hi">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem]>;

// llvm.amdgcn.ds.swizzle src offset
def int_amdgcn_ds_swizzle :
  GCCBuiltin<"__builtin_amdgcn_ds_swizzle">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
            [IntrNoMem, IntrConvergent, ImmArg<1>]>;

def int_amdgcn_ubfe : Intrinsic<[llvm_anyint_ty],
    [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_sbfe : Intrinsic<[llvm_anyint_ty],
    [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty],
    [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_lerp :
  GCCBuiltin<"__builtin_amdgcn_lerp">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_sad_u8 :
  GCCBuiltin<"__builtin_amdgcn_sad_u8">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_msad_u8 :
  GCCBuiltin<"__builtin_amdgcn_msad_u8">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_sad_hi_u8 :
  GCCBuiltin<"__builtin_amdgcn_sad_hi_u8">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_sad_u16 :
  GCCBuiltin<"__builtin_amdgcn_sad_u16">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_qsad_pk_u16_u8 :
  GCCBuiltin<"__builtin_amdgcn_qsad_pk_u16_u8">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_mqsad_pk_u16_u8 :
  GCCBuiltin<"__builtin_amdgcn_mqsad_pk_u16_u8">,
  Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i32_ty, llvm_i64_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_mqsad_u32_u8 :
  GCCBuiltin<"__builtin_amdgcn_mqsad_u32_u8">,
  Intrinsic<[llvm_v4i32_ty], [llvm_i64_ty, llvm_i32_ty, llvm_v4i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_cvt_pk_u8_f32 :
  GCCBuiltin<"__builtin_amdgcn_cvt_pk_u8_f32">,
  Intrinsic<[llvm_i32_ty], [llvm_float_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;
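
// Note (illustrative, not from the original file): the trailing i32 operand of
// the icmp/fcmp intrinsics below takes a CmpInst::Predicate value, e.g.
// 32 (ICMP_EQ) for llvm.amdgcn.icmp.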

def int_amdgcn_icmp :
  Intrinsic<[llvm_anyint_ty], [llvm_anyint_ty, LLVMMatchType<1>, llvm_i32_ty],
            [IntrNoMem, IntrConvergent, ImmArg<2>]>;

def int_amdgcn_fcmp :
  Intrinsic<[llvm_anyint_ty], [llvm_anyfloat_ty, LLVMMatchType<1>, llvm_i32_ty],
            [IntrNoMem, IntrConvergent, ImmArg<2>]>;

def int_amdgcn_readfirstlane :
  GCCBuiltin<"__builtin_amdgcn_readfirstlane">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrConvergent]>;

// The lane argument must be uniform across the currently active threads of the
// current wave. Otherwise, the result is undefined.
def int_amdgcn_readlane :
  GCCBuiltin<"__builtin_amdgcn_readlane">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;

// The value to write and lane select arguments must be uniform across the
// currently active threads of the current wave. Otherwise, the result is
// undefined.
def int_amdgcn_writelane :
  GCCBuiltin<"__builtin_amdgcn_writelane">,
  Intrinsic<[llvm_i32_ty], [
    llvm_i32_ty,    // uniform value to write: returned by the selected lane
    llvm_i32_ty,    // uniform lane select
    llvm_i32_ty     // returned by all lanes other than the selected one
  ],
  [IntrNoMem, IntrConvergent]
>;

def int_amdgcn_alignbit :
  GCCBuiltin<"__builtin_amdgcn_alignbit">, Intrinsic<[llvm_i32_ty],
  [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_alignbyte : GCCBuiltin<"__builtin_amdgcn_alignbyte">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_mul_i24 : Intrinsic<[llvm_i32_ty],
  [llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

def int_amdgcn_mul_u24 : Intrinsic<[llvm_i32_ty],
  [llvm_i32_ty, llvm_i32_ty],
  [IntrNoMem, IntrSpeculatable]
>;

// llvm.amdgcn.ds.gws.init(i32 bar_val, i32 resource_id)
//
// bar_val is the total number of waves that will wait on this
// barrier, minus 1.
def int_amdgcn_ds_gws_init :
  GCCBuiltin<"__builtin_amdgcn_ds_gws_init">,
  Intrinsic<[],
  [llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrWriteMem, IntrInaccessibleMemOnly], "",
  [SDNPMemOperand]
>;

// llvm.amdgcn.ds.gws.barrier(i32 vsrc0, i32 resource_id)
// bar_val is the total number of waves that will wait on this
// barrier, minus 1.
def int_amdgcn_ds_gws_barrier :
  GCCBuiltin<"__builtin_amdgcn_ds_gws_barrier">,
  Intrinsic<[],
  [llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrInaccessibleMemOnly], "",
  [SDNPMemOperand]
>;

// llvm.amdgcn.ds.gws.sema.v(i32 resource_id)
def int_amdgcn_ds_gws_sema_v :
  GCCBuiltin<"__builtin_amdgcn_ds_gws_sema_v">,
  Intrinsic<[],
  [llvm_i32_ty],
  [IntrConvergent, IntrInaccessibleMemOnly], "",
  [SDNPMemOperand]
>;

// llvm.amdgcn.ds.gws.sema.br(i32 vsrc, i32 resource_id)
def int_amdgcn_ds_gws_sema_br :
  GCCBuiltin<"__builtin_amdgcn_ds_gws_sema_br">,
  Intrinsic<[],
  [llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrInaccessibleMemOnly], "",
  [SDNPMemOperand]
>;

// llvm.amdgcn.ds.gws.sema.p(i32 resource_id)
def int_amdgcn_ds_gws_sema_p :
  GCCBuiltin<"__builtin_amdgcn_ds_gws_sema_p">,
  Intrinsic<[],
  [llvm_i32_ty],
  [IntrConvergent, IntrInaccessibleMemOnly], "",
  [SDNPMemOperand]
>;

// llvm.amdgcn.ds.gws.sema.release.all(i32 resource_id)
def int_amdgcn_ds_gws_sema_release_all :
  GCCBuiltin<"__builtin_amdgcn_ds_gws_sema_release_all">,
  Intrinsic<[],
  [llvm_i32_ty],
  [IntrConvergent, IntrInaccessibleMemOnly], "",
  [SDNPMemOperand]
>;

// Copies the source value to the destination value, with the guarantee that
// the source value is computed as if the entire program were executed in WQM.
def int_amdgcn_wqm : Intrinsic<[llvm_any_ty],
  [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

// Copies the source value to the destination value, such that the source
// is computed as if the entire program were executed in WQM if any other
// program code executes in WQM.
def int_amdgcn_softwqm : Intrinsic<[llvm_any_ty],
  [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable]
>;

// Return true if at least one thread within the pixel quad passes true into
// the function.
def int_amdgcn_wqm_vote : Intrinsic<[llvm_i1_ty],
  [llvm_i1_ty], [IntrNoMem, IntrConvergent]
>;

// If false, set EXEC=0 for the current thread until the end of program.
def int_amdgcn_kill : Intrinsic<[], [llvm_i1_ty], []>;

// Copies the active channels of the source value to the destination value,
// with the guarantee that the source value is computed as if the entire
// program were executed in Whole Wavefront Mode, i.e. with all channels
// enabled, with a few exceptions: - Phi nodes which require WWM return an
// undefined value.
def int_amdgcn_wwm : Intrinsic<[llvm_any_ty],
  [LLVMMatchType<0>], [IntrNoMem, IntrSpeculatable, IntrConvergent]
>;

// Given a value, copies it while setting all the inactive lanes to a given
// value. Note that OpenGL helper lanes are considered active, so if the
// program ever uses WQM, then the instruction and the first source will be
// computed in WQM.
def int_amdgcn_set_inactive :
  Intrinsic<[llvm_anyint_ty],
            [LLVMMatchType<0>,  // value to be copied
             LLVMMatchType<0>], // value for the inactive lanes to take
            [IntrNoMem, IntrConvergent]>;

//===----------------------------------------------------------------------===//
// CI+ Intrinsics
//===----------------------------------------------------------------------===//

def int_amdgcn_s_dcache_inv_vol :
  GCCBuiltin<"__builtin_amdgcn_s_dcache_inv_vol">,
  Intrinsic<[], [], []>;

def int_amdgcn_buffer_wbinvl1_vol :
  GCCBuiltin<"__builtin_amdgcn_buffer_wbinvl1_vol">,
  Intrinsic<[], [], []>;

//===----------------------------------------------------------------------===//
// VI Intrinsics
//===----------------------------------------------------------------------===//

// llvm.amdgcn.mov.dpp.i32 <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
def int_amdgcn_mov_dpp :
  Intrinsic<[llvm_anyint_ty],
            [LLVMMatchType<0>, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
             llvm_i1_ty], [IntrNoMem, IntrConvergent, ImmArg<1>,
                           ImmArg<2>, ImmArg<3>, ImmArg<4>]>;
1495 // llvm.amdgcn.update.dpp.i32 <old> <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
1496 // Should be equivalent to:
1497 // v_mov_b32 <dest> <old>
1498 // v_mov_b32 <dest> <src> <dpp_ctrl> <row_mask> <bank_mask> <bound_ctrl>
1499 def int_amdgcn_update_dpp :
1500 Intrinsic<[llvm_anyint_ty],
1501 [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty,
1502 llvm_i32_ty, llvm_i32_ty, llvm_i1_ty],
1503 [IntrNoMem, IntrConvergent,
1504 ImmArg<2>, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
def int_amdgcn_s_dcache_wb :
  GCCBuiltin<"__builtin_amdgcn_s_dcache_wb">,
  Intrinsic<[], [], []>;

def int_amdgcn_s_dcache_wb_vol :
  GCCBuiltin<"__builtin_amdgcn_s_dcache_wb_vol">,
  Intrinsic<[], [], []>;

def int_amdgcn_s_memrealtime :
  GCCBuiltin<"__builtin_amdgcn_s_memrealtime">,
  Intrinsic<[llvm_i64_ty]>;
// llvm.amdgcn.ds.permute <index> <src>
def int_amdgcn_ds_permute :
  GCCBuiltin<"__builtin_amdgcn_ds_permute">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;

// llvm.amdgcn.ds.bpermute <index> <src>
def int_amdgcn_ds_bpermute :
  GCCBuiltin<"__builtin_amdgcn_ds_bpermute">,
  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
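// Note: the bpermute index is a byte address, so a lane number must be
// scaled by 4. Illustrative IR usage (a sketch; %lane is a hypothetical
// value):
//   %byteidx = shl i32 %lane, 2
//   %v = call i32 @llvm.amdgcn.ds.bpermute(i32 %byteidx, i32 %src)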
//===----------------------------------------------------------------------===//
// GFX10 Intrinsics
//===----------------------------------------------------------------------===//

// llvm.amdgcn.permlane16 <old> <src0> <src1> <src2> <fi> <bound_control>
def int_amdgcn_permlane16 : GCCBuiltin<"__builtin_amdgcn_permlane16">,
  Intrinsic<[llvm_i32_ty],
            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i1_ty],
            [IntrNoMem, IntrConvergent, ImmArg<4>, ImmArg<5>]>;

// llvm.amdgcn.permlanex16 <old> <src0> <src1> <src2> <fi> <bound_control>
def int_amdgcn_permlanex16 : GCCBuiltin<"__builtin_amdgcn_permlanex16">,
  Intrinsic<[llvm_i32_ty],
            [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i1_ty, llvm_i1_ty],
            [IntrNoMem, IntrConvergent, ImmArg<4>, ImmArg<5>]>;
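// Illustrative IR usage (a sketch; %sel_lo/%sel_hi are hypothetical values
// packing the per-row 4-bit lane selects, with fi and bound_control clear):
//   %v = call i32 @llvm.amdgcn.permlane16(i32 %old, i32 %src,
//                                         i32 %sel_lo, i32 %sel_hi,
//                                         i1 false, i1 false)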
// llvm.amdgcn.mov.dpp8.i32 <src> <sel>
// <sel> is a 32-bit constant whose high 8 bits must be zero; it selects
// the lanes to read from.
def int_amdgcn_mov_dpp8 :
  Intrinsic<[llvm_anyint_ty],
            [LLVMMatchType<0>, llvm_i32_ty],
            [IntrNoMem, IntrConvergent, ImmArg<1>]>;
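// Illustrative example (a sketch): each 3-bit field of <sel> picks the
// source lane within a group of eight; 16434824 (0xFAC688) is assumed to
// encode the identity mapping, lane i reading lane i.
//   %v = call i32 @llvm.amdgcn.mov.dpp8.i32(i32 %src, i32 16434824)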
def int_amdgcn_s_get_waveid_in_workgroup :
  GCCBuiltin<"__builtin_amdgcn_s_get_waveid_in_workgroup">,
  Intrinsic<[llvm_i32_ty], [], [IntrReadMem, IntrInaccessibleMemOnly]>;
//===----------------------------------------------------------------------===//
// Deep learning intrinsics.
//===----------------------------------------------------------------------===//
// f32 %r = llvm.amdgcn.fdot2(v2f16 %a, v2f16 %b, f32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
def int_amdgcn_fdot2 :
  GCCBuiltin<"__builtin_amdgcn_fdot2">,
  Intrinsic<
    [llvm_float_ty], // %r
    [
      llvm_v2f16_ty, // %a
      llvm_v2f16_ty, // %b
      llvm_float_ty, // %c
      llvm_i1_ty     // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, ImmArg<3>]
  >;
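// Illustrative IR usage (a sketch; the final i1 disables clamping):
//   %r = call float @llvm.amdgcn.fdot2(<2 x half> %a, <2 x half> %b,
//                                      float %c, i1 false)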
// i32 %r = llvm.amdgcn.sdot2(v2i16 %a, v2i16 %b, i32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
def int_amdgcn_sdot2 :
  GCCBuiltin<"__builtin_amdgcn_sdot2">,
  Intrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_v2i16_ty, // %a
      llvm_v2i16_ty, // %b
      llvm_i32_ty,   // %c
      llvm_i1_ty     // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, ImmArg<3>]
  >;
// u32 %r = llvm.amdgcn.udot2(v2u16 %a, v2u16 %b, u32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %c
def int_amdgcn_udot2 :
  GCCBuiltin<"__builtin_amdgcn_udot2">,
  Intrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_v2i16_ty, // %a
      llvm_v2i16_ty, // %b
      llvm_i32_ty,   // %c
      llvm_i1_ty     // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, ImmArg<3>]
  >;
// i32 %r = llvm.amdgcn.sdot4(v4i8 (as i32) %a, v4i8 (as i32) %b, i32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] + %c
def int_amdgcn_sdot4 :
  GCCBuiltin<"__builtin_amdgcn_sdot4">,
  Intrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i32_ty, // %a
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, ImmArg<3>]
  >;
// u32 %r = llvm.amdgcn.udot4(v4u8 (as u32) %a, v4u8 (as u32) %b, u32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] + %c
def int_amdgcn_udot4 :
  GCCBuiltin<"__builtin_amdgcn_udot4">,
  Intrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i32_ty, // %a
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, ImmArg<3>]
  >;
// i32 %r = llvm.amdgcn.sdot8(v8i4 (as i32) %a, v8i4 (as i32) %b, i32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] +
//        %a[4] * %b[4] + %a[5] * %b[5] + %a[6] * %b[6] + %a[7] * %b[7] + %c
def int_amdgcn_sdot8 :
  GCCBuiltin<"__builtin_amdgcn_sdot8">,
  Intrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i32_ty, // %a
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, ImmArg<3>]
  >;
// u32 %r = llvm.amdgcn.udot8(v8u4 (as u32) %a, v8u4 (as u32) %b, u32 %c, i1 %clamp)
//   %r = %a[0] * %b[0] + %a[1] * %b[1] + %a[2] * %b[2] + %a[3] * %b[3] +
//        %a[4] * %b[4] + %a[5] * %b[5] + %a[6] * %b[6] + %a[7] * %b[7] + %c
def int_amdgcn_udot8 :
  GCCBuiltin<"__builtin_amdgcn_udot8">,
  Intrinsic<
    [llvm_i32_ty], // %r
    [
      llvm_i32_ty, // %a
      llvm_i32_ty, // %b
      llvm_i32_ty, // %c
      llvm_i1_ty   // %clamp
    ],
    [IntrNoMem, IntrSpeculatable, ImmArg<3>]
  >;
//===----------------------------------------------------------------------===//
// gfx908 intrinsics
//===----------------------------------------------------------------------===//
class AMDGPUBufferAtomicNoRtn : Intrinsic <
  [],
  [llvm_anyfloat_ty, // vdata(VGPR)
   llvm_v4i32_ty,    // rsrc(SGPR)
   llvm_i32_ty,      // vindex(VGPR)
   llvm_i32_ty,      // offset(SGPR/VGPR/imm)
   llvm_i1_ty],      // slc(imm)
  [], "", [SDNPMemOperand]>,
  AMDGPURsrcIntrinsic<1, 0>;
class AMDGPUGlobalAtomicNoRtn : Intrinsic <
  [],
  [llvm_anyptr_ty,    // vaddr
   llvm_anyfloat_ty], // vdata(VGPR)
  [IntrArgMemOnly, NoCapture<0>], "", [SDNPMemOperand]>;
def int_amdgcn_buffer_atomic_fadd : AMDGPUBufferAtomicNoRtn;
def int_amdgcn_global_atomic_fadd : AMDGPUGlobalAtomicNoRtn;
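// Illustrative IR usage of the global variant (a sketch; the mangled
// suffix below assumes the p1f32/f32 instantiation of the overloads):
//   call void @llvm.amdgcn.global.atomic.fadd.p1f32.f32(
//       float addrspace(1)* %ptr, float %val)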
// llvm.amdgcn.mfma.f32.* vdst, srcA, srcB, srcC, cbsz, abid, blgp
def int_amdgcn_mfma_f32_32x32x1f32 : Intrinsic<[llvm_v32f32_ty],
  [llvm_float_ty, llvm_float_ty, llvm_v32f32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
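// Illustrative IR usage (a sketch): across the wavefront this accumulates
// a 32x32 outer-product tile, D = A * B + C, with cbsz/abid/blgp left at 0:
//   %d = call <32 x float> @llvm.amdgcn.mfma.f32.32x32x1f32(
//            float %a, float %b, <32 x float> %c, i32 0, i32 0, i32 0)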
def int_amdgcn_mfma_f32_16x16x1f32 : Intrinsic<[llvm_v16f32_ty],
  [llvm_float_ty, llvm_float_ty, llvm_v16f32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;

def int_amdgcn_mfma_f32_4x4x1f32 : Intrinsic<[llvm_v4f32_ty],
  [llvm_float_ty, llvm_float_ty, llvm_v4f32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;

def int_amdgcn_mfma_f32_32x32x2f32 : Intrinsic<[llvm_v16f32_ty],
  [llvm_float_ty, llvm_float_ty, llvm_v16f32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;

def int_amdgcn_mfma_f32_16x16x4f32 : Intrinsic<[llvm_v4f32_ty],
  [llvm_float_ty, llvm_float_ty, llvm_v4f32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;

def int_amdgcn_mfma_f32_32x32x4f16 : Intrinsic<[llvm_v32f32_ty],
  [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v32f32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;

def int_amdgcn_mfma_f32_16x16x4f16 : Intrinsic<[llvm_v16f32_ty],
  [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v16f32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;

def int_amdgcn_mfma_f32_4x4x4f16 : Intrinsic<[llvm_v4f32_ty],
  [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v4f32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;

def int_amdgcn_mfma_f32_32x32x8f16 : Intrinsic<[llvm_v16f32_ty],
  [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v16f32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;

def int_amdgcn_mfma_f32_16x16x16f16 : Intrinsic<[llvm_v4f32_ty],
  [llvm_v4f16_ty, llvm_v4f16_ty, llvm_v4f32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;

def int_amdgcn_mfma_i32_32x32x4i8 : Intrinsic<[llvm_v32i32_ty],
  [llvm_i32_ty, llvm_i32_ty, llvm_v32i32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;

def int_amdgcn_mfma_i32_16x16x4i8 : Intrinsic<[llvm_v16i32_ty],
  [llvm_i32_ty, llvm_i32_ty, llvm_v16i32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;

def int_amdgcn_mfma_i32_4x4x4i8 : Intrinsic<[llvm_v4i32_ty],
  [llvm_i32_ty, llvm_i32_ty, llvm_v4i32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;

def int_amdgcn_mfma_i32_32x32x8i8 : Intrinsic<[llvm_v16i32_ty],
  [llvm_i32_ty, llvm_i32_ty, llvm_v16i32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;

def int_amdgcn_mfma_i32_16x16x16i8 : Intrinsic<[llvm_v4i32_ty],
  [llvm_i32_ty, llvm_i32_ty, llvm_v4i32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;

def int_amdgcn_mfma_f32_32x32x2bf16 : Intrinsic<[llvm_v32f32_ty],
  [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v32f32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;

def int_amdgcn_mfma_f32_16x16x2bf16 : Intrinsic<[llvm_v16f32_ty],
  [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v16f32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;

def int_amdgcn_mfma_f32_4x4x2bf16 : Intrinsic<[llvm_v4f32_ty],
  [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v4f32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;

def int_amdgcn_mfma_f32_32x32x4bf16 : Intrinsic<[llvm_v16f32_ty],
  [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v16f32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;

def int_amdgcn_mfma_f32_16x16x8bf16 : Intrinsic<[llvm_v4f32_ty],
  [llvm_v2i16_ty, llvm_v2i16_ty, llvm_v4f32_ty,
   llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
  [IntrConvergent, IntrNoMem, ImmArg<3>, ImmArg<4>, ImmArg<5>]>;
//===----------------------------------------------------------------------===//
// Special Intrinsics for backend internal use only. No frontend
// should emit calls to these.
//===----------------------------------------------------------------------===//
def int_amdgcn_if : Intrinsic<[llvm_i1_ty, llvm_anyint_ty],
  [llvm_i1_ty], [IntrConvergent]
>;

def int_amdgcn_else : Intrinsic<[llvm_i1_ty, llvm_anyint_ty],
  [llvm_anyint_ty], [IntrConvergent]
>;

def int_amdgcn_if_break : Intrinsic<[llvm_anyint_ty],
  [llvm_i1_ty, llvm_anyint_ty], [IntrNoMem, IntrConvergent]
>;

def int_amdgcn_loop : Intrinsic<[llvm_i1_ty],
  [llvm_anyint_ty], [IntrConvergent]
>;

def int_amdgcn_end_cf : Intrinsic<[], [llvm_anyint_ty], [IntrConvergent]>;
// Represent unreachable in a divergent region.
def int_amdgcn_unreachable : Intrinsic<[], [], [IntrConvergent]>;
// Emit 2.5 ulp, no denormal division. Should only be inserted by
// a pass based on !fpmath metadata.
def int_amdgcn_fdiv_fast : Intrinsic<
  [llvm_float_ty], [llvm_float_ty, llvm_float_ty],
  [IntrNoMem, IntrSpeculatable]
>;
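// Illustrative IR, as a pass might emit it (a sketch):
//   %q = call float @llvm.amdgcn.fdiv.fast(float %num, float %den)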