; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX10 %s
; RUN: llc -march=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX11 %s

define amdgpu_ps float @_amdgpu_ps_main() #0 {
; GFX10-LABEL: _amdgpu_ps_main:
; GFX10:       ; %bb.0: ; %.entry
; GFX10-NEXT:    image_sample v[0:1], v[0:1], s[0:7], s[0:3] dmask:0x3 dim:SQ_RSRC_IMG_2D
; GFX10-NEXT:    v_mov_b32_e32 v4, 0
; GFX10-NEXT:    v_mov_b32_e32 v7, 0x3ca3d70a
; GFX10-NEXT:    s_waitcnt vmcnt(0)
; GFX10-NEXT:    s_clause 0x1
; GFX10-NEXT:    image_sample v2, v[0:1], s[0:7], s[0:3] dmask:0x4 dim:SQ_RSRC_IMG_2D
; GFX10-NEXT:    image_sample v3, v[0:1], s[0:7], s[0:3] dmask:0x1 dim:SQ_RSRC_IMG_2D
; GFX10-NEXT:    s_waitcnt vmcnt(0)
; GFX10-NEXT:    image_load_mip v4, v[2:4], s[0:7] dmask:0x4 dim:SQ_RSRC_IMG_2D unorm
; GFX10-NEXT:    s_clause 0x3
; GFX10-NEXT:    s_buffer_load_dword s24, s[0:3], 0x5c
; GFX10-NEXT:    s_buffer_load_dword s28, s[0:3], 0x7c
; GFX10-NEXT:    s_buffer_load_dword s29, s[0:3], 0xc0
; GFX10-NEXT:    s_waitcnt_depctr 0xffe3
; GFX10-NEXT:    s_buffer_load_dwordx4 s[0:3], s[0:3], 0x40
; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
; GFX10-NEXT:    s_clause 0x1
; GFX10-NEXT:    s_buffer_load_dwordx4 s[4:7], s[0:3], 0x50
; GFX10-NEXT:    s_buffer_load_dword s0, s[0:3], 0x2c
; GFX10-NEXT:    v_sub_f32_e64 v5, s24, s28
; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
; GFX10-NEXT:    s_clause 0x4
; GFX10-NEXT:    s_buffer_load_dwordx4 s[8:11], s[0:3], 0x60
; GFX10-NEXT:    s_buffer_load_dwordx4 s[12:15], s[0:3], 0x20
; GFX10-NEXT:    s_buffer_load_dwordx4 s[16:19], s[0:3], 0x0
; GFX10-NEXT:    s_buffer_load_dwordx4 s[20:23], s[0:3], 0x70
; GFX10-NEXT:    s_buffer_load_dwordx4 s[24:27], s[0:3], 0x10
; GFX10-NEXT:    v_fma_f32 v1, v1, v5, s28
; GFX10-NEXT:    v_max_f32_e64 v6, s0, s0 clamp
; GFX10-NEXT:    v_add_f32_e64 v5, s29, -1.0
; GFX10-NEXT:    v_sub_f32_e32 v9, s0, v1
; GFX10-NEXT:    v_fma_f32 v8, -s2, v6, s6
; GFX10-NEXT:    v_fma_f32 v5, v6, v5, 1.0
; GFX10-NEXT:    v_mad_f32 v11, s2, v6, v2
; GFX10-NEXT:    v_fmac_f32_e32 v1, v6, v9
; GFX10-NEXT:    v_fmac_f32_e32 v11, v8, v6
; GFX10-NEXT:    s_waitcnt lgkmcnt(0)
; GFX10-NEXT:    v_mul_f32_e32 v10, s10, v0
; GFX10-NEXT:    v_fma_f32 v0, -v0, s10, s14
; GFX10-NEXT:    v_mul_f32_e32 v9, s18, v2
; GFX10-NEXT:    v_mul_f32_e32 v3, s22, v3
; GFX10-NEXT:    v_fmac_f32_e32 v10, v0, v6
; GFX10-NEXT:    v_sub_f32_e32 v0, v1, v5
; GFX10-NEXT:    v_mul_f32_e32 v1, v9, v6
; GFX10-NEXT:    v_mul_f32_e32 v8, v6, v3
; GFX10-NEXT:    v_fma_f32 v3, -v6, v3, v10
; GFX10-NEXT:    v_fmac_f32_e32 v5, v0, v6
; GFX10-NEXT:    v_fma_f32 v0, v2, s26, -v1
; GFX10-NEXT:    v_fmac_f32_e32 v8, v3, v6
; GFX10-NEXT:    v_fmac_f32_e32 v1, v0, v6
; GFX10-NEXT:    v_mul_f32_e32 v0, v2, v6
; GFX10-NEXT:    s_waitcnt vmcnt(0)
; GFX10-NEXT:    v_add_f32_e32 v4, v4, v11
; GFX10-NEXT:    v_mul_f32_e32 v3, v4, v6
; GFX10-NEXT:    v_fmamk_f32 v4, v5, 0x3c23d70a, v7
; GFX10-NEXT:    v_mul_f32_e32 v1, v3, v1
; GFX10-NEXT:    v_mul_f32_e32 v2, v8, v4
; GFX10-NEXT:    v_fmac_f32_e32 v1, v2, v0
; GFX10-NEXT:    v_max_f32_e32 v0, 0, v1
; GFX10-NEXT:    ; return to shader part epilog
;
; GFX11-LABEL: _amdgpu_ps_main:
; GFX11:       ; %bb.0: ; %.entry
; GFX11-NEXT:    image_sample v[0:1], v[0:1], s[0:7], s[0:3] dmask:0x3 dim:SQ_RSRC_IMG_2D
; GFX11-NEXT:    v_dual_mov_b32 v4, 0 :: v_dual_mov_b32 v7, 0x3ca3d70a
; GFX11-NEXT:    s_waitcnt vmcnt(0)
; GFX11-NEXT:    s_clause 0x1
; GFX11-NEXT:    image_sample v2, v[0:1], s[0:7], s[0:3] dmask:0x4 dim:SQ_RSRC_IMG_2D
; GFX11-NEXT:    image_sample v3, v[0:1], s[0:7], s[0:3] dmask:0x1 dim:SQ_RSRC_IMG_2D
; GFX11-NEXT:    s_waitcnt vmcnt(0)
; GFX11-NEXT:    image_load_mip v4, v[2:4], s[0:7] dmask:0x4 dim:SQ_RSRC_IMG_2D unorm
; GFX11-NEXT:    s_clause 0x3
; GFX11-NEXT:    s_buffer_load_b32 s24, s[0:3], 0x5c
; GFX11-NEXT:    s_buffer_load_b32 s28, s[0:3], 0x7c
; GFX11-NEXT:    s_buffer_load_b32 s29, s[0:3], 0xc0
; GFX11-NEXT:    s_buffer_load_b128 s[0:3], s[0:3], 0x40
; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
; GFX11-NEXT:    s_clause 0x1
; GFX11-NEXT:    s_buffer_load_b128 s[4:7], s[0:3], 0x50
; GFX11-NEXT:    s_buffer_load_b32 s0, s[0:3], 0x2c
; GFX11-NEXT:    v_sub_f32_e64 v5, s24, s28
; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
; GFX11-NEXT:    s_clause 0x3
; GFX11-NEXT:    s_buffer_load_b128 s[8:11], s[0:3], 0x60
; GFX11-NEXT:    s_buffer_load_b128 s[12:15], s[0:3], 0x20
; GFX11-NEXT:    s_buffer_load_b128 s[16:19], s[0:3], 0x0
; GFX11-NEXT:    s_buffer_load_b128 s[20:23], s[0:3], 0x70
; GFX11-NEXT:    v_fma_f32 v1, v1, v5, s28
; GFX11-NEXT:    v_max_f32_e64 v6, s0, s0 clamp
; GFX11-NEXT:    v_add_f32_e64 v5, s29, -1.0
; GFX11-NEXT:    s_buffer_load_b128 s[24:27], s[0:3], 0x10
; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT:    v_sub_f32_e32 v9, s0, v1
; GFX11-NEXT:    v_fma_f32 v8, -s2, v6, s6
; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3)
; GFX11-NEXT:    v_fma_f32 v5, v6, v5, 1.0
; GFX11-NEXT:    v_fma_f32 v11, s2, v6, v2
; GFX11-NEXT:    s_waitcnt lgkmcnt(0)
; GFX11-NEXT:    v_mul_f32_e32 v10, s10, v0
; GFX11-NEXT:    v_fma_f32 v0, -v0, s10, s14
; GFX11-NEXT:    v_fmac_f32_e32 v1, v6, v9
; GFX11-NEXT:    v_mul_f32_e32 v9, s18, v2
; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT:    v_fmac_f32_e32 v10, v0, v6
; GFX11-NEXT:    v_sub_f32_e32 v0, v1, v5
; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT:    v_fmac_f32_e32 v5, v0, v6
; GFX11-NEXT:    v_mul_f32_e32 v3, s22, v3
; GFX11-NEXT:    v_dual_fmac_f32 v11, v8, v6 :: v_dual_mul_f32 v8, v6, v3
; GFX11-NEXT:    v_mul_f32_e32 v1, v9, v6
; GFX11-NEXT:    v_fma_f32 v3, -v6, v3, v10
; GFX11-NEXT:    s_waitcnt vmcnt(0)
; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT:    v_add_f32_e32 v4, v4, v11
; GFX11-NEXT:    v_fma_f32 v0, v2, s26, -v1
; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_3) | instid1(VALU_DEP_1)
; GFX11-NEXT:    v_fmac_f32_e32 v1, v0, v6
; GFX11-NEXT:    v_mul_f32_e32 v0, v2, v6
; GFX11-NEXT:    v_fmac_f32_e32 v8, v3, v6
; GFX11-NEXT:    v_dual_mul_f32 v3, v4, v6 :: v_dual_fmamk_f32 v4, v5, 0x3c23d70a, v7
; GFX11-NEXT:    v_dual_mul_f32 v1, v3, v1 :: v_dual_mul_f32 v2, v8, v4
; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT:    v_fmac_f32_e32 v1, v2, v0
; GFX11-NEXT:    v_max_f32_e32 v0, 0, v1
; GFX11-NEXT:    ; return to shader part epilog
.entry:
  %0 = call <3 x float> @llvm.amdgcn.image.sample.2d.v3f32.f32(i32 7, float undef, float undef, <8 x i32> undef, <4 x i32> undef, i1 false, i32 0, i32 0)
  %.i2243 = extractelement <3 x float> %0, i32 2
  %1 = call <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32> undef, i32 0, i32 0)
  %2 = shufflevector <3 x i32> %1, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
  %3 = bitcast <4 x i32> %2 to <4 x float>
  %.i2248 = extractelement <4 x float> %3, i32 2
  %.i2249 = fmul reassoc nnan nsz arcp contract afn float %.i2243, %.i2248
  %4 = call reassoc nnan nsz arcp contract afn float @llvm.amdgcn.fmed3.f32(float undef, float 0.000000e+00, float 1.000000e+00)
  %5 = call <3 x float> @llvm.amdgcn.image.sample.2d.v3f32.f32(i32 7, float undef, float undef, <8 x i32> undef, <4 x i32> undef, i1 false, i32 0, i32 0)
  %.i2333 = extractelement <3 x float> %5, i32 2
  %6 = call reassoc nnan nsz arcp contract afn float @llvm.amdgcn.fmed3.f32(float undef, float 0.000000e+00, float 1.000000e+00)
  %7 = call <2 x float> @llvm.amdgcn.image.sample.2d.v2f32.f32(i32 3, float undef, float undef, <8 x i32> undef, <4 x i32> undef, i1 false, i32 0, i32 0)
  %.i1408 = extractelement <2 x float> %7, i32 1
  %.i0364 = extractelement <2 x float> %7, i32 0
  %8 = call float @llvm.amdgcn.image.sample.2d.f32.f32(i32 1, float undef, float undef, <8 x i32> undef, <4 x i32> undef, i1 false, i32 0, i32 0)
  %9 = call <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32> undef, i32 112, i32 0)
  %10 = shufflevector <3 x i32> %9, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
  %11 = bitcast <4 x i32> %10 to <4 x float>
  %.i2360 = extractelement <4 x float> %11, i32 2
  %.i2363 = fmul reassoc nnan nsz arcp contract afn float %.i2360, %8
  %12 = call <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32> undef, i32 96, i32 0)
  %13 = shufflevector <3 x i32> %12, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
  %14 = bitcast <4 x i32> %13 to <4 x float>
  %.i2367 = extractelement <4 x float> %14, i32 2
  %.i2370 = fmul reassoc nnan nsz arcp contract afn float %.i0364, %.i2367
  %15 = call <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32> undef, i32 32, i32 0)
  %16 = shufflevector <3 x i32> %15, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
  %17 = bitcast <4 x i32> %16 to <4 x float>
  %.i2373 = extractelement <4 x float> %17, i32 2
  %.i2376 = fsub reassoc nnan nsz arcp contract afn float %.i2373, %.i2370
  %.i2383 = fmul reassoc nnan nsz arcp contract afn float %.i2376, %6
  %.i2386 = fadd reassoc nnan nsz arcp contract afn float %.i2370, %.i2383
  %18 = call reassoc nnan nsz arcp contract afn float @llvm.amdgcn.fmed3.f32(float undef, float 0.000000e+00, float 1.000000e+00)
  %19 = fmul reassoc nnan nsz arcp contract afn float %18, %.i2363
  %.i2394 = fsub reassoc nnan nsz arcp contract afn float %.i2386, %19
  %.i2397 = fmul reassoc nnan nsz arcp contract afn float %.i2363, %18
  %.i2404 = fmul reassoc nnan nsz arcp contract afn float %.i2394, %4
  %.i2407 = fadd reassoc nnan nsz arcp contract afn float %.i2397, %.i2404
  %20 = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> undef, i32 92, i32 0)
  %21 = bitcast i32 %20 to float
  %22 = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> undef, i32 124, i32 0)
  %23 = bitcast i32 %22 to float
  %24 = fsub reassoc nnan nsz arcp contract afn float %21, %23
  %25 = fmul reassoc nnan nsz arcp contract afn float %.i1408, %24
  %26 = fadd reassoc nnan nsz arcp contract afn float %25, %23
  %27 = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> undef, i32 44, i32 0)
  %28 = bitcast i32 %27 to float
  %29 = fsub reassoc nnan nsz arcp contract afn float %28, %26
  %30 = fmul reassoc nnan nsz arcp contract afn float %6, %29
  %31 = fadd reassoc nnan nsz arcp contract afn float %26, %30
  %32 = call i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32> undef, i32 192, i32 0)
  %33 = bitcast i32 %32 to float
  %34 = fadd reassoc nnan nsz arcp contract afn float %33, -1.000000e+00
  %35 = fmul reassoc nnan nsz arcp contract afn float %18, %34
  %36 = fadd reassoc nnan nsz arcp contract afn float %35, 1.000000e+00
  %37 = fsub reassoc nnan nsz arcp contract afn float %31, %36
  %38 = fmul reassoc nnan nsz arcp contract afn float %37, %4
  %39 = fadd reassoc nnan nsz arcp contract afn float %36, %38
  %40 = fmul reassoc nnan nsz arcp contract afn float %39, 0x3F847AE140000000
  %41 = fadd reassoc nnan nsz arcp contract afn float %40, 0x3F947AE140000000
  %.i2415 = fmul reassoc nnan nsz arcp contract afn float %.i2407, %41
  %42 = call <3 x float> @llvm.amdgcn.image.load.mip.2d.v3f32.i32(i32 7, i32 undef, i32 undef, i32 0, <8 x i32> undef, i32 0, i32 0)
  %.i2521 = extractelement <3 x float> %42, i32 2
  %43 = call reassoc nnan nsz arcp contract afn float @llvm.amdgcn.fmed3.f32(float undef, float 0.000000e+00, float 1.000000e+00)
  %44 = call <3 x float> @llvm.amdgcn.image.sample.2d.v3f32.f32(i32 7, float undef, float undef, <8 x i32> undef, <4 x i32> undef, i1 false, i32 0, i32 0)
  %.i2465 = extractelement <3 x float> %44, i32 2
  %.i2466 = fmul reassoc nnan nsz arcp contract afn float %.i2465, %43
  %.i2469 = fmul reassoc nnan nsz arcp contract afn float %.i2415, %.i2466
  %45 = call <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32> undef, i32 64, i32 0)
  %46 = shufflevector <3 x i32> %45, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
  %47 = bitcast <4 x i32> %46 to <4 x float>
  %.i2476 = extractelement <4 x float> %47, i32 2
  %.i2479 = fmul reassoc nnan nsz arcp contract afn float %.i2476, %18
  %48 = call <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32> undef, i32 80, i32 0)
  %49 = shufflevector <3 x i32> %48, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
  %50 = bitcast <4 x i32> %49 to <4 x float>
  %.i2482 = extractelement <4 x float> %50, i32 2
  %.i2485 = fsub reassoc nnan nsz arcp contract afn float %.i2482, %.i2479
  %.i2488 = fmul reassoc nnan nsz arcp contract afn float %.i2249, %18
  %.i2491 = fmul reassoc nnan nsz arcp contract afn float %.i2485, %4
  %.i2494 = fadd reassoc nnan nsz arcp contract afn float %.i2479, %.i2491
  %51 = call <3 x float> @llvm.amdgcn.image.sample.2d.v3f32.f32(i32 7, float undef, float undef, <8 x i32> undef, <4 x i32> undef, i1 false, i32 0, i32 0)
  %.i2515 = extractelement <3 x float> %51, i32 2
  %.i2516 = fadd reassoc nnan nsz arcp contract afn float %.i2515, %.i2494
  %.i2522 = fadd reassoc nnan nsz arcp contract afn float %.i2521, %.i2516
  %.i2525 = fmul reassoc nnan nsz arcp contract afn float %.i2522, %43
  %52 = call <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32> undef, i32 16, i32 0)
  %53 = shufflevector <3 x i32> %52, <3 x i32> poison, <4 x i32> <i32 0, i32 1, i32 2, i32 undef>
  %54 = bitcast <4 x i32> %53 to <4 x float>
  %.i2530 = extractelement <4 x float> %54, i32 2
  %.i2531 = fmul reassoc nnan nsz arcp contract afn float %.i2333, %.i2530
  %.i2536 = fsub reassoc nnan nsz arcp contract afn float %.i2531, %.i2488
  %.i2539 = fmul reassoc nnan nsz arcp contract afn float %.i2536, %4
  %.i2542 = fadd reassoc nnan nsz arcp contract afn float %.i2488, %.i2539
  %.i2545 = fmul reassoc nnan nsz arcp contract afn float %.i2525, %.i2542
  %.i2548 = fadd reassoc nnan nsz arcp contract afn float %.i2469, %.i2545
  %.i2551 = call reassoc nnan nsz arcp contract afn float @llvm.maxnum.f32(float %.i2548, float 0.000000e+00)
  ret float %.i2551
}

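; %a * %b + %c * %d + %e should select to one v_fma_f32 followed by a
; v_fmac_f32 that accumulates the remaining product into the same register.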
define float @fmac_sequence_simple(float %a, float %b, float %c, float %d, float %e) #0 {
; GFX10-LABEL: fmac_sequence_simple:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT:    v_fma_f32 v2, v2, v3, v4
; GFX10-NEXT:    v_fmac_f32_e32 v2, v0, v1
; GFX10-NEXT:    v_mov_b32_e32 v0, v2
; GFX10-NEXT:    s_setpc_b64 s[30:31]
;
; GFX11-LABEL: fmac_sequence_simple:
; GFX11:       ; %bb.0:
; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT:    v_fma_f32 v2, v2, v3, v4
; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT:    v_fmac_f32_e32 v2, v0, v1
; GFX11-NEXT:    v_mov_b32_e32 v0, v2
; GFX11-NEXT:    s_setpc_b64 s[30:31]
  %t0 = fmul fast float %a, %b
  %t1 = fmul fast float %c, %d
  %t2 = fadd fast float %t0, %t1
  %t5 = fadd fast float %t2, %e
  ret float %t5
}

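; With three products in the sum, only the first fma needs a separate
; destination; the remaining products accumulate with v_fmac_f32.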
define float @fmac_sequence_innermost_fmul(float %a, float %b, float %c, float %d, float %e, float %f, float %g) #0 {
; GFX10-LABEL: fmac_sequence_innermost_fmul:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT:    v_mad_f32 v2, v2, v3, v6
; GFX10-NEXT:    v_fmac_f32_e32 v2, v0, v1
; GFX10-NEXT:    v_fmac_f32_e32 v2, v4, v5
; GFX10-NEXT:    v_mov_b32_e32 v0, v2
; GFX10-NEXT:    s_setpc_b64 s[30:31]
;
; GFX11-LABEL: fmac_sequence_innermost_fmul:
; GFX11:       ; %bb.0:
; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT:    v_fma_f32 v2, v2, v3, v6
; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT:    v_fmac_f32_e32 v2, v0, v1
; GFX11-NEXT:    v_fmac_f32_e32 v2, v4, v5
; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT:    v_mov_b32_e32 v0, v2
; GFX11-NEXT:    s_setpc_b64 s[30:31]
  %t0 = fmul fast float %a, %b
  %t1 = fmul fast float %c, %d
  %t2 = fadd fast float %t0, %t1
  %t3 = fmul fast float %e, %f
  %t4 = fadd fast float %t2, %t3
  %t5 = fadd fast float %t4, %g
  ret float %t5
}

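; Swapping the operands of the final fadd must not change the selected
; fma/fmac sequence.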
define float @fmac_sequence_innermost_fmul_swapped_operands(float %a, float %b, float %c, float %d, float %e, float %f, float %g) #0 {
; GFX10-LABEL: fmac_sequence_innermost_fmul_swapped_operands:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX10-NEXT:    v_mad_f32 v2, v2, v3, v6
; GFX10-NEXT:    v_fmac_f32_e32 v2, v0, v1
; GFX10-NEXT:    v_fmac_f32_e32 v2, v4, v5
; GFX10-NEXT:    v_mov_b32_e32 v0, v2
; GFX10-NEXT:    s_setpc_b64 s[30:31]
;
; GFX11-LABEL: fmac_sequence_innermost_fmul_swapped_operands:
; GFX11:       ; %bb.0:
; GFX11-NEXT:    s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT:    v_fma_f32 v2, v2, v3, v6
; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT:    v_fmac_f32_e32 v2, v0, v1
; GFX11-NEXT:    v_fmac_f32_e32 v2, v4, v5
; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT:    v_mov_b32_e32 v0, v2
; GFX11-NEXT:    s_setpc_b64 s[30:31]
  %t0 = fmul fast float %a, %b
  %t1 = fmul fast float %c, %d
  %t2 = fadd fast float %t0, %t1
  %t3 = fmul fast float %e, %f
  %t4 = fadd fast float %t2, %t3
  %t5 = fadd fast float %g, %t4
  ret float %t5
}

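; All multiplicands are SGPRs, so the VGPR addend %g serves as the
; accumulator for the whole fmac sequence.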
define amdgpu_ps float @fmac_sequence_innermost_fmul_sgpr(float inreg %a, float inreg %b, float inreg %c, float inreg %d, float inreg %e, float inreg %f, float %g) #0 {
; GFX10-LABEL: fmac_sequence_innermost_fmul_sgpr:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    v_mac_f32_e64 v0, s2, s3
; GFX10-NEXT:    v_fmac_f32_e64 v0, s0, s1
; GFX10-NEXT:    v_fmac_f32_e64 v0, s4, s5
; GFX10-NEXT:    ; return to shader part epilog
;
; GFX11-LABEL: fmac_sequence_innermost_fmul_sgpr:
; GFX11:       ; %bb.0:
; GFX11-NEXT:    v_fmac_f32_e64 v0, s2, s3
; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT:    v_fmac_f32_e64 v0, s0, s1
; GFX11-NEXT:    v_fmac_f32_e64 v0, s4, s5
; GFX11-NEXT:    ; return to shader part epilog
  %t0 = fmul fast float %a, %b
  %t1 = fmul fast float %c, %d
  %t2 = fadd fast float %t0, %t1
  %t3 = fmul fast float %e, %f
  %t4 = fadd fast float %t2, %t3
  %t5 = fadd fast float %t4, %g
  ret float %t5
}

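; The intermediate sum %t2 has more than one use, so it is computed into its
; own register before being consumed by the later fma and fmac.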
define amdgpu_ps float @fmac_sequence_innermost_fmul_multiple_use(float inreg %a, float inreg %b, float inreg %c, float inreg %d, float inreg %e, float inreg %f, float %g) #0 {
; GFX10-LABEL: fmac_sequence_innermost_fmul_multiple_use:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    v_mul_f32_e64 v1, s2, s3
; GFX10-NEXT:    v_fmac_f32_e64 v1, s0, s1
; GFX10-NEXT:    v_fma_f32 v2, s5, s4, v1
; GFX10-NEXT:    v_fmac_f32_e32 v1, s5, v2
; GFX10-NEXT:    v_add_f32_e32 v0, v1, v0
; GFX10-NEXT:    ; return to shader part epilog
;
; GFX11-LABEL: fmac_sequence_innermost_fmul_multiple_use:
; GFX11:       ; %bb.0:
; GFX11-NEXT:    v_mul_f32_e64 v1, s2, s3
; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT:    v_fmac_f32_e64 v1, s0, s1
; GFX11-NEXT:    v_fma_f32 v2, s5, s4, v1
; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT:    v_fmac_f32_e32 v1, s5, v2
; GFX11-NEXT:    v_add_f32_e32 v0, v1, v0
; GFX11-NEXT:    ; return to shader part epilog
  %t0 = fmul fast float %a, %b
  %t1 = fmul fast float %c, %d
  %t2 = fadd fast float %t0, %t1
  %t3 = fmul fast float %e, %f
  %t4 = fadd fast float %t2, %t3
  %t5 = fmul fast float %f, %t4
  %t6 = fadd fast float %t5, %t2
  %t7 = fadd fast float %t6, %g
  ret float %t7
}

373 ; "fmul %m, 2.0" could select to an FMA instruction, but it is no better than
374 ; selecting it as a multiply. In some cases the multiply is better because
375 ; SIFoldOperands can fold it into a previous instruction as an output modifier.
define amdgpu_ps float @fma_vs_output_modifier(float %x, i32 %n) #0 {
; GFX10-LABEL: fma_vs_output_modifier:
; GFX10:       ; %bb.0:
; GFX10-NEXT:    v_cvt_f32_i32_e64 v1, v1 mul:2
; GFX10-NEXT:    v_mul_f32_e32 v0, v0, v0
; GFX10-NEXT:    v_mul_f32_e32 v0, v0, v1
; GFX10-NEXT:    ; return to shader part epilog
;
; GFX11-LABEL: fma_vs_output_modifier:
; GFX11:       ; %bb.0:
; GFX11-NEXT:    v_cvt_f32_i32_e64 v1, v1 mul:2
; GFX11-NEXT:    v_mul_f32_e32 v0, v0, v0
; GFX11-NEXT:    s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT:    v_mul_f32_e32 v0, v0, v1
; GFX11-NEXT:    ; return to shader part epilog
  %s = sitofp i32 %n to float
  %m = fmul contract float %x, %x
  %a = fmul contract float %m, 2.0
  %r = fmul reassoc nsz float %a, %s
  ret float %r
}

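; Here the fadd of %m to itself is selected as a single v_mul_f32 with the
; mul:2 output modifier rather than as an FMA.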
define amdgpu_ps float @fma_vs_output_modifier_2(float %x) #0 {
; GCN-LABEL: fma_vs_output_modifier_2:
; GCN:       ; %bb.0:
; GCN-NEXT:    v_mul_f32_e64 v0, v0, v0 mul:2
; GCN-NEXT:    ; return to shader part epilog
  %m = fmul contract float %x, %x
  %a = fadd nsz contract float %m, %m
  ret float %a
}

; Function Attrs: nofree nosync nounwind readnone speculatable willreturn
declare float @llvm.maxnum.f32(float, float) #1

; Function Attrs: nounwind readnone speculatable willreturn
declare float @llvm.amdgcn.fmed3.f32(float, float, float) #2

; Function Attrs: nounwind readonly willreturn
declare <2 x float> @llvm.amdgcn.image.sample.2d.v2f32.f32(i32 immarg, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #3

; Function Attrs: nounwind readonly willreturn
declare float @llvm.amdgcn.image.sample.2d.f32.f32(i32 immarg, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #3

; Function Attrs: nounwind readonly willreturn
declare <3 x float> @llvm.amdgcn.image.sample.2d.v3f32.f32(i32 immarg, float, float, <8 x i32>, <4 x i32>, i1 immarg, i32 immarg, i32 immarg) #3

; Function Attrs: nounwind readonly willreturn
declare <3 x float> @llvm.amdgcn.image.load.mip.2d.v3f32.i32(i32 immarg, i32, i32, i32, <8 x i32>, i32 immarg, i32 immarg) #3

; Function Attrs: nounwind readnone willreturn
declare i32 @llvm.amdgcn.s.buffer.load.i32(<4 x i32>, i32, i32 immarg) #3

; Function Attrs: nounwind readnone willreturn
declare <3 x i32> @llvm.amdgcn.s.buffer.load.v3i32(<4 x i32>, i32, i32 immarg) #3

attributes #0 = { "denormal-fp-math-f32"="preserve-sign" }
attributes #1 = { nofree nosync nounwind readnone speculatable willreturn }
attributes #2 = { nounwind readnone speculatable willreturn }
attributes #3 = { nounwind readonly willreturn }