; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 %s

; Make sure that AMDGPUCodeGenPrepare introduces mul24 intrinsics
; after SLSR, as the intrinsics would interfere. It's unclear if these
; should be introduced before LSR or not. It seems to help in some
; cases, and hurt others.
; Simple loop: the (x & 0xffffff) * y pattern in %bb23 must still be
; matched to v_mul_u32_u24 after strength reduction.
define void @lsr_order_mul24_0(i32 %arg, i32 %arg2, i32 %arg6, i32 %arg13, i32 %arg16) #0 {
; GFX9-LABEL: lsr_order_mul24_0:
; GFX9: ; %bb.0: ; %bb
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: global_load_dword v5, v[0:1], off
; GFX9-NEXT: v_and_b32_e32 v2, 0xffffff, v2
; GFX9-NEXT: v_sub_u32_e32 v4, v4, v1
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: ds_write_b32 v0, v5
; GFX9-NEXT: BB0_1: ; %bb23
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_mul_u32_u24_e32 v5, v0, v2
; GFX9-NEXT: v_add_u32_e32 v0, v0, v1
; GFX9-NEXT: v_sub_u32_e32 v5, v4, v5
; GFX9-NEXT: v_add_u32_e32 v5, v5, v0
; GFX9-NEXT: v_cmp_ge_u32_e32 vcc, v5, v3
; GFX9-NEXT: s_or_b64 s[4:5], vcc, s[4:5]
; GFX9-NEXT: s_andn2_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_cbranch_execnz BB0_1
; GFX9-NEXT: ; %bb.2: ; %.loopexit
; GFX9-NEXT: s_or_b64 exec, exec, s[4:5]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
bb:
  ; Loop-invariant 24-bit mask of the multiplier operand.
  %tmp22 = and i32 %arg6, 16777215
  br label %bb23

.loopexit:                                        ; preds = %bb23
  ret void

bb23:                                             ; preds = %bb23, %bb
  %tmp24 = phi i32 [ %arg, %bb ], [ %tmp47, %bb23 ]
  ; Both multiply operands fit in 24 bits -> candidate for mul24.
  %tmp28 = and i32 %tmp24, 16777215
  %tmp29 = mul i32 %tmp28, %tmp22
  %tmp30 = sub i32 %tmp24, %tmp29
  %tmp31 = add i32 %tmp30, %arg16
  %tmp37 = icmp ult i32 %tmp31, %arg13
  %tmp44 = load float, float addrspace(1)* undef, align 4
  store float %tmp44, float addrspace(3)* undef, align 4
  %tmp47 = add i32 %tmp24, %arg2
  br i1 %tmp37, label %bb23, label %.loopexit
}
; Larger loop with a guarded entry (%bb19) and predicated global load:
; the masked multiply feeding %tmp29 must still become v_mul_u32_u24.
define void @lsr_order_mul24_1(i32 %arg, i32 %arg1, i32 %arg2, float addrspace(3)* nocapture %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 %arg7, i32 %arg8, i32 %arg9, float addrspace(1)* nocapture readonly %arg10, i32 %arg11, i32 %arg12, i32 %arg13, i32 %arg14, i32 %arg15, i32 %arg16, i1 zeroext %arg17, i1 zeroext %arg18) #0 {
; GFX9-LABEL: lsr_order_mul24_1:
; GFX9: ; %bb.0: ; %bb
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_and_b32_e32 v5, 1, v18
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 1, v5
; GFX9-NEXT: v_cmp_lt_u32_e64 s[4:5], v0, v1
; GFX9-NEXT: s_and_saveexec_b64 s[10:11], s[4:5]
; GFX9-NEXT: ; mask branch BB1_4
; GFX9-NEXT: s_cbranch_execz BB1_4
; GFX9-NEXT: BB1_1: ; %bb19
; GFX9-NEXT: v_cvt_f32_u32_e32 v7, v6
; GFX9-NEXT: v_and_b32_e32 v5, 0xffffff, v6
; GFX9-NEXT: v_add_u32_e32 v6, v4, v0
; GFX9-NEXT: v_lshl_add_u32 v3, v6, 2, v3
; GFX9-NEXT: v_rcp_iflag_f32_e32 v4, v7
; GFX9-NEXT: v_lshlrev_b32_e32 v6, 2, v2
; GFX9-NEXT: v_add_u32_e32 v7, v17, v12
; GFX9-NEXT: s_mov_b64 s[12:13], 0
; GFX9-NEXT: BB1_2: ; %bb23
; GFX9-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX9-NEXT: v_cvt_f32_u32_e32 v8, v0
; GFX9-NEXT: v_add_u32_e32 v9, v17, v0
; GFX9-NEXT: v_add_u32_e32 v12, v7, v0
; GFX9-NEXT: v_add_u32_e32 v0, v0, v2
; GFX9-NEXT: v_madak_f32 v8, v8, v4, 0x3727c5ac
; GFX9-NEXT: v_cvt_u32_f32_e32 v8, v8
; GFX9-NEXT: v_cmp_ge_u32_e64 s[4:5], v0, v1
; GFX9-NEXT: v_mul_u32_u24_e32 v18, v8, v5
; GFX9-NEXT: v_add_u32_e32 v8, v8, v16
; GFX9-NEXT: v_cmp_lt_u32_e64 s[6:7], v8, v13
; GFX9-NEXT: v_mul_lo_u32 v8, v8, v15
; GFX9-NEXT: v_sub_u32_e32 v19, v9, v18
; GFX9-NEXT: v_cmp_lt_u32_e64 s[8:9], v19, v14
; GFX9-NEXT: s_and_b64 s[6:7], s[6:7], s[8:9]
; GFX9-NEXT: v_sub_u32_e32 v12, v12, v18
; GFX9-NEXT: s_and_b64 s[6:7], s[6:7], vcc
; GFX9-NEXT: v_add_u32_e32 v8, v12, v8
; GFX9-NEXT: v_mov_b32_e32 v9, 0
; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, v8, s[6:7]
; GFX9-NEXT: v_lshlrev_b64 v[8:9], 2, v[8:9]
; GFX9-NEXT: s_or_b64 s[12:13], s[4:5], s[12:13]
; GFX9-NEXT: v_add_co_u32_e64 v8, s[4:5], v10, v8
; GFX9-NEXT: v_addc_co_u32_e64 v9, s[4:5], v11, v9, s[4:5]
; GFX9-NEXT: global_load_dword v8, v[8:9], off
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_cndmask_b32_e64 v8, 0, v8, s[6:7]
; GFX9-NEXT: ds_write_b32 v3, v8
; GFX9-NEXT: v_add_u32_e32 v3, v3, v6
; GFX9-NEXT: s_andn2_b64 exec, exec, s[12:13]
; GFX9-NEXT: s_cbranch_execnz BB1_2
; GFX9-NEXT: ; %bb.3: ; %Flow
; GFX9-NEXT: s_or_b64 exec, exec, s[12:13]
; GFX9-NEXT: BB1_4: ; %Flow3
; GFX9-NEXT: s_or_b64 exec, exec, s[10:11]
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
bb:
  ; Guard: only enter the loop preheader when %arg < %arg1.
  %tmp = icmp ult i32 %arg, %arg1
  br i1 %tmp, label %bb19, label %.loopexit

bb19:                                             ; preds = %bb
  ; Loop-invariant reciprocal and 24-bit mask, hoisted to the preheader.
  %tmp20 = uitofp i32 %arg6 to float
  %tmp21 = fdiv float 1.000000e+00, %tmp20
  %tmp22 = and i32 %arg6, 16777215
  br label %bb23

.loopexit:                                        ; preds = %bb23, %bb
  ret void

bb23:                                             ; preds = %bb19, %bb23
  %tmp24 = phi i32 [ %arg, %bb19 ], [ %tmp47, %bb23 ]
  %tmp25 = uitofp i32 %tmp24 to float
  %tmp26 = tail call float @llvm.fmuladd.f32(float %tmp25, float %tmp21, float 0x3EE4F8B580000000) #2
  %tmp27 = fptoui float %tmp26 to i32
  ; Masked multiply: both operands provably fit in 24 bits.
  %tmp28 = and i32 %tmp27, 16777215
  %tmp29 = mul i32 %tmp28, %tmp22
  %tmp30 = sub i32 %tmp24, %tmp29
  %tmp31 = add i32 %tmp30, %arg16
  %tmp32 = add i32 %tmp27, %arg15
  %tmp33 = mul i32 %tmp32, %arg14
  %tmp34 = add i32 %tmp33, %arg11
  %tmp35 = add i32 %tmp34, %tmp31
  %tmp36 = add i32 %tmp24, %arg4
  %tmp37 = icmp ult i32 %tmp31, %arg13
  %tmp38 = icmp ult i32 %tmp32, %arg12
  %tmp39 = and i1 %tmp38, %tmp37
  %tmp40 = and i1 %tmp39, %arg17
  ; Predicated load: index is zeroed (and the result masked) when the
  ; bounds/flag test fails.
  %tmp41 = zext i32 %tmp35 to i64
  %tmp42 = select i1 %tmp40, i64 %tmp41, i64 0
  %tmp43 = getelementptr inbounds float, float addrspace(1)* %arg10, i64 %tmp42
  %tmp44 = load float, float addrspace(1)* %tmp43, align 4
  %tmp45 = select i1 %tmp40, float %tmp44, float 0.000000e+00
  %tmp46 = getelementptr inbounds float, float addrspace(3)* %arg3, i32 %tmp36
  store float %tmp45, float addrspace(3)* %tmp46, align 4
  %tmp47 = add i32 %tmp24, %arg2
  %tmp48 = icmp ult i32 %tmp47, %arg1
  br i1 %tmp48, label %bb23, label %.loopexit
}
; Straight-line SLSR candidate chain: b*s, (b+1)*s, (b+2)*s. After SLSR
; the later products become adds of a common stride, and the first stays
; a 24-bit multiply.
define void @slsr1_0(i32 %b.arg, i32 %s.arg) #0 {
; GFX9-LABEL: slsr1_0:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mul_u32_u24_e32 v3, v0, v1
; GFX9-NEXT: v_and_b32_e32 v2, 0xffffff, v1
; GFX9-NEXT: global_store_dword v[0:1], v3, off
; GFX9-NEXT: v_mad_u32_u24 v0, v0, v1, v2
; GFX9-NEXT: global_store_dword v[0:1], v0, off
; GFX9-NEXT: v_add_u32_e32 v0, v0, v2
; GFX9-NEXT: global_store_dword v[0:1], v0, off
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
  ; Mask both operands to 24 bits so mul24 matching applies.
  %b = and i32 %b.arg, 16777215
  %s = and i32 %s.arg, 16777215
; CHECK-LABEL: @slsr1(
  ; b * s
  %mul0 = mul i32 %b, %s
  store volatile i32 %mul0, i32 addrspace(1)* undef
  ; (b + 1) * s
  %b1 = add i32 %b, 1
  %mul1 = mul i32 %b1, %s
  store volatile i32 %mul1, i32 addrspace(1)* undef
  ; (b + 2) * s
  %b2 = add i32 %b, 2
  %mul2 = mul i32 %b2, %s
  store volatile i32 %mul2, i32 addrspace(1)* undef
  ret void
}
; Same SLSR candidate chain as @slsr1_0, but each product is passed to a
; call, forcing the values to live across calls (CSR spills/reloads).
define void @slsr1_1(i32 %b.arg, i32 %s.arg) #0 {
; GFX9-LABEL: slsr1_1:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_or_saveexec_b64 s[4:5], -1
; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-NEXT: v_writelane_b32 v35, s34, 4
; GFX9-NEXT: s_mov_b32 s34, s32
; GFX9-NEXT: s_add_u32 s32, s32, 0x800
; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s34 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s34 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s34 ; 4-byte Folded Spill
; GFX9-NEXT: v_writelane_b32 v35, s36, 0
; GFX9-NEXT: v_writelane_b32 v35, s37, 1
; GFX9-NEXT: s_getpc_b64 s[4:5]
; GFX9-NEXT: s_add_u32 s4, s4, foo@gotpcrel32@lo+4
; GFX9-NEXT: s_addc_u32 s5, s5, foo@gotpcrel32@hi+4
; GFX9-NEXT: s_load_dwordx2 s[36:37], s[4:5], 0x0
; GFX9-NEXT: v_mov_b32_e32 v32, v1
; GFX9-NEXT: v_mov_b32_e32 v33, v0
; GFX9-NEXT: v_writelane_b32 v35, s30, 2
; GFX9-NEXT: v_mul_u32_u24_e32 v0, v33, v32
; GFX9-NEXT: v_writelane_b32 v35, s31, 3
; GFX9-NEXT: v_and_b32_e32 v34, 0xffffff, v32
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_swappc_b64 s[30:31], s[36:37]
; GFX9-NEXT: v_mad_u32_u24 v32, v33, v32, v34
; GFX9-NEXT: v_mov_b32_e32 v0, v32
; GFX9-NEXT: s_swappc_b64 s[30:31], s[36:37]
; GFX9-NEXT: v_add_u32_e32 v0, v32, v34
; GFX9-NEXT: s_swappc_b64 s[30:31], s[36:37]
; GFX9-NEXT: v_readlane_b32 s4, v35, 2
; GFX9-NEXT: v_readlane_b32 s5, v35, 3
; GFX9-NEXT: v_readlane_b32 s37, v35, 1
; GFX9-NEXT: v_readlane_b32 s36, v35, 0
; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s34 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v33, off, s[0:3], s34 offset:4 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s34 offset:8 ; 4-byte Folded Reload
; GFX9-NEXT: s_sub_u32 s32, s32, 0x800
; GFX9-NEXT: v_readlane_b32 s34, v35, 4
; GFX9-NEXT: s_or_saveexec_b64 s[6:7], -1
; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
; GFX9-NEXT: s_mov_b64 exec, s[6:7]
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[4:5]
  ; Mask both operands to 24 bits so mul24 matching applies.
  %b = and i32 %b.arg, 16777215
  %s = and i32 %s.arg, 16777215
; CHECK-LABEL: @slsr1(
  ; foo(b * s)
  %mul0 = mul i32 %b, %s
  call void @foo(i32 %mul0)
  ; foo((b + 1) * s)
  %b1 = add i32 %b, 1
  %mul1 = mul i32 %b1, %s
  call void @foo(i32 %mul1)
  ; foo((b + 2) * s)
  %b2 = add i32 %b, 2
  %mul2 = mul i32 %b2, %s
  call void @foo(i32 %mul2)
  ret void
}
declare void @foo(i32) #0

declare float @llvm.fmuladd.f32(float, float, float) #1

attributes #0 = { nounwind willreturn }
attributes #1 = { nounwind readnone speculatable }
; #2 is referenced by the fmuladd call site in @lsr_order_mul24_1 and
; must be defined for the module to parse.
attributes #2 = { nounwind }