; RUN: llc < %s -march=r600 -mcpu=redwood | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -march=r600 -mcpu=cayman | FileCheck %s --check-prefix=EG --check-prefix=FUNC
; RUN: llc < %s -march=amdgcn -verify-machineinstrs | FileCheck %s --check-prefix=SI --check-prefix=FUNC --check-prefix=GCN
; RUN: llc < %s -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s --check-prefix=VI --check-prefix=FUNC --check-prefix=GCN --check-prefix=GCN2
; RUN: llc < %s -march=amdgcn -mcpu=fiji -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s --check-prefix=VI --check-prefix=FUNC --check-prefix=GCN --check-prefix=GCN2

declare i32 @llvm.amdgcn.workitem.id.x() nounwind readnone

; FUNC-LABEL: {{^}}u32_mad24:
; EG: MULADD_UINT24
; SI: v_mad_u32_u24
; VI: s_mul_{{[iu]}}32
; VI: s_add_{{[iu]}}32

define amdgpu_kernel void @u32_mad24(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c) {
entry:
  ; The shl/lshr pairs clear the high 8 bits, leaving 24-bit operands.
  %0 = shl i32 %a, 8
  %a_24 = lshr i32 %0, 8
  %1 = shl i32 %b, 8
  %b_24 = lshr i32 %1, 8
  %2 = mul i32 %a_24, %b_24
  %3 = add i32 %2, %c
  store i32 %3, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}i16_mad24:
; The order of A and B does not matter.
; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x

; GCN: s_mul_i32 [[MUL:s[0-9]]], {{[s][0-9], [s][0-9]}}
; GCN: s_add_i32 [[MAD:s[0-9]]], [[MUL]], s{{[0-9]}}
; GCN: s_sext_i32_i16 [[EXT:s[0-9]]], [[MAD]]
; GCN: v_mov_b32_e32 v0, [[EXT]]
define amdgpu_kernel void @i16_mad24(i32 addrspace(1)* %out, i16 %a, i16 %b, i16 %c) {
entry:
  %0 = mul i16 %a, %b
  %1 = add i16 %0, %c
  %2 = sext i16 %1 to i32
  store i32 %2, i32 addrspace(1)* %out
  ret void
}

; FIXME: Need to handle non-uniform case for function below (load without gep).
; FUNC-LABEL: {{^}}i8_mad24:
; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x

; GCN: s_mul_i32 [[MUL:s[0-9]]], {{[s][0-9], [s][0-9]}}
; GCN: s_add_i32 [[MAD:s[0-9]]], [[MUL]], s{{[0-9]}}
; GCN: s_sext_i32_i8 [[EXT:s[0-9]]], [[MAD]]
; GCN: v_mov_b32_e32 v0, [[EXT]]
define amdgpu_kernel void @i8_mad24(i32 addrspace(1)* %out, i8 %a, i8 %b, i8 %c) {
entry:
  %0 = mul i8 %a, %b
  %1 = add i8 %0, %c
  %2 = sext i8 %1 to i32
  store i32 %2, i32 addrspace(1)* %out
  ret void
}

; This tests for a bug where the mad_u24 pattern matcher would call
; SimplifyDemandedBits on the first operand of the mul instruction
; assuming that the pattern would be matched to a 24-bit mad. This
; led to some instructions being incorrectly erased when the entire
; 24-bit mad pattern wasn't being matched.
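;
; For reference, the 24-bit mad pattern the matcher looks for is a mul
; whose operands have their high 8 bits known to be zero, feeding an add.
; A minimal sketch (illustrative names only, not covered by any check
; line in this file):
;   %a24 = and i32 %a, 16777215  ; 0xffffff: high 8 bits known zero
;   %b24 = and i32 %b, 16777215
;   %mul = mul i32 %a24, %b24
;   %mad = add i32 %mul, %c      ; selected as v_mad_u32_u24 on GCN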

; Check that the select instruction is not deleted.
; FUNC-LABEL: {{^}}i24_i32_i32_mad:
; EG: CNDE_INT
; SI: v_cndmask
; GCN2: s_cselect
define amdgpu_kernel void @i24_i32_i32_mad(i32 addrspace(1)* %out, i32 %a, i32 %b, i32 %c, i32 %d) {
entry:
  %0 = ashr i32 %a, 8
  %1 = icmp ne i32 %c, 0
  %2 = select i1 %1, i32 %0, i32 34
  %3 = mul i32 %2, %c
  %4 = add i32 %3, %d
  store i32 %4, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}extra_and:
; SI-NOT: v_and
; SI: v_mad_u32_u24
; SI: v_mad_u32_u24
define amdgpu_kernel void @extra_and(i32 addrspace(1)* %arg, i32 %arg2, i32 %arg3) {
bb:
  br label %bb4

bb4:                                              ; preds = %bb4, %bb
  %tmp = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp5 = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp6 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp7 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp8 = and i32 %tmp7, 16777215
  %tmp9 = and i32 %tmp6, 16777215
  %tmp10 = and i32 %tmp5, 16777215
  %tmp11 = and i32 %tmp, 16777215
  %tmp12 = mul i32 %tmp8, %tmp11
  %tmp13 = add i32 %arg2, %tmp12
  %tmp14 = mul i32 %tmp9, %tmp11
  %tmp15 = add i32 %arg3, %tmp14
  %tmp16 = add nuw nsw i32 %tmp13, %tmp15
  %tmp17 = icmp eq i32 %tmp16, 8
  br i1 %tmp17, label %bb18, label %bb4

bb18:                                             ; preds = %bb4
  store i32 %tmp16, i32 addrspace(1)* %arg
  ret void
}

; FUNC-LABEL: {{^}}dont_remove_shift:
define amdgpu_kernel void @dont_remove_shift(i32 addrspace(1)* %arg, i32 %arg2, i32 %arg3) {
bb:
  br label %bb4

bb4:                                              ; preds = %bb4, %bb
  %tmp = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp5 = phi i32 [ 0, %bb ], [ %tmp13, %bb4 ]
  %tmp6 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp7 = phi i32 [ 0, %bb ], [ %tmp15, %bb4 ]
  %tmp8 = lshr i32 %tmp7, 8
  %tmp9 = lshr i32 %tmp6, 8
  %tmp10 = lshr i32 %tmp5, 8
  %tmp11 = lshr i32 %tmp, 8
  %tmp12 = mul i32 %tmp8, %tmp11
  %tmp13 = add i32 %arg2, %tmp12
  %tmp14 = mul i32 %tmp9, %tmp11
  %tmp15 = add i32 %arg3, %tmp14
  %tmp16 = add nuw nsw i32 %tmp13, %tmp15
  %tmp17 = icmp eq i32 %tmp16, 8
  br i1 %tmp17, label %bb18, label %bb4

bb18:                                             ; preds = %bb4
  store i32 %tmp16, i32 addrspace(1)* %arg
  ret void
}

; FUNC-LABEL: {{^}}i8_mad_sat_16:
; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x

; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
; GCN: v_med3_i32 v{{[0-9]}}, [[EXT]],
define amdgpu_kernel void @i8_mad_sat_16(i8 addrspace(1)* %out, i8 addrspace(1)* %in0, i8 addrspace(1)* %in1, i8 addrspace(1)* %in2, i64 addrspace(5)* %idx) {
entry:
  %retval.0.i = load i64, i64 addrspace(5)* %idx
  %arrayidx = getelementptr inbounds i8, i8 addrspace(1)* %in0, i64 %retval.0.i
  %arrayidx2 = getelementptr inbounds i8, i8 addrspace(1)* %in1, i64 %retval.0.i
  %arrayidx4 = getelementptr inbounds i8, i8 addrspace(1)* %in2, i64 %retval.0.i
  %l1 = load i8, i8 addrspace(1)* %arrayidx, align 1
  %l2 = load i8, i8 addrspace(1)* %arrayidx2, align 1
  %l3 = load i8, i8 addrspace(1)* %arrayidx4, align 1
  %conv1.i = sext i8 %l1 to i16
  %conv3.i = sext i8 %l2 to i16
  %conv5.i = sext i8 %l3 to i16
  %mul.i.i.i = mul nsw i16 %conv3.i, %conv1.i
  %add.i.i = add i16 %mul.i.i.i, %conv5.i
  %c4 = icmp sgt i16 %add.i.i, -128
  %cond.i.i = select i1 %c4, i16 %add.i.i, i16 -128
  %c5 = icmp slt i16 %cond.i.i, 127
  %cond13.i.i = select i1 %c5, i16 %cond.i.i, i16 127
  %conv8.i = trunc i16 %cond13.i.i to i8
  %arrayidx7 = getelementptr inbounds i8, i8 addrspace(1)* %out, i64 %retval.0.i
  store i8 %conv8.i, i8 addrspace(1)* %arrayidx7, align 1
  ret void
}

; FUNC-LABEL: {{^}}i8_mad_32:
; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x

; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
define amdgpu_kernel void @i8_mad_32(i32 addrspace(1)* %out, i8 addrspace(1)* %a, i8 addrspace(1)* %b, i8 addrspace(1)* %c, i64 addrspace(5)* %idx) {
entry:
  %retval.0.i = load i64, i64 addrspace(5)* %idx
  %arrayidx = getelementptr inbounds i8, i8 addrspace(1)* %a, i64 %retval.0.i
  %arrayidx2 = getelementptr inbounds i8, i8 addrspace(1)* %b, i64 %retval.0.i
  %arrayidx4 = getelementptr inbounds i8, i8 addrspace(1)* %c, i64 %retval.0.i
  %la = load i8, i8 addrspace(1)* %arrayidx, align 1
  %lb = load i8, i8 addrspace(1)* %arrayidx2, align 1
  %lc = load i8, i8 addrspace(1)* %arrayidx4, align 1
  %exta = sext i8 %la to i16
  %extb = sext i8 %lb to i16
  %extc = sext i8 %lc to i16
  %mul = mul i16 %exta, %extb
  %mad = add i16 %mul, %extc
  %mad_ext = sext i16 %mad to i32
  store i32 %mad_ext, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}i8_mad_64:
; EG: MULLO_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; EG: ADD_INT {{[* ]*}}T{{[0-9]}}.[[MAD_CHAN:[XYZW]]]
; The result must be sign-extended
; EG: BFE_INT {{[* ]*}}T{{[0-9]\.[XYZW]}}, PV.[[MAD_CHAN]], 0.0, literal.x

; SI: v_mad_u32_u24 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; VI: v_mad_u16 [[MAD:v[0-9]]], {{[sv][0-9], [sv][0-9]}}
; GCN: v_bfe_i32 [[EXT:v[0-9]]], [[MAD]], 0, 16
define amdgpu_kernel void @i8_mad_64(i64 addrspace(1)* %out, i8 addrspace(1)* %a, i8 addrspace(1)* %b, i8 addrspace(1)* %c, i64 addrspace(5)* %idx) {
entry:
  %retval.0.i = load i64, i64 addrspace(5)* %idx
  %arrayidx = getelementptr inbounds i8, i8 addrspace(1)* %a, i64 %retval.0.i
  %arrayidx2 = getelementptr inbounds i8, i8 addrspace(1)* %b, i64 %retval.0.i
  %arrayidx4 = getelementptr inbounds i8, i8 addrspace(1)* %c, i64 %retval.0.i
  %la = load i8, i8 addrspace(1)* %arrayidx, align 1
  %lb = load i8, i8 addrspace(1)* %arrayidx2, align 1
  %lc = load i8, i8 addrspace(1)* %arrayidx4, align 1
  %exta = sext i8 %la to i16
  %extb = sext i8 %lb to i16
  %extc = sext i8 %lc to i16
  %mul = mul i16 %exta, %extb
  %mad = add i16 %mul, %extc
  %mad_ext = sext i16 %mad to i64
  store i64 %mad_ext, i64 addrspace(1)* %out
  ret void
}

; The ands are asserting the high bits are 0. SimplifyDemandedBits on
; the adds would remove the ands before the target combine on the mul
; had a chance to form mul24. The mul combine would then see
; extractelement with no known bits and fail. All of the mul/add
; combos in this loop should form v_mad_u32_u24.
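;
; Concretely, in a chain like this (illustrative names), folding the first
; and away would discard the known-zero high bits before the mul combine
; runs:
;   %x = and i32 %v, 16777215   ; asserts the high 8 bits of %v are zero
;   %m = mul i32 %x, %y
;   %a = add i32 %m, %z
;   %u = and i32 %a, 16777215   ; only the low 24 bits of the add are used,
;                               ; so SimplifyDemandedBits could strip the
;                               ; and on %v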

; FUNC-LABEL: {{^}}mad24_known_bits_destroyed:
define void @mad24_known_bits_destroyed(i32 %arg, <4 x i32> %arg1, <4 x i32> %arg2, <4 x i32> %arg3, i32 %arg4, i32 %arg5, i32 %arg6, i32 addrspace(1)* %arg7, <4 x i32> addrspace(1)* %arg8) #0 {
bb:
  %tmp = and i32 %arg4, 16777215
  %tmp9 = extractelement <4 x i32> %arg1, i64 1
  %tmp10 = extractelement <4 x i32> %arg3, i64 1
  %tmp11 = and i32 %tmp9, 16777215
  %tmp12 = extractelement <4 x i32> %arg1, i64 2
  %tmp13 = extractelement <4 x i32> %arg3, i64 2
  %tmp14 = and i32 %tmp12, 16777215
  %tmp15 = extractelement <4 x i32> %arg1, i64 3
  %tmp16 = extractelement <4 x i32> %arg3, i64 3
  %tmp17 = and i32 %tmp15, 16777215
  br label %bb19

bb18:                                             ; preds = %bb19
  ret void

bb19:                                             ; preds = %bb19, %bb
  %tmp20 = phi i32 [ %arg, %bb ], [ %tmp40, %bb19 ]
  %tmp21 = phi i32 [ 0, %bb ], [ %tmp54, %bb19 ]
  %tmp22 = phi <4 x i32> [ %arg2, %bb ], [ %tmp53, %bb19 ]
  %tmp23 = and i32 %tmp20, 16777215
  %tmp24 = mul i32 %tmp23, %tmp
  %tmp25 = add i32 %tmp24, %arg5
  %tmp26 = extractelement <4 x i32> %tmp22, i64 1
  %tmp27 = and i32 %tmp26, 16777215
  %tmp28 = mul i32 %tmp27, %tmp11
  %tmp29 = add i32 %tmp28, %tmp10
  %tmp30 = extractelement <4 x i32> %tmp22, i64 2
  %tmp31 = and i32 %tmp30, 16777215
  %tmp32 = mul i32 %tmp31, %tmp14
  %tmp33 = add i32 %tmp32, %tmp13
  %tmp34 = extractelement <4 x i32> %tmp22, i64 3
  %tmp35 = and i32 %tmp34, 16777215
  %tmp36 = mul i32 %tmp35, %tmp17
  %tmp37 = add i32 %tmp36, %tmp16
  %tmp38 = and i32 %tmp25, 16777215
  %tmp39 = mul i32 %tmp38, %tmp
  %tmp40 = add i32 %tmp39, %arg5
  store i32 %tmp40, i32 addrspace(1)* %arg7
  %tmp41 = insertelement <4 x i32> undef, i32 %tmp40, i32 0
  %tmp42 = and i32 %tmp29, 16777215
  %tmp43 = mul i32 %tmp42, %tmp11
  %tmp44 = add i32 %tmp43, %tmp10
  %tmp45 = insertelement <4 x i32> %tmp41, i32 %tmp44, i32 1
  %tmp46 = and i32 %tmp33, 16777215
  %tmp47 = mul i32 %tmp46, %tmp14
  %tmp48 = add i32 %tmp47, %tmp13
  %tmp49 = insertelement <4 x i32> %tmp45, i32 %tmp48, i32 2
  %tmp50 = and i32 %tmp37, 16777215
  %tmp51 = mul i32 %tmp50, %tmp17
  %tmp52 = add i32 %tmp51, %tmp16
  %tmp53 = insertelement <4 x i32> %tmp49, i32 %tmp52, i32 3
  store <4 x i32> %tmp53, <4 x i32> addrspace(1)* %arg8
  %tmp54 = add nuw nsw i32 %tmp21, 1
  %tmp55 = icmp eq i32 %tmp54, %arg6
  br i1 %tmp55, label %bb18, label %bb19
}

attributes #0 = { norecurse nounwind }