1 ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GCN %s
2 ; RUN: llc -march=amdgcn -mcpu=gfx902 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefix=GFX9 %s
; add (zext (icmp ugt x, y)), v: the 0/1 extension should fold into a
; carry-consuming add (uaddo/addcarry), so the compare result feeds
; v_addc directly instead of being materialized as 0/1 first.
4 ; GCN-LABEL: {{^}}add1:
5 ; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
6 ; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, 0, v{{[0-9]+}}, [[CC]]
9 ; GFX9-LABEL: {{^}}add1:
10 ; GFX9: v_addc_co_u32_e{{32|64}} v{{[0-9]+}}, vcc
11 define amdgpu_kernel void @add1(i32 addrspace(1)* nocapture %arg) {
13 %x = tail call i32 @llvm.amdgcn.workitem.id.x()
14 %y = tail call i32 @llvm.amdgcn.workitem.id.y()
15 %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
16 %v = load i32, i32 addrspace(1)* %gep, align 4
17 %cmp = icmp ugt i32 %x, %y
; zext + add is the pattern the backend should turn into a carry add
18 %ext = zext i1 %cmp to i32
19 %add = add i32 %v, %ext
20 store i32 %add, i32 addrspace(1)* %gep, align 4
; Same zext(icmp) + add combine as @add1, but with the result truncated
; to i16; the narrower result type must not block selecting v_addc.
24 ; GCN-LABEL: {{^}}add1_i16:
25 ; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
26 ; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, 0, v{{[0-9]+}}, [[CC]]
29 ; GFX9-LABEL: {{^}}add1_i16:
30 ; GFX9: v_addc_co_u32_e{{32|64}} v{{[0-9]+}}, vcc
31 define i16 @add1_i16(i32 addrspace(1)* nocapture %arg, i16 addrspace(1)* nocapture %dst) {
33 %x = tail call i32 @llvm.amdgcn.workitem.id.x()
34 %y = tail call i32 @llvm.amdgcn.workitem.id.y()
35 %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
36 %v = load i32, i32 addrspace(1)* %gep, align 4
37 %cmp = icmp ugt i32 %x, %y
38 %ext = zext i1 %cmp to i32
39 %add = add i32 %v, %ext
; truncate after the carry add; %dst is not used in the lines visible here
40 %trunc = trunc i32 %add to i16
; add (sext (icmp)), v adds 0 or -1, i.e. subtracts the carry bit:
; expect a borrow-consuming subtract (v_subbrev) instead of a
; materialized -1 operand.
44 ; GCN-LABEL: {{^}}sub1:
45 ; GCN: v_cmp_gt_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
46 ; GCN: v_subbrev_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc
49 ; GFX9-LABEL: {{^}}sub1:
50 ; GFX9: v_subbrev_co_u32_e{{32|64}} v{{[0-9]+}}, vcc
51 define amdgpu_kernel void @sub1(i32 addrspace(1)* nocapture %arg) {
53 %x = tail call i32 @llvm.amdgcn.workitem.id.x()
54 %y = tail call i32 @llvm.amdgcn.workitem.id.y()
55 %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
56 %v = load i32, i32 addrspace(1)* %gep, align 4
57 %cmp = icmp ugt i32 %x, %y
; sext makes %ext 0 or -1, so this add is really a subtract-borrow
58 %ext = sext i1 %cmp to i32
59 %add = add i32 %v, %ext
60 store i32 %add, i32 addrspace(1)* %gep, align 4
; The zext(icmp) operand should still fold into a carry add (v_addc with
; a register second operand) when the carry add is chained with a second
; plain add of %a.
64 ; GCN-LABEL: {{^}}add_adde:
65 ; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
66 ; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
70 ; GFX9-LABEL: {{^}}add_adde:
71 ; GFX9: v_addc_co_u32_e{{32|64}} v{{[0-9]+}}, vcc
72 define amdgpu_kernel void @add_adde(i32 addrspace(1)* nocapture %arg, i32 %a) {
74 %x = tail call i32 @llvm.amdgcn.workitem.id.x()
75 %y = tail call i32 @llvm.amdgcn.workitem.id.y()
76 %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
77 %v = load i32, i32 addrspace(1)* %gep, align 4
78 %cmp = icmp ugt i32 %x, %y
79 %ext = zext i1 %cmp to i32
; carry add first, then the ordinary add of %a
80 %adde = add i32 %v, %ext
81 %add2 = add i32 %adde, %a
82 store i32 %add2, i32 addrspace(1)* %gep, align 4
; As @add_adde but with the two adds in the opposite order; the
; reassociated form should still expose the carry-consuming add.
86 ; GCN-LABEL: {{^}}adde_add:
87 ; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
88 ; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
92 ; GFX9-LABEL: {{^}}adde_add:
93 ; GFX9: v_addc_co_u32_e{{32|64}} v{{[0-9]+}}, vcc
94 define amdgpu_kernel void @adde_add(i32 addrspace(1)* nocapture %arg, i32 %a) {
96 %x = tail call i32 @llvm.amdgcn.workitem.id.x()
97 %y = tail call i32 @llvm.amdgcn.workitem.id.y()
98 %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
99 %v = load i32, i32 addrspace(1)* %gep, align 4
100 %cmp = icmp ugt i32 %x, %y
101 %ext = zext i1 %cmp to i32
; ordinary add of %a first, carry add second (commuted vs. @add_adde)
102 %add = add i32 %v, %a
103 %adde = add i32 %add, %ext
104 store i32 %adde, i32 addrspace(1)* %gep, align 4
; add of sext(icmp) (0 or -1) followed by a subtract of %a should select
; a borrow-consuming v_subb.
108 ; GCN-LABEL: {{^}}sub_sube:
109 ; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
110 ; GCN: v_subb_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
114 ; GFX9-LABEL: {{^}}sub_sube:
115 ; GFX9: v_subb_co_u32_e{{32|64}} v{{[0-9]+}}, vcc
116 define amdgpu_kernel void @sub_sube(i32 addrspace(1)* nocapture %arg, i32 %a) {
118 %x = tail call i32 @llvm.amdgcn.workitem.id.x()
119 %y = tail call i32 @llvm.amdgcn.workitem.id.y()
120 %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
121 %v = load i32, i32 addrspace(1)* %gep, align 4
122 %cmp = icmp ugt i32 %x, %y
123 %ext = sext i1 %cmp to i32
; (%v + (0|-1)) - %a  ==  %v - %a - borrow
124 %adde = add i32 %v, %ext
125 %sub = sub i32 %adde, %a
126 store i32 %sub, i32 addrspace(1)* %gep, align 4
; Commuted variant (100 - sub): the borrow combine does not apply here,
; so the checks pin the fallback sequence where the i1 is materialized
; with v_cndmask and plain add/sub instructions are used (0x64 == 100).
130 ; GCN-LABEL: {{^}}sub_sube_commuted:
131 ; GCN-DAG: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
132 ; GCN-DAG: buffer_load_dword [[V:v[0-9]+]],
133 ; GCN: v_cndmask_b32_e64 [[CCZEXT:v[0-9]+]], 0, 1, [[CC]]
134 ; GCN: v_sub_i32_e32 [[SUB:v[0-9]+]], vcc, [[CCZEXT]], v4
135 ; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, {{.*}}, [[SUB]]
136 ; GCN: v_add_i32_e32 {{.*}}, 0x64, [[ADD]]
138 ; GFX9-LABEL: {{^}}sub_sube_commuted:
139 ; GFX9-DAG: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
140 ; GFX9-DAG: global_load_dword [[V:v[0-9]+]],
141 ; GFX9-DAG: v_cndmask_b32_e64 [[CCZEXT:v[0-9]+]], 0, 1, [[CC]]
142 ; GFX9: v_sub_u32_e32 {{.*}}, [[CCZEXT]]
143 ; GFX9: v_add_u32_e32
144 ; GFX9: v_add_u32_e32 {{.*}}, 0x64,
145 define amdgpu_kernel void @sub_sube_commuted(i32 addrspace(1)* nocapture %arg, i32 %a) {
147 %x = tail call i32 @llvm.amdgcn.workitem.id.x()
148 %y = tail call i32 @llvm.amdgcn.workitem.id.y()
149 %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
150 %v = load i32, i32 addrspace(1)* %gep, align 4
151 %cmp = icmp ugt i32 %x, %y
152 %ext = sext i1 %cmp to i32
153 %adde = add i32 %v, %ext
154 %sub = sub i32 %adde, %a
; the extra reversed subtract (constant minuend) blocks the borrow form
155 %sub2 = sub i32 100, %sub
156 store i32 %sub2, i32 addrspace(1)* %gep, align 4
; As @sub_sube but with the plain subtract first and the sext(icmp) add
; second; still expected to select a borrow-consuming v_subb.
160 ; GCN-LABEL: {{^}}sube_sub:
161 ; GCN: v_cmp_gt_u32_e{{32|64}} [[CC:[^,]+]], v{{[0-9]+}}, v{{[0-9]+}}
162 ; GCN: v_subb_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, v{{[0-9]+}}, v{{[0-9]+}}, [[CC]]
166 ; GFX9-LABEL: {{^}}sube_sub:
167 ; GFX9: v_subb_co_u32_e{{32|64}} v{{[0-9]+}}, vcc
168 define amdgpu_kernel void @sube_sub(i32 addrspace(1)* nocapture %arg, i32 %a) {
170 %x = tail call i32 @llvm.amdgcn.workitem.id.x()
171 %y = tail call i32 @llvm.amdgcn.workitem.id.y()
172 %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
173 %v = load i32, i32 addrspace(1)* %gep, align 4
174 %cmp = icmp ugt i32 %x, %y
175 %ext = sext i1 %cmp to i32
; (%v - %a) + (0|-1)  ==  %v - %a - borrow
176 %sub = sub i32 %v, %a
177 %adde = add i32 %sub, %ext
178 store i32 %adde, i32 addrspace(1)* %gep, align 4
; Carry input produced by a floating-point class test (llvm.amdgcn.class
; lowering to v_cmp_class) instead of an integer compare; the zext + add
; should still fold into v_addc.
182 ; GCN-LABEL: {{^}}zext_flclass:
183 ; GCN: v_cmp_class_f32_e{{32|64}} [[CC:[^,]+]],
184 ; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, 0, v{{[0-9]+}}, [[CC]]
187 ; GFX9-LABEL: {{^}}zext_flclass:
188 ; GFX9: v_addc_co_u32_e{{32|64}} v{{[0-9]+}}, vcc
189 define amdgpu_kernel void @zext_flclass(i32 addrspace(1)* nocapture %arg, float %x) {
191 %id = tail call i32 @llvm.amdgcn.workitem.id.x()
192 %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %id
193 %v = load i32, i32 addrspace(1)* %gep, align 4
; 608 is the class mask immediate passed through to v_cmp_class
194 %cmp = tail call zeroext i1 @llvm.amdgcn.class.f32(float %x, i32 608)
195 %ext = zext i1 %cmp to i32
196 %add = add i32 %v, %ext
197 store i32 %add, i32 addrspace(1)* %gep, align 4
; sext of the class test adds 0 or -1, so a borrow-consuming subtract
; (v_subbrev) is expected rather than a materialized -1.
201 ; GCN-LABEL: {{^}}sext_flclass:
202 ; GCN: v_cmp_class_f32_e32 vcc,
203 ; GCN: v_subbrev_u32_e32 v{{[0-9]+}}, vcc, 0, v{{[0-9]+}}, vcc
206 ; GFX9-LABEL: {{^}}sext_flclass:
207 ; GFX9: v_subbrev_co_u32_e32 v{{[0-9]+}}, vcc
208 define amdgpu_kernel void @sext_flclass(i32 addrspace(1)* nocapture %arg, float %x) {
210 %id = tail call i32 @llvm.amdgcn.workitem.id.x()
211 %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %id
212 %v = load i32, i32 addrspace(1)* %gep, align 4
213 %cmp = tail call zeroext i1 @llvm.amdgcn.class.f32(float %x, i32 608)
214 %ext = sext i1 %cmp to i32
215 %add = add i32 %v, %ext
216 store i32 %add, i32 addrspace(1)* %gep, align 4
; Carry input defined by the AND of two compares (s_and_b64 of the
; condition masks); the combined i1 should still feed v_addc directly.
220 ; GCN-LABEL: {{^}}add_and:
221 ; GCN: s_and_b64 [[CC:[^,]+]],
222 ; GCN: v_addc_u32_e{{32|64}} v{{[0-9]+}}, {{[^,]+}}, 0, v{{[0-9]+}}, [[CC]]
225 ; GFX9-LABEL: {{^}}add_and:
226 ; GFX9: v_addc_co_u32_e{{32|64}} v{{[0-9]+}}, vcc
227 define amdgpu_kernel void @add_and(i32 addrspace(1)* nocapture %arg) {
229 %x = tail call i32 @llvm.amdgcn.workitem.id.x()
230 %y = tail call i32 @llvm.amdgcn.workitem.id.y()
231 %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
232 %v = load i32, i32 addrspace(1)* %gep, align 4
233 %cmp1 = icmp ugt i32 %x, %y
234 %cmp2 = icmp ugt i32 %x, 1
; i1 AND of two compares is the carry source here
235 %cmp = and i1 %cmp1, %cmp2
236 %ext = zext i1 %cmp to i32
237 %add = add i32 %v, %ext
238 store i32 %add, i32 addrspace(1)* %gep, align 4
242 ; sub x, sext (setcc) => addcarry x, 0, setcc
; (subtracting -1/0 is the same as adding the carry bit, hence v_addc)
243 ; GCN-LABEL: {{^}}cmp_sub_sext:
244 ; GCN: v_cmp_gt_u32_e32 vcc, v
246 ; GCN: v_addc_u32_e32 [[RESULT:v[0-9]+]], vcc, 0, v{{[0-9]+}}, vcc
247 define amdgpu_kernel void @cmp_sub_sext(i32 addrspace(1)* nocapture %arg) {
249 %x = tail call i32 @llvm.amdgcn.workitem.id.x()
250 %y = tail call i32 @llvm.amdgcn.workitem.id.y()
251 %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
252 %v = load i32, i32 addrspace(1)* %gep, align 4
253 %cmp = icmp ugt i32 %x, %y
254 %ext = sext i1 %cmp to i32
255 %add = sub i32 %v, %ext
256 store i32 %add, i32 addrspace(1)* %gep, align 4
260 ; sub x, zext (setcc) => subcarry x, 0, setcc
; (subtracting the 0/1 bit maps to a borrow-consuming v_subbrev)
261 ; GCN-LABEL: {{^}}cmp_sub_zext:
262 ; GCN: v_cmp_gt_u32_e32 vcc, v
264 ; GCN: v_subbrev_u32_e32 [[RESULT:v[0-9]+]], vcc, 0, v{{[0-9]+}}, vcc
265 define amdgpu_kernel void @cmp_sub_zext(i32 addrspace(1)* nocapture %arg) {
267 %x = tail call i32 @llvm.amdgcn.workitem.id.x()
268 %y = tail call i32 @llvm.amdgcn.workitem.id.y()
269 %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
270 %v = load i32, i32 addrspace(1)* %gep, align 4
271 %cmp = icmp ugt i32 %x, %y
272 %ext = zext i1 %cmp to i32
273 %add = sub i32 %v, %ext
274 store i32 %add, i32 addrspace(1)* %gep, align 4
; Carry add (v_addc) followed by a plain subtract of %a; the carry
; combine applies to the first add only.
278 ; GCN-LABEL: {{^}}sub_addcarry:
279 ; GCN: v_cmp_gt_u32_e32 vcc, v
281 ; GCN: v_addc_u32_e32 [[ADDC:v[0-9]+]], vcc, 0, v{{[0-9]+}}, vcc
283 ; GCN: v_subrev_i32_e32 [[RESULT:v[0-9]+]], vcc,
284 define amdgpu_kernel void @sub_addcarry(i32 addrspace(1)* nocapture %arg, i32 %a) {
286 %x = tail call i32 @llvm.amdgcn.workitem.id.x()
287 %y = tail call i32 @llvm.amdgcn.workitem.id.y()
288 %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
289 %v = load i32, i32 addrspace(1)* %gep, align 4
290 %cmp = icmp ugt i32 %x, %y
291 %ext = zext i1 %cmp to i32
292 %adde = add i32 %v, %ext
293 %add2 = sub i32 %adde, %a
294 store i32 %add2, i32 addrspace(1)* %gep, align 4
; Chained subtracts with a zext(icmp) operand should select a single
; borrow-consuming v_subb for the combined subtraction.
298 ; GCN-LABEL: {{^}}sub_subcarry:
299 ; GCN: v_cmp_gt_u32_e32 vcc, v
301 ; GCN: v_subb_u32_e32 [[RESULT:v[0-9]+]], vcc, v{{[0-9]+}}, v{{[0-9]+}}, vcc
302 define amdgpu_kernel void @sub_subcarry(i32 addrspace(1)* nocapture %arg, i32 %a) {
304 %x = tail call i32 @llvm.amdgcn.workitem.id.x()
305 %y = tail call i32 @llvm.amdgcn.workitem.id.y()
306 %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
307 %v = load i32, i32 addrspace(1)* %gep, align 4
308 %cmp = icmp ugt i32 %x, %y
309 %ext = zext i1 %cmp to i32
; %v - (0|1) - %a folds the carry bit into the borrow chain
310 %adde = sub i32 %v, %ext
311 %add2 = sub i32 %adde, %a
312 store i32 %add2, i32 addrspace(1)* %gep, align 4
316 ; Check case where sub is commuted with zext
; The commuted subtraction (%a - %adde) prevents the carry form, so
; plain add/subrev instructions are expected instead of v_subb.
317 ; GCN-LABEL: {{^}}sub_zext_setcc_commute:
318 ; GCN: v_cmp_gt_u32_e32 vcc, v
321 ; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc,
322 ; GCN: v_subrev_i32_e32 [[RESULT:v[0-9]+]], vcc, s{{[0-9]+}}, [[ADD]]
323 define amdgpu_kernel void @sub_zext_setcc_commute(i32 addrspace(1)* nocapture %arg, i32 %a, i32%b) {
325 %x = tail call i32 @llvm.amdgcn.workitem.id.x()
326 %y = tail call i32 @llvm.amdgcn.workitem.id.y()
327 %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
328 %v = load i32, i32 addrspace(1)* %gep, align 4
329 %cmp = icmp ugt i32 %x, %y
330 %ext = zext i1 %cmp to i32
331 %adde = sub i32 %v, %ext
; operands reversed relative to the carry pattern
332 %sub = sub i32 %a, %adde
333 %sub2 = sub i32 %sub, %b
334 store i32 %sub2, i32 addrspace(1)* %gep, align 4
338 ; Check case where sub is commuted with sext
; Same commuted shape as @sub_zext_setcc_commute but with a sign
; extension; the carry/borrow form is likewise not expected.
339 ; GCN-LABEL: {{^}}sub_sext_setcc_commute:
340 ; GCN: v_cmp_gt_u32_e32 vcc, v
343 ; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc,
344 ; GCN: v_subrev_i32_e32 [[RESULT:v[0-9]+]], vcc, s{{[0-9]+}}, [[ADD]]
345 define amdgpu_kernel void @sub_sext_setcc_commute(i32 addrspace(1)* nocapture %arg, i32 %a, i32%b) {
347 %x = tail call i32 @llvm.amdgcn.workitem.id.x()
348 %y = tail call i32 @llvm.amdgcn.workitem.id.y()
349 %gep = getelementptr inbounds i32, i32 addrspace(1)* %arg, i32 %x
350 %v = load i32, i32 addrspace(1)* %gep, align 4
351 %cmp = icmp ugt i32 %x, %y
352 %ext = sext i1 %cmp to i32
353 %adde = sub i32 %v, %ext
; operands reversed relative to the carry pattern
354 %sub = sub i32 %a, %adde
355 %sub2 = sub i32 %sub, %b
356 store i32 %sub2, i32 addrspace(1)* %gep, align 4
360 declare i1 @llvm.amdgcn.class.f32(float, i32) #0
362 declare i32 @llvm.amdgcn.workitem.id.x() #0
364 declare i32 @llvm.amdgcn.workitem.id.y() #0
366 attributes #0 = { nounwind readnone speculatable }