; RUN: llc -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI %s
; RUN: llc -mtriple=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s

;;;==========================================================================;;;
;; 16-bit integer comparisons
;;;==========================================================================;;;
; eq on i16 selects the native 16-bit compare on VI; SI has no 16-bit VALU
; compares, so the operands are extended and the 32-bit compare is used.
; GCN-LABEL: {{^}}i16_eq:
; VI: v_cmp_eq_u16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_eq_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_eq(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp eq i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
; ne on i16: native 16-bit compare on VI, widened 32-bit compare on SI.
; GCN-LABEL: {{^}}i16_ne:
; VI: v_cmp_ne_u16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_ne_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_ne(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp ne i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
; ugt on i16: unsigned gt uses the u16 compare on VI, u32 on SI.
; GCN-LABEL: {{^}}i16_ugt:
; VI: v_cmp_gt_u16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_gt_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_ugt(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp ugt i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
; uge on i16: unsigned ge uses the u16 compare on VI, u32 on SI.
; GCN-LABEL: {{^}}i16_uge:
; VI: v_cmp_ge_u16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_ge_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_uge(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp uge i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
; ult on i16: unsigned lt uses the u16 compare on VI, u32 on SI.
; GCN-LABEL: {{^}}i16_ult:
; VI: v_cmp_lt_u16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_lt_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_ult(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp ult i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
; ule on i16: unsigned le uses the u16 compare on VI, u32 on SI.
; GCN-LABEL: {{^}}i16_ule:
; VI: v_cmp_le_u16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_le_u32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_ule(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp ule i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
; sgt on i16: signed gt uses the i16 compare on VI, i32 on SI.
; GCN-LABEL: {{^}}i16_sgt:
; VI: v_cmp_gt_i16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_gt_i32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_sgt(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp sgt i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
; sge on i16: signed ge uses the i16 compare on VI, i32 on SI.
; GCN-LABEL: {{^}}i16_sge:
; VI: v_cmp_ge_i16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_ge_i32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_sge(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp sge i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
; slt on i16: signed lt uses the i16 compare on VI, i32 on SI.
; GCN-LABEL: {{^}}i16_slt:
; VI: v_cmp_lt_i16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_lt_i32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_slt(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp slt i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
; sle on i16: signed le uses the i16 compare on VI, i32 on SI.
; GCN-LABEL: {{^}}i16_sle:
; VI: v_cmp_le_i16_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_le_i32_e32 vcc, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_sle(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, ptr addrspace(1) %b.ptr) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %b.gep = getelementptr inbounds i16, ptr addrspace(1) %b.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %b = load i16, ptr addrspace(1) %b.gep
  %tmp0 = icmp sle i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
; These should be commuted to reduce code size
; eq with a scalar (SGPR) operand: the compare is commuted so the SGPR can be
; the first source of the VOP2 encoding.
; GCN-LABEL: {{^}}i16_eq_v_s:
; VI: v_cmp_eq_u16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_eq_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_eq_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp eq i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
; ne with a scalar operand: commuted so the SGPR is the first source.
; GCN-LABEL: {{^}}i16_ne_v_s:
; VI: v_cmp_ne_u16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_ne_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_ne_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp ne i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
; ugt (a, s) commutes to lt (s, a) so the SGPR can be the first source.
; GCN-LABEL: {{^}}i16_ugt_v_s:
; VI: v_cmp_lt_u16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_lt_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_ugt_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp ugt i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
; uge (a, s) commutes to le (s, a) so the SGPR can be the first source.
; GCN-LABEL: {{^}}i16_uge_v_s:
; VI: v_cmp_le_u16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_le_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_uge_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp uge i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
; ult (a, s) commutes to gt (s, a) so the SGPR can be the first source.
; GCN-LABEL: {{^}}i16_ult_v_s:
; VI: v_cmp_gt_u16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_gt_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_ult_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp ult i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
; ule (a, s) commutes to ge (s, a) so the SGPR can be the first source.
; GCN-LABEL: {{^}}i16_ule_v_s:
; VI: v_cmp_ge_u16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_ge_u32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_ule_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp ule i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
; sgt (a, s) commutes to lt (s, a) so the SGPR can be the first source.
; GCN-LABEL: {{^}}i16_sgt_v_s:
; VI: v_cmp_lt_i16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_lt_i32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_sgt_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp sgt i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
; sge (a, s) commutes to le (s, a) so the SGPR can be the first source.
; GCN-LABEL: {{^}}i16_sge_v_s:
; VI: v_cmp_le_i16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_le_i32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_sge_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp sge i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
; slt (a, s) commutes to gt (s, a) so the SGPR can be the first source.
; GCN-LABEL: {{^}}i16_slt_v_s:
; VI: v_cmp_gt_i16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_gt_i32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_slt_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp slt i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
; sle (a, s) commutes to ge (s, a) so the SGPR can be the first source.
; GCN-LABEL: {{^}}i16_sle_v_s:
; VI: v_cmp_ge_i16_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
; SI: v_cmp_ge_i32_e32 vcc, s{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @i16_sle_v_s(ptr addrspace(1) %out, ptr addrspace(1) %a.ptr, i16 %b) #0 {
entry:
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = sext i32 %tid to i64
  %a.gep = getelementptr inbounds i16, ptr addrspace(1) %a.ptr, i64 %tid.ext
  %out.gep = getelementptr inbounds i32, ptr addrspace(1) %out, i64 %tid.ext
  %a = load i16, ptr addrspace(1) %a.gep
  %tmp0 = icmp sle i16 %a, %b
  %tmp1 = sext i1 %tmp0 to i32
  store i32 %tmp1, ptr addrspace(1) %out.gep
  ret void
}
declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }