; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -march=amdgcn -verify-machineinstrs | FileCheck %s -enable-var-scope -check-prefixes=FUNC,GCN,SI
; RUN: llc < %s -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s -enable-var-scope -check-prefixes=FUNC,GCN,VI
; RUN: llc < %s -march=r600 -mcpu=cypress -verify-machineinstrs | FileCheck %s -enable-var-scope -check-prefixes=FUNC,EG

declare i7 @llvm.ctlz.i7(i7, i1) nounwind readnone
declare i8 @llvm.ctlz.i8(i8, i1) nounwind readnone
declare i16 @llvm.ctlz.i16(i16, i1) nounwind readnone

declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
declare <2 x i32> @llvm.ctlz.v2i32(<2 x i32>, i1) nounwind readnone
declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1) nounwind readnone

declare i64 @llvm.ctlz.i64(i64, i1) nounwind readnone
declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>, i1) nounwind readnone
declare <4 x i64> @llvm.ctlz.v4i64(<4 x i64>, i1) nounwind readnone

declare i32 @llvm.r600.read.tidig.x() nounwind readnone

define amdgpu_kernel void @s_ctlz_i32(i32 addrspace(1)* noalias %out, i32 %val) nounwind {
; SI-LABEL: s_ctlz_i32:
; SI-NEXT: s_load_dword s2, s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_flbit_i32_b32 s0, s2
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_cmp_ne_u32_e64 vcc, s2, 0
; SI-NEXT: v_cndmask_b32_e32 v0, 32, v0, vcc
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-LABEL: s_ctlz_i32:
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
; VI-NEXT: s_load_dword s0, s[0:1], 0x2c
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_flbit_i32_b32 s1, s0
; VI-NEXT: v_mov_b32_e32 v0, s1
; VI-NEXT: v_cmp_ne_u32_e64 vcc, s0, 0
; VI-NEXT: v_cndmask_b32_e32 v0, 32, v0, vcc
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; EG-LABEL: s_ctlz_i32:
; EG-NEXT: ALU 3, @4, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; EG-NEXT: ALU clause starting at 4:
; EG-NEXT: FFBH_UINT * T0.W, KC0[2].Z,
; EG-NEXT: CNDE_INT T0.X, KC0[2].Z, literal.x, PV.W,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: 32(4.484155e-44), 2(2.802597e-45)
  %ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 false) nounwind readnone
  store i32 %ctlz, i32 addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @v_ctlz_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
; SI-LABEL: v_ctlz_i32:
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xb
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_ffbh_u32_e32 v1, v0
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_cndmask_b32_e32 v0, 32, v1, vcc
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-LABEL: v_ctlz_i32:
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_ffbh_u32_e32 v1, v0
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: v_cndmask_b32_e32 v0, 32, v1, vcc
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; EG-LABEL: v_ctlz_i32:
; EG-NEXT: ALU 2, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: ALU 3, @11, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_32 T0.X, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
; EG-NEXT: ADD_INT * T0.X, KC0[2].Z, PV.W,
; EG-NEXT: ALU clause starting at 11:
; EG-NEXT: FFBH_UINT * T0.W, T0.X,
; EG-NEXT: CNDE_INT T0.X, T0.X, literal.x, PV.W,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: 32(4.484155e-44), 2(2.802597e-45)
  %tid = call i32 @llvm.r600.read.tidig.x()
  %in.gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
  %val = load i32, i32 addrspace(1)* %in.gep, align 4
  %ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 false) nounwind readnone
  store i32 %ctlz, i32 addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @v_ctlz_v2i32(<2 x i32> addrspace(1)* noalias %out, <2 x i32> addrspace(1)* noalias %valptr) nounwind {
; SI-LABEL: v_ctlz_v2i32:
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xb
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_load_dwordx2 v[0:1], v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_ffbh_u32_e32 v2, v1
; SI-NEXT: v_ffbh_u32_e32 v3, v0
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
; SI-NEXT: v_cndmask_b32_e32 v1, 32, v2, vcc
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_cndmask_b32_e32 v0, 32, v3, vcc
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; VI-LABEL: v_ctlz_v2i32:
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
; VI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_ffbh_u32_e32 v2, v1
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
; VI-NEXT: v_cndmask_b32_e32 v1, 32, v2, vcc
; VI-NEXT: v_ffbh_u32_e32 v3, v0
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: v_cndmask_b32_e32 v0, 32, v3, vcc
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; EG-LABEL: v_ctlz_v2i32:
; EG-NEXT: ALU 2, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: ALU 6, @11, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
; EG-NEXT: ADD_INT * T0.X, KC0[2].Z, PV.W,
; EG-NEXT: ALU clause starting at 11:
; EG-NEXT: FFBH_UINT * T0.W, T0.Y,
; EG-NEXT: CNDE_INT T0.Y, T0.Y, literal.x, PV.W,
; EG-NEXT: FFBH_UINT * T0.W, T0.X,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: CNDE_INT T0.X, T0.X, literal.x, PV.W,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: 32(4.484155e-44), 2(2.802597e-45)
  %tid = call i32 @llvm.r600.read.tidig.x()
  %in.gep = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %valptr, i32 %tid
  %val = load <2 x i32>, <2 x i32> addrspace(1)* %in.gep, align 8
  %ctlz = call <2 x i32> @llvm.ctlz.v2i32(<2 x i32> %val, i1 false) nounwind readnone
  store <2 x i32> %ctlz, <2 x i32> addrspace(1)* %out, align 8
  ret void
}

define amdgpu_kernel void @v_ctlz_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %valptr) nounwind {
; SI-LABEL: v_ctlz_v4i32:
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xb
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_load_dwordx4 v[0:3], v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_ffbh_u32_e32 v4, v3
; SI-NEXT: v_ffbh_u32_e32 v5, v2
; SI-NEXT: v_ffbh_u32_e32 v6, v1
; SI-NEXT: v_ffbh_u32_e32 v7, v0
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; SI-NEXT: v_cndmask_b32_e32 v3, 32, v4, vcc
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; SI-NEXT: v_cndmask_b32_e32 v2, 32, v5, vcc
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
; SI-NEXT: v_cndmask_b32_e32 v1, 32, v6, vcc
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_cndmask_b32_e32 v0, 32, v7, vcc
; SI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
; VI-LABEL: v_ctlz_v4i32:
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
; VI-NEXT: v_lshlrev_b32_e32 v0, 4, v0
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_ffbh_u32_e32 v4, v3
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; VI-NEXT: v_cndmask_b32_e32 v3, 32, v4, vcc
; VI-NEXT: v_ffbh_u32_e32 v5, v2
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; VI-NEXT: v_cndmask_b32_e32 v2, 32, v5, vcc
; VI-NEXT: v_ffbh_u32_e32 v6, v1
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
; VI-NEXT: v_cndmask_b32_e32 v1, 32, v6, vcc
; VI-NEXT: v_ffbh_u32_e32 v7, v0
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: v_cndmask_b32_e32 v0, 32, v7, vcc
; VI-NEXT: buffer_store_dwordx4 v[0:3], off, s[4:7], 0
; EG-LABEL: v_ctlz_v4i32:
; EG-NEXT: ALU 2, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: ALU 12, @11, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XYZW, T1.X, 1
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_128 T0.XYZW, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
; EG-NEXT: 4(5.605194e-45), 0(0.000000e+00)
; EG-NEXT: ADD_INT * T0.X, KC0[2].Z, PV.W,
; EG-NEXT: ALU clause starting at 11:
; EG-NEXT: FFBH_UINT * T1.W, T0.W,
; EG-NEXT: FFBH_UINT T2.W, T0.Z,
; EG-NEXT: CNDE_INT * T0.W, T0.W, literal.x, PV.W, BS:VEC_021/SCL_122
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: CNDE_INT T0.Z, T0.Z, literal.x, PV.W,
; EG-NEXT: FFBH_UINT * T1.W, T0.Y,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: CNDE_INT T0.Y, T0.Y, literal.x, PV.W,
; EG-NEXT: FFBH_UINT * T1.W, T0.X,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: CNDE_INT T0.X, T0.X, literal.x, PV.W,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: 32(4.484155e-44), 2(2.802597e-45)
  %tid = call i32 @llvm.r600.read.tidig.x()
  %in.gep = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %valptr, i32 %tid
  %val = load <4 x i32>, <4 x i32> addrspace(1)* %in.gep, align 16
  %ctlz = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %val, i1 false) nounwind readnone
  store <4 x i32> %ctlz, <4 x i32> addrspace(1)* %out, align 16
  ret void
}

define amdgpu_kernel void @v_ctlz_i8(i8 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %valptr) nounwind {
; SI-LABEL: v_ctlz_i8:
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xb
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_load_ubyte v0, off, s[4:7], 0
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_ffbh_u32_e32 v1, v0
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_cndmask_b32_e32 v0, 32, v1, vcc
; SI-NEXT: v_subrev_i32_e32 v0, vcc, 24, v0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_byte v0, off, s[0:3], 0
; VI-LABEL: v_ctlz_i8:
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_mov_b32 s2, s6
; VI-NEXT: s_mov_b32 s3, s7
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_load_ubyte v0, off, s[0:3], 0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_ffbh_u32_sdwa v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0
; VI-NEXT: v_cmp_ne_u16_e32 vcc, 0, v0
; VI-NEXT: v_cndmask_b32_e32 v0, 32, v1, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, -16, v0
; VI-NEXT: v_add_u16_e32 v0, -8, v0
; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; EG-LABEL: v_ctlz_i8:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: ALU 15, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 9:
; EG-NEXT: FFBH_UINT * T0.W, T0.X,
; EG-NEXT: CNDE_INT T0.W, T0.X, literal.x, PV.W,
; EG-NEXT: AND_INT * T1.W, KC0[2].Y, literal.y,
; EG-NEXT: 32(4.484155e-44), 3(4.203895e-45)
; EG-NEXT: ADD_INT * T0.W, PV.W, literal.x,
; EG-NEXT: -24(nan), 0(0.000000e+00)
; EG-NEXT: AND_INT T0.W, PV.W, literal.x,
; EG-NEXT: LSHL * T1.W, T1.W, literal.y,
; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
; EG-NEXT: LSHL T0.X, PV.W, PS,
; EG-NEXT: LSHL * T0.W, literal.x, PS,
; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
; EG-NEXT: MOV T0.Y, 0.0,
; EG-NEXT: MOV * T0.Z, 0.0,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %val = load i8, i8 addrspace(1)* %valptr
  %ctlz = call i8 @llvm.ctlz.i8(i8 %val, i1 false) nounwind readnone
  store i8 %ctlz, i8 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @s_ctlz_i64(i64 addrspace(1)* noalias %out, [8 x i32], i64 %val) nounwind {
; SI-LABEL: s_ctlz_i64:
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x13
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_flbit_i32_b32 s0, s2
; SI-NEXT: s_flbit_i32_b32 s1, s3
; SI-NEXT: s_add_i32 s0, s0, 32
; SI-NEXT: s_or_b32 s2, s2, s3
; SI-NEXT: v_mov_b32_e32 v0, s1
; SI-NEXT: v_mov_b32_e32 v1, s0
; SI-NEXT: v_cmp_eq_u32_e64 vcc, s3, 0
; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
; SI-NEXT: v_cmp_ne_u32_e64 vcc, s2, 0
; SI-NEXT: v_cndmask_b32_e32 v0, 64, v0, vcc
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; VI-LABEL: s_ctlz_i64:
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x4c
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_flbit_i32_b32 s2, s0
; VI-NEXT: s_flbit_i32_b32 s3, s1
; VI-NEXT: s_add_i32 s2, s2, 32
; VI-NEXT: v_mov_b32_e32 v0, s3
; VI-NEXT: v_mov_b32_e32 v1, s2
; VI-NEXT: v_cmp_eq_u32_e64 vcc, s1, 0
; VI-NEXT: s_or_b32 s0, s0, s1
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
; VI-NEXT: v_cmp_ne_u32_e64 vcc, s0, 0
; VI-NEXT: v_cndmask_b32_e32 v0, 64, v0, vcc
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; EG-LABEL: s_ctlz_i64:
; EG-NEXT: ALU 9, @4, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
; EG-NEXT: ALU clause starting at 4:
; EG-NEXT: FFBH_UINT * T0.W, KC0[4].W,
; EG-NEXT: CNDE_INT * T0.W, KC0[4].W, literal.x, PV.W,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: FFBH_UINT T1.W, KC0[5].X,
; EG-NEXT: ADD_INT * T0.W, PV.W, literal.x,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: CNDE_INT T0.X, KC0[5].X, PS, PV.W,
; EG-NEXT: MOV T0.Y, 0.0,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %ctlz = call i64 @llvm.ctlz.i64(i64 %val, i1 false)
  store i64 %ctlz, i64 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @s_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64 %val) nounwind {
; SI-LABEL: s_ctlz_i64_trunc:
; SI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0xb
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_flbit_i32_b32 s0, s2
; SI-NEXT: s_flbit_i32_b32 s1, s3
; SI-NEXT: s_add_i32 s0, s0, 32
; SI-NEXT: s_or_b32 s2, s2, s3
; SI-NEXT: v_mov_b32_e32 v0, s1
; SI-NEXT: v_mov_b32_e32 v1, s0
; SI-NEXT: v_cmp_eq_u32_e64 vcc, s3, 0
; SI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
; SI-NEXT: v_cmp_ne_u32_e64 vcc, s2, 0
; SI-NEXT: v_cndmask_b32_e32 v0, 64, v0, vcc
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-LABEL: s_ctlz_i64_trunc:
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_flbit_i32_b32 s2, s0
; VI-NEXT: s_flbit_i32_b32 s3, s1
; VI-NEXT: s_add_i32 s2, s2, 32
; VI-NEXT: v_mov_b32_e32 v0, s3
; VI-NEXT: v_mov_b32_e32 v1, s2
; VI-NEXT: v_cmp_eq_u32_e64 vcc, s1, 0
; VI-NEXT: s_or_b32 s0, s0, s1
; VI-NEXT: v_cndmask_b32_e32 v0, v0, v1, vcc
; VI-NEXT: v_cmp_ne_u32_e64 vcc, s0, 0
; VI-NEXT: v_cndmask_b32_e32 v0, 64, v0, vcc
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; EG-LABEL: s_ctlz_i64_trunc:
; EG-NEXT: ALU 8, @4, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; EG-NEXT: ALU clause starting at 4:
; EG-NEXT: FFBH_UINT * T0.W, KC0[2].W,
; EG-NEXT: CNDE_INT * T0.W, KC0[2].W, literal.x, PV.W,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: FFBH_UINT T1.W, KC0[3].X,
; EG-NEXT: ADD_INT * T0.W, PV.W, literal.x,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: CNDE_INT T0.X, KC0[3].X, PS, PV.W,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %ctlz = call i64 @llvm.ctlz.i64(i64 %val, i1 false)
  %trunc = trunc i64 %ctlz to i32
  store i32 %trunc, i32 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @v_ctlz_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
; SI-LABEL: v_ctlz_i64:
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xb
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_load_dwordx2 v[2:3], v[0:1], s[4:7], 0 addr64
; SI-NEXT: s_mov_b64 s[10:11], s[6:7]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_ffbh_u32_e32 v4, v2
; SI-NEXT: v_ffbh_u32_e32 v5, v3
; SI-NEXT: v_or_b32_e32 v2, v2, v3
; SI-NEXT: v_add_i32_e32 v4, vcc, 32, v4
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; SI-NEXT: v_cndmask_b32_e32 v3, v5, v4, vcc
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; SI-NEXT: v_cndmask_b32_e32 v2, 64, v3, vcc
; SI-NEXT: v_mov_b32_e32 v3, v1
; SI-NEXT: buffer_store_dwordx2 v[2:3], v[0:1], s[8:11], 0 addr64
; VI-LABEL: v_ctlz_i64:
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
; VI-NEXT: v_lshlrev_b32_e32 v0, 3, v0
; VI-NEXT: v_mov_b32_e32 v5, 0
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v6, s3
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_add_u32_e32 v2, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v3, vcc, v3, v5, vcc
; VI-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
; VI-NEXT: v_add_u32_e32 v4, vcc, s2, v0
; VI-NEXT: v_addc_u32_e32 v5, vcc, v6, v5, vcc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_ffbh_u32_e32 v0, v2
; VI-NEXT: v_add_u32_e32 v0, vcc, 32, v0
; VI-NEXT: v_ffbh_u32_e32 v6, v3
; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v3
; VI-NEXT: v_or_b32_e32 v2, v2, v3
; VI-NEXT: v_cndmask_b32_e32 v0, v6, v0, vcc
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; VI-NEXT: v_cndmask_b32_e32 v0, 64, v0, vcc
; VI-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; EG-LABEL: v_ctlz_i64:
; EG-NEXT: ALU 2, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: ALU 10, @11, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.XY, T1.X, 1
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_64 T0.XY, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
; EG-NEXT: ADD_INT * T0.X, KC0[2].Z, PV.W,
; EG-NEXT: ALU clause starting at 11:
; EG-NEXT: FFBH_UINT * T1.W, T0.X,
; EG-NEXT: CNDE_INT * T1.W, T0.X, literal.x, PV.W,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: FFBH_UINT T2.W, T0.Y,
; EG-NEXT: ADD_INT * T1.W, PV.W, literal.x,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: CNDE_INT T0.X, T0.Y, PS, PV.W,
; EG-NEXT: MOV T0.Y, 0.0,
; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, T0.W,
; EG-NEXT: LSHR * T1.X, PV.W, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %tid = call i32 @llvm.r600.read.tidig.x()
  %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
  %out.gep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
  %val = load i64, i64 addrspace(1)* %in.gep
  %ctlz = call i64 @llvm.ctlz.i64(i64 %val, i1 false)
  store i64 %ctlz, i64 addrspace(1)* %out.gep
  ret void
}

define amdgpu_kernel void @v_ctlz_i64_trunc(i32 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %in) nounwind {
; SI-LABEL: v_ctlz_i64_trunc:
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xb
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, 0
; SI-NEXT: v_lshlrev_b32_e32 v1, 3, v0
; SI-NEXT: v_mov_b32_e32 v2, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_load_dwordx2 v[3:4], v[1:2], s[4:7], 0 addr64
; SI-NEXT: v_lshlrev_b32_e32 v1, 2, v0
; SI-NEXT: s_mov_b64 s[10:11], s[6:7]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_ffbh_u32_e32 v0, v3
; SI-NEXT: v_ffbh_u32_e32 v5, v4
; SI-NEXT: v_or_b32_e32 v3, v3, v4
; SI-NEXT: v_add_i32_e32 v0, vcc, 32, v0
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; SI-NEXT: v_cndmask_b32_e32 v0, v5, v0, vcc
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; SI-NEXT: v_cndmask_b32_e32 v0, 64, v0, vcc
; SI-NEXT: buffer_store_dword v0, v[1:2], s[8:11], 0 addr64
; VI-LABEL: v_ctlz_i64_trunc:
; VI-NEXT: s_load_dwordx2 s[2:3], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
; VI-NEXT: v_lshlrev_b32_e32 v1, 3, v0
; VI-NEXT: v_lshlrev_b32_e32 v2, 2, v0
; VI-NEXT: v_mov_b32_e32 v3, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, s3
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v1
; VI-NEXT: v_addc_u32_e32 v1, vcc, v5, v3, vcc
; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; VI-NEXT: v_add_u32_e32 v2, vcc, s2, v2
; VI-NEXT: v_addc_u32_e32 v3, vcc, v4, v3, vcc
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_ffbh_u32_e32 v4, v0
; VI-NEXT: v_add_u32_e32 v4, vcc, 32, v4
; VI-NEXT: v_ffbh_u32_e32 v5, v1
; VI-NEXT: v_or_b32_e32 v0, v0, v1
; VI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v1
; VI-NEXT: v_cndmask_b32_e32 v1, v5, v4, vcc
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: v_cndmask_b32_e32 v0, 64, v1, vcc
; VI-NEXT: flat_store_dword v[2:3], v0
; EG-LABEL: v_ctlz_i64_trunc:
; EG-NEXT: ALU 2, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: ALU 10, @11, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_64 T1.XY, T1.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
; EG-NEXT: ADD_INT * T1.X, KC0[2].Z, PV.W,
; EG-NEXT: ALU clause starting at 11:
; EG-NEXT: FFBH_UINT * T0.W, T1.X,
; EG-NEXT: CNDE_INT * T0.W, T1.X, literal.x, PV.W,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: LSHL T0.Z, T0.X, literal.x,
; EG-NEXT: FFBH_UINT T1.W, T1.Y,
; EG-NEXT: ADD_INT * T0.W, PV.W, literal.y,
; EG-NEXT: 2(2.802597e-45), 32(4.484155e-44)
; EG-NEXT: CNDE_INT T0.X, T1.Y, PS, PV.W,
; EG-NEXT: ADD_INT * T0.W, KC0[2].Y, PV.Z,
; EG-NEXT: LSHR * T1.X, PV.W, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %tid = call i32 @llvm.r600.read.tidig.x()
  %in.gep = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
  %out.gep = getelementptr i32, i32 addrspace(1)* %out, i32 %tid
  %val = load i64, i64 addrspace(1)* %in.gep
  %ctlz = call i64 @llvm.ctlz.i64(i64 %val, i1 false)
  %trunc = trunc i64 %ctlz to i32
  store i32 %trunc, i32 addrspace(1)* %out.gep
  ret void
}

define amdgpu_kernel void @v_ctlz_i32_sel_eq_neg1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
; SI-LABEL: v_ctlz_i32_sel_eq_neg1:
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xb
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_ffbh_u32_e32 v0, v0
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-LABEL: v_ctlz_i32_sel_eq_neg1:
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_ffbh_u32_e32 v0, v0
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; EG-LABEL: v_ctlz_i32_sel_eq_neg1:
; EG-NEXT: ALU 2, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: ALU 5, @11, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_32 T0.X, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
; EG-NEXT: ADD_INT * T0.X, KC0[2].Z, PV.W,
; EG-NEXT: ALU clause starting at 11:
; EG-NEXT: FFBH_UINT * T0.W, T0.X,
; EG-NEXT: CNDE_INT * T0.W, T0.X, literal.x, PV.W,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: CNDE_INT T0.X, T0.X, literal.x, PV.W,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: -1(nan), 2(2.802597e-45)
  %tid = call i32 @llvm.r600.read.tidig.x()
  %in.gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
  %val = load i32, i32 addrspace(1)* %in.gep
  %ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 false) nounwind readnone
  %cmp = icmp eq i32 %val, 0
  %sel = select i1 %cmp, i32 -1, i32 %ctlz
  store i32 %sel, i32 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @v_ctlz_i32_sel_ne_neg1(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
; SI-LABEL: v_ctlz_i32_sel_ne_neg1:
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xb
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_ffbh_u32_e32 v0, v0
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-LABEL: v_ctlz_i32_sel_ne_neg1:
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_ffbh_u32_e32 v0, v0
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; EG-LABEL: v_ctlz_i32_sel_ne_neg1:
; EG-NEXT: ALU 2, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: ALU 5, @11, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_32 T0.X, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
; EG-NEXT: ADD_INT * T0.X, KC0[2].Z, PV.W,
; EG-NEXT: ALU clause starting at 11:
; EG-NEXT: FFBH_UINT * T0.W, T0.X,
; EG-NEXT: CNDE_INT * T0.W, T0.X, literal.x, PV.W,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: CNDE_INT T0.X, T0.X, literal.x, PV.W,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: -1(nan), 2(2.802597e-45)
  %tid = call i32 @llvm.r600.read.tidig.x()
  %in.gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
  %val = load i32, i32 addrspace(1)* %in.gep
  %ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 false) nounwind readnone
  %cmp = icmp ne i32 %val, 0
  %sel = select i1 %cmp, i32 %ctlz, i32 -1
  store i32 %sel, i32 addrspace(1)* %out
  ret void
}

; TODO: Should be able to eliminate select here as well.
define amdgpu_kernel void @v_ctlz_i32_sel_eq_bitwidth(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
; SI-LABEL: v_ctlz_i32_sel_eq_bitwidth:
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xb
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_ffbh_u32_e32 v1, v0
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_cndmask_b32_e32 v0, 32, v1, vcc
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 32, v0
; SI-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-LABEL: v_ctlz_i32_sel_eq_bitwidth:
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_ffbh_u32_e32 v1, v0
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: v_cndmask_b32_e32 v0, 32, v1, vcc
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 32, v0
; VI-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; EG-LABEL: v_ctlz_i32_sel_eq_bitwidth:
; EG-NEXT: ALU 2, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: ALU 7, @11, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_32 T0.X, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
; EG-NEXT: ADD_INT * T0.X, KC0[2].Z, PV.W,
; EG-NEXT: ALU clause starting at 11:
; EG-NEXT: FFBH_UINT * T0.W, T0.X,
; EG-NEXT: CNDE_INT * T0.W, T0.X, literal.x, PV.W,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: SETE_INT * T1.W, PV.W, literal.x,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: CNDE_INT T0.X, PV.W, T0.W, literal.x,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: -1(nan), 2(2.802597e-45)
  %tid = call i32 @llvm.r600.read.tidig.x()
  %in.gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
  %val = load i32, i32 addrspace(1)* %in.gep
  %ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 false) nounwind readnone
  %cmp = icmp eq i32 %ctlz, 32
  %sel = select i1 %cmp, i32 -1, i32 %ctlz
  store i32 %sel, i32 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @v_ctlz_i32_sel_ne_bitwidth(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %valptr) nounwind {
; SI-LABEL: v_ctlz_i32_sel_ne_bitwidth:
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xb
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_load_dword v0, v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_ffbh_u32_e32 v1, v0
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_cndmask_b32_e32 v0, 32, v1, vcc
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 32, v0
; SI-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; SI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; VI-LABEL: v_ctlz_i32_sel_ne_bitwidth:
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
; VI-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_ffbh_u32_e32 v1, v0
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: v_cndmask_b32_e32 v0, 32, v1, vcc
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 32, v0
; VI-NEXT: v_cndmask_b32_e32 v0, -1, v0, vcc
; VI-NEXT: buffer_store_dword v0, off, s[4:7], 0
; EG-LABEL: v_ctlz_i32_sel_ne_bitwidth:
; EG-NEXT: ALU 2, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: ALU 7, @11, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT_CACHELESS STORE_RAW T0.X, T1.X, 1
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_32 T0.X, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: LSHL * T0.W, T0.X, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
; EG-NEXT: ADD_INT * T0.X, KC0[2].Z, PV.W,
; EG-NEXT: ALU clause starting at 11:
; EG-NEXT: FFBH_UINT * T0.W, T0.X,
; EG-NEXT: CNDE_INT * T0.W, T0.X, literal.x, PV.W,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: SETNE_INT * T1.W, PV.W, literal.x,
; EG-NEXT: 32(4.484155e-44), 0(0.000000e+00)
; EG-NEXT: CNDE_INT T0.X, PV.W, literal.x, T0.W,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.y,
; EG-NEXT: -1(nan), 2(2.802597e-45)
  %tid = call i32 @llvm.r600.read.tidig.x()
  %in.gep = getelementptr i32, i32 addrspace(1)* %valptr, i32 %tid
  %val = load i32, i32 addrspace(1)* %in.gep
  %ctlz = call i32 @llvm.ctlz.i32(i32 %val, i1 false) nounwind readnone
  %cmp = icmp ne i32 %ctlz, 32
  %sel = select i1 %cmp, i32 %ctlz, i32 -1
  store i32 %sel, i32 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @v_ctlz_i8_sel_eq_neg1(i8 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %valptr) nounwind {
; SI-LABEL: v_ctlz_i8_sel_eq_neg1:
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xb
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_load_ubyte v0, v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_ffbh_u32_e32 v0, v0
; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; VI-LABEL: v_ctlz_i8_sel_eq_neg1:
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_ubyte v0, v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_ffbh_u32_e32 v0, v0
; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; EG-LABEL: v_ctlz_i8_sel_eq_neg1:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: ALU 12, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: ADD_INT * T0.X, KC0[2].Z, T0.X,
; EG-NEXT: ALU clause starting at 9:
; EG-NEXT: FFBH_UINT T0.W, T0.X,
; EG-NEXT: AND_INT * T1.W, KC0[2].Y, literal.x,
; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
; EG-NEXT: AND_INT T0.W, PV.W, literal.x,
; EG-NEXT: LSHL * T1.W, PS, literal.y,
; EG-NEXT: 255(3.573311e-43), 3(4.203895e-45)
; EG-NEXT: LSHL T0.X, PV.W, PS,
; EG-NEXT: LSHL * T0.W, literal.x, PS,
; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
; EG-NEXT: MOV T0.Y, 0.0,
; EG-NEXT: MOV * T0.Z, 0.0,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %tid = call i32 @llvm.r600.read.tidig.x()
  %valptr.gep = getelementptr i8, i8 addrspace(1)* %valptr, i32 %tid
  %val = load i8, i8 addrspace(1)* %valptr.gep
  %ctlz = call i8 @llvm.ctlz.i8(i8 %val, i1 false) nounwind readnone
  %cmp = icmp eq i8 %val, 0
  %sel = select i1 %cmp, i8 -1, i8 %ctlz
  store i8 %sel, i8 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @v_ctlz_i16_sel_eq_neg1(i16 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %valptr) nounwind {
; SI-LABEL: v_ctlz_i16_sel_eq_neg1:
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0xb
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_load_ushort v0, off, s[4:7], 0
; SI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x9
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_ffbh_u32_e32 v0, v0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; VI-LABEL: v_ctlz_i16_sel_eq_neg1:
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_mov_b32 s2, s6
; VI-NEXT: s_mov_b32 s3, s7
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: buffer_load_ushort v0, off, s[0:3], 0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_ffbh_u32_e32 v1, v0
; VI-NEXT: v_cmp_ne_u32_e64 s[0:1], 0, v0
; VI-NEXT: v_cndmask_b32_e64 v0, 32, v1, s[0:1]
; VI-NEXT: v_add_u32_e32 v0, vcc, -16, v0
; VI-NEXT: v_mov_b32_e32 v1, 0xffff
; VI-NEXT: v_cndmask_b32_e64 v0, v1, v0, s[0:1]
; VI-NEXT: buffer_store_short v0, off, s[4:7], 0
; EG-LABEL: v_ctlz_i16_sel_eq_neg1:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: ALU 12, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_16 T0.X, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: MOV * T0.X, KC0[2].Z,
; EG-NEXT: ALU clause starting at 9:
; EG-NEXT: FFBH_UINT T0.W, T0.X,
; EG-NEXT: AND_INT * T1.W, KC0[2].Y, literal.x,
; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
; EG-NEXT: AND_INT T0.W, PV.W, literal.x,
; EG-NEXT: LSHL * T1.W, PS, literal.y,
; EG-NEXT: 65535(9.183409e-41), 3(4.203895e-45)
; EG-NEXT: LSHL T0.X, PV.W, PS,
; EG-NEXT: LSHL * T0.W, literal.x, PS,
; EG-NEXT: 65535(9.183409e-41), 0(0.000000e+00)
; EG-NEXT: MOV T0.Y, 0.0,
; EG-NEXT: MOV * T0.Z, 0.0,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %val = load i16, i16 addrspace(1)* %valptr
  %ctlz = call i16 @llvm.ctlz.i16(i16 %val, i1 false) nounwind readnone
  %cmp = icmp eq i16 %val, 0
  %sel = select i1 %cmp, i16 -1, i16 %ctlz
  store i16 %sel, i16 addrspace(1)* %out
  ret void
}

; FIXME: Need to handle non-uniform case for function below (load without gep).
define amdgpu_kernel void @v_ctlz_i7_sel_eq_neg1(i7 addrspace(1)* noalias %out, i7 addrspace(1)* noalias %valptr) nounwind {
; SI-LABEL: v_ctlz_i7_sel_eq_neg1:
; SI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x9
; SI-NEXT: s_load_dwordx2 s[8:9], s[0:1], 0xb
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_mov_b32 s10, 0
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_load_ubyte v0, v[0:1], s[8:11], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_ffbh_u32_e32 v0, v0
; SI-NEXT: v_and_b32_e32 v0, 0x7f, v0
; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; VI-LABEL: v_ctlz_i7_sel_eq_neg1:
; VI-NEXT: s_load_dwordx2 s[4:5], s[0:1], 0x24
; VI-NEXT: s_load_dwordx2 s[0:1], s[0:1], 0x2c
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_ubyte v0, v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0) lgkmcnt(0)
; VI-NEXT: v_ffbh_u32_e32 v0, v0
; VI-NEXT: v_and_b32_e32 v0, 0x7f, v0
; VI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; EG-LABEL: v_ctlz_i7_sel_eq_neg1:
; EG-NEXT: ALU 0, @8, KC0[CB0:0-32], KC1[]
; EG-NEXT: ALU 12, @9, KC0[CB0:0-32], KC1[]
; EG-NEXT: MEM_RAT MSKOR T0.XW, T1.X
; EG-NEXT: Fetch clause starting at 6:
; EG-NEXT: VTX_READ_8 T0.X, T0.X, 0, #1
; EG-NEXT: ALU clause starting at 8:
; EG-NEXT: ADD_INT * T0.X, KC0[2].Z, T0.X,
; EG-NEXT: ALU clause starting at 9:
; EG-NEXT: FFBH_UINT T0.W, T0.X,
; EG-NEXT: AND_INT * T1.W, KC0[2].Y, literal.x,
; EG-NEXT: 3(4.203895e-45), 0(0.000000e+00)
; EG-NEXT: AND_INT T0.W, PV.W, literal.x,
; EG-NEXT: LSHL * T1.W, PS, literal.y,
; EG-NEXT: 127(1.779649e-43), 3(4.203895e-45)
; EG-NEXT: LSHL T0.X, PV.W, PS,
; EG-NEXT: LSHL * T0.W, literal.x, PS,
; EG-NEXT: 255(3.573311e-43), 0(0.000000e+00)
; EG-NEXT: MOV T0.Y, 0.0,
; EG-NEXT: MOV * T0.Z, 0.0,
; EG-NEXT: LSHR * T1.X, KC0[2].Y, literal.x,
; EG-NEXT: 2(2.802597e-45), 0(0.000000e+00)
  %tid = call i32 @llvm.r600.read.tidig.x()
  %valptr.gep = getelementptr i7, i7 addrspace(1)* %valptr, i32 %tid
  %val = load i7, i7 addrspace(1)* %valptr.gep
  %ctlz = call i7 @llvm.ctlz.i7(i7 %val, i1 false) nounwind readnone
  %cmp = icmp eq i7 %val, 0
  %sel = select i1 %cmp, i7 -1, i7 %ctlz
  store i7 %sel, i7 addrspace(1)* %out
  ret void
}