; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -amdgpu-codegenprepare-widen-constant-loads=0 -mtriple=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefix=SI %s
; RUN: llc -amdgpu-codegenprepare-widen-constant-loads=0 -mtriple=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefix=VI %s
; RUN: llc -amdgpu-codegenprepare-widen-constant-loads=0 -mtriple=amdgcn -mcpu=gfx1100 -verify-machineinstrs < %s | FileCheck -enable-var-scope --check-prefix=GFX11 %s
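; A uniform i16 load from the constant address space is selected as a full
; 32-bit scalar load (s_load_dword / s_load_b32) on all three targets.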
define amdgpu_kernel void @widen_i16_constant_load(ptr addrspace(4) %arg) {
; SI-LABEL: widen_i16_constant_load:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s1, s[0:1], 0x0
; SI-NEXT: s_mov_b32 s0, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_addk_i32 s1, 0x3e7
; SI-NEXT: s_or_b32 s4, s1, 4
; SI-NEXT: s_mov_b32 s1, s0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: widen_i16_constant_load:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dword s0, s[0:1], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_addk_i32 s0, 0x3e7
; VI-NEXT: s_or_b32 s0, s0, 4
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: widen_i16_constant_load:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_addk_i32 s0, 0x3e7
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b32 s0, s0, 4
; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: global_store_b16 v[0:1], v2, off
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
  %load = load i16, ptr addrspace(4) %arg, align 4
  %add = add i16 %load, 999
  %or = or i16 %add, 4
  store i16 %or, ptr addrspace(1) null
  ret void
}
define amdgpu_kernel void @widen_i16_constant_load_zext_i32(ptr addrspace(4) %arg) {
; SI-LABEL: widen_i16_constant_load_zext_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s1, s[0:1], 0x0
; SI-NEXT: s_mov_b32 s0, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_and_b32 s1, s1, 0xffff
; SI-NEXT: s_addk_i32 s1, 0x3e7
; SI-NEXT: s_or_b32 s4, s1, 4
; SI-NEXT: s_mov_b32 s1, s0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: widen_i16_constant_load_zext_i32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dword s0, s[0:1], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_and_b32 s0, s0, 0xffff
; VI-NEXT: s_addk_i32 s0, 0x3e7
; VI-NEXT: s_or_b32 s0, s0, 4
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: widen_i16_constant_load_zext_i32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_addk_i32 s0, 0x3e7
; GFX11-NEXT: s_or_b32 s0, s0, 4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
  %load = load i16, ptr addrspace(4) %arg, align 4
  %ext = zext i16 %load to i32
  %add = add i32 %ext, 999
  %or = or i32 %add, 4
  store i32 %or, ptr addrspace(1) null
  ret void
}
define amdgpu_kernel void @widen_i16_constant_load_sext_i32(ptr addrspace(4) %arg) {
; SI-LABEL: widen_i16_constant_load_sext_i32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s1, s[0:1], 0x0
; SI-NEXT: s_mov_b32 s0, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_sext_i32_i16 s1, s1
; SI-NEXT: s_addk_i32 s1, 0x3e7
; SI-NEXT: s_or_b32 s4, s1, 4
; SI-NEXT: s_mov_b32 s1, s0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: widen_i16_constant_load_sext_i32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dword s0, s[0:1], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_sext_i32_i16 s0, s0
; VI-NEXT: s_addk_i32 s0, 0x3e7
; VI-NEXT: s_or_b32 s0, s0, 4
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: widen_i16_constant_load_sext_i32:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_sext_i32_i16 s0, s0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_addk_i32 s0, 0x3e7
; GFX11-NEXT: s_or_b32 s0, s0, 4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
  %load = load i16, ptr addrspace(4) %arg, align 4
  %ext = sext i16 %load to i32
  %add = add i32 %ext, 999
  %or = or i32 %add, 4
  store i32 %or, ptr addrspace(1) null
  ret void
}
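; The 17-bit result does not fit in one short store; the checks expect a
; 16-bit store of the low bits plus a separate byte store of bit 16.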
define amdgpu_kernel void @widen_i17_constant_load(ptr addrspace(4) %arg) {
; SI-LABEL: widen_i17_constant_load:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s0, 0
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_mov_b32 s1, s0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s7, s[4:5], 0x0
; SI-NEXT: s_mov_b32 s4, 2
; SI-NEXT: s_mov_b32 s5, s0
; SI-NEXT: s_mov_b32 s6, s2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_add_i32 s7, s7, 34
; SI-NEXT: s_or_b32 s7, s7, 4
; SI-NEXT: v_mov_b32_e32 v0, s7
; SI-NEXT: s_bfe_u32 s8, s7, 0x10010
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s8
; SI-NEXT: buffer_store_byte v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: widen_i17_constant_load:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: v_mov_b32_e32 v2, 2
; VI-NEXT: v_mov_b32_e32 v3, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dword s0, s[0:1], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_i32 s0, s0, 34
; VI-NEXT: s_or_b32 s0, s0, 4
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: s_bfe_u32 s0, s0, 0x10010
; VI-NEXT: flat_store_short v[0:1], v4
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: flat_store_byte v[2:3], v0
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: widen_i17_constant_load:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_add_i32 s0, s0, 34
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b32 s0, s0, 4
; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v4, s0
; GFX11-NEXT: s_and_b32 s0, s0, 0x1ffff
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v2, 2 :: v_dual_mov_b32 v5, s0
; GFX11-NEXT: v_mov_b32_e32 v3, 0
; GFX11-NEXT: s_clause 0x1
; GFX11-NEXT: global_store_b16 v[0:1], v4, off
; GFX11-NEXT: global_store_d16_hi_b8 v[2:3], v5, off
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
  %load = load i17, ptr addrspace(4) %arg, align 4
  %add = add i17 %load, 34
  %or = or i17 %add, 4
  store i17 %or, ptr addrspace(1) null
  ret void
}
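; A half load from the constant address space is also read as a full 32-bit
; scalar load; only the low 16 bits feed the fadd.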
define amdgpu_kernel void @widen_f16_constant_load(ptr addrspace(4) %arg) {
; SI-LABEL: widen_f16_constant_load:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s0, s[0:1], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, s0
; SI-NEXT: s_mov_b32 s0, 0
; SI-NEXT: s_mov_b32 s1, s0
; SI-NEXT: v_add_f32_e32 v0, 4.0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: widen_f16_constant_load:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dword s0, s[0:1], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_add_f16_e64 v2, s0, 4.0
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: widen_f16_constant_load:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: v_mov_b32_e32 v1, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_add_f16_e64 v2, s0, 4.0
; GFX11-NEXT: global_store_b16 v[0:1], v2, off
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
  %load = load half, ptr addrspace(4) %arg, align 4
  %add = fadd half %load, 4.0
  store half %add, ptr addrspace(1) null
  ret void
}
; FIXME: valu usage on VI
define amdgpu_kernel void @widen_v2i8_constant_load(ptr addrspace(4) %arg) {
; SI-LABEL: widen_v2i8_constant_load:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s1, s[0:1], 0x0
; SI-NEXT: s_mov_b32 s0, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_and_b32 s4, s1, 0xff00
; SI-NEXT: s_add_i32 s1, s1, 12
; SI-NEXT: s_or_b32 s1, s1, 4
; SI-NEXT: s_and_b32 s1, s1, 0xff
; SI-NEXT: s_or_b32 s1, s4, s1
; SI-NEXT: s_addk_i32 s1, 0x2c00
; SI-NEXT: s_or_b32 s4, s1, 0x300
; SI-NEXT: s_mov_b32 s1, s0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: widen_v2i8_constant_load:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 44
; VI-NEXT: v_mov_b32_e32 v1, 3
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dword s0, s[0:1], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_and_b32 s1, s0, 0xffff
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: s_add_i32 s1, s1, 12
; VI-NEXT: v_add_u32_sdwa v0, vcc, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_1
; VI-NEXT: s_or_b32 s0, s1, 4
; VI-NEXT: v_or_b32_sdwa v2, v0, v1 dst_sel:BYTE_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v3, s0
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: widen_v2i8_constant_load:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: v_add_nc_u16 v0, s0, 12
; GFX11-NEXT: v_and_b32_e64 v1, 0xffffff00, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_or_b32_e32 v0, 4, v0
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_or_b32_e32 v0, v1, v0
; GFX11-NEXT: v_add_nc_u16 v2, v0, 0x2c00
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: v_mov_b32_e32 v1, 0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX11-NEXT: v_or_b32_e32 v2, 0x300, v2
; GFX11-NEXT: global_store_b16 v[0:1], v2, off
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
  %load = load <2 x i8>, ptr addrspace(4) %arg, align 4
  %add = add <2 x i8> %load, <i8 12, i8 44>
  %or = or <2 x i8> %add, <i8 4, i8 3>
  store <2 x i8> %or, ptr addrspace(1) null
  ret void
}
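; The load address here is divergent (indexed by the workitem id), so the i16
; load is not widened and stays a 16-bit VMEM load.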
define amdgpu_kernel void @no_widen_i16_constant_divergent_load(ptr addrspace(4) %arg) {
; SI-LABEL: no_widen_i16_constant_divergent_load:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s2, 0
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: v_lshlrev_b32_e32 v0, 1, v0
; SI-NEXT: v_mov_b32_e32 v1, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: buffer_load_ushort v0, v[0:1], s[0:3], 0 addr64
; SI-NEXT: s_mov_b32 s6, -1
; SI-NEXT: s_mov_b32 s4, s2
; SI-NEXT: s_mov_b32 s5, s2
; SI-NEXT: s_mov_b32 s7, s3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x3e7, v0
; SI-NEXT: v_or_b32_e32 v0, 4, v0
; SI-NEXT: buffer_store_short v0, off, s[4:7], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: no_widen_i16_constant_divergent_load:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: v_lshlrev_b32_e32 v0, 1, v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_add_u32_e32 v0, vcc, s0, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: flat_load_ushort v0, v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u16_e32 v2, 0x3e7, v0
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: v_or_b32_e32 v2, 4, v2
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: no_widen_i16_constant_divergent_load:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
; GFX11-NEXT: v_and_b32_e32 v0, 0x3ff, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 1, v0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: global_load_u16 v0, v0, s[0:1]
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: v_add_nc_u16 v2, v0, 0x3e7
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: v_mov_b32_e32 v1, 0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX11-NEXT: v_or_b32_e32 v2, 4, v2
; GFX11-NEXT: global_store_b16 v[0:1], v2, off
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
  %tid = call i32 @llvm.amdgcn.workitem.id.x()
  %tid.ext = zext i32 %tid to i64
  %gep.arg = getelementptr inbounds i16, ptr addrspace(4) %arg, i64 %tid.ext
  %load = load i16, ptr addrspace(4) %gep.arg, align 4
  %add = add i16 %load, 999
  %or = or i16 %add, 4
  store i16 %or, ptr addrspace(1) null
  ret void
}
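; The i1 load is still read with a 32-bit scalar load; only the low bit is
; kept before the byte store.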
define amdgpu_kernel void @widen_i1_constant_load(ptr addrspace(4) %arg) {
; SI-LABEL: widen_i1_constant_load:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s1, s[0:1], 0x0
; SI-NEXT: s_mov_b32 s0, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_and_b32 s4, s1, 1
; SI-NEXT: s_mov_b32 s1, s0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_byte v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: widen_i1_constant_load:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dword s0, s[0:1], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_and_b32 s0, s0, 1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_byte v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: widen_i1_constant_load:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_and_b32 s0, s0, 1
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: global_store_b8 v[0:1], v2, off
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
  %load = load i1, ptr addrspace(4) %arg, align 4
  %and = and i1 %load, true
  store i1 %and, ptr addrspace(1) null
  ret void
}
define amdgpu_kernel void @widen_i16_zextload_i64_constant_load(ptr addrspace(4) %arg) {
; SI-LABEL: widen_i16_zextload_i64_constant_load:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s1, s[0:1], 0x0
; SI-NEXT: s_mov_b32 s0, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_and_b32 s1, s1, 0xffff
; SI-NEXT: s_addk_i32 s1, 0x3e7
; SI-NEXT: s_or_b32 s4, s1, 4
; SI-NEXT: s_mov_b32 s1, s0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: widen_i16_zextload_i64_constant_load:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dword s0, s[0:1], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_and_b32 s0, s0, 0xffff
; VI-NEXT: s_addk_i32 s0, 0x3e7
; VI-NEXT: s_or_b32 s0, s0, 4
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: widen_i16_zextload_i64_constant_load:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_addk_i32 s0, 0x3e7
; GFX11-NEXT: s_or_b32 s0, s0, 4
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: global_store_b32 v[0:1], v2, off
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
  %load = load i16, ptr addrspace(4) %arg, align 4
  %zext = zext i16 %load to i32
  %add = add i32 %zext, 999
  %or = or i32 %add, 4
  store i32 %or, ptr addrspace(1) null
  ret void
}
define amdgpu_kernel void @widen_i1_zext_to_i64_constant_load(ptr addrspace(4) %arg) {
; SI-LABEL: widen_i1_zext_to_i64_constant_load:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s1, s[0:1], 0x0
; SI-NEXT: s_mov_b32 s0, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_and_b32 s1, s1, 1
; SI-NEXT: s_add_u32 s4, s1, 0x3e7
; SI-NEXT: s_addc_u32 s5, 0, 0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: s_mov_b32 s1, s0
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: widen_i1_zext_to_i64_constant_load:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dword s0, s[0:1], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_and_b32 s0, s0, 1
; VI-NEXT: s_add_u32 s0, s0, 0x3e7
; VI-NEXT: s_addc_u32 s1, 0, 0
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: widen_i1_zext_to_i64_constant_load:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_and_b32 s0, s0, 1
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_add_u32 s0, s0, 0x3e7
; GFX11-NEXT: s_addc_u32 s1, 0, 0
; GFX11-NEXT: v_dual_mov_b32 v0, 0 :: v_dual_mov_b32 v3, s1
; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: global_store_b64 v[0:1], v[2:3], off
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
  %load = load i1, ptr addrspace(4) %arg, align 4
  %zext = zext i1 %load to i64
  %add = add i64 %zext, 999
  store i64 %add, ptr addrspace(1) null
  ret void
}
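; Same widening, but through a 32-bit constant address space (addrspace(6))
; pointer.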
define amdgpu_kernel void @widen_i16_constant32_load(ptr addrspace(6) %arg) {
; SI-LABEL: widen_i16_constant32_load:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s0, s[2:3], 0x9
; SI-NEXT: s_mov_b32 s1, 0
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s0, s[0:1], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_addk_i32 s0, 0x3e7
; SI-NEXT: s_or_b32 s4, s0, 4
; SI-NEXT: s_mov_b32 s0, s1
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: widen_i16_constant32_load:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s0, s[2:3], 0x24
; VI-NEXT: s_mov_b32 s1, 0
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dword s0, s[0:1], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_addk_i32 s0, 0x3e7
; VI-NEXT: s_or_b32 s0, s0, 4
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: widen_i16_constant32_load:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b32 s0, s[2:3], 0x24
; GFX11-NEXT: s_mov_b32 s1, 0
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_addk_i32 s0, 0x3e7
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b32 s0, s0, 4
; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: global_store_b16 v[0:1], v2, off
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
  %load = load i16, ptr addrspace(6) %arg, align 4
  %add = add i16 %load, 999
  %or = or i16 %add, 4
  store i16 %or, ptr addrspace(1) null
  ret void
}
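; An i16 load from global memory marked !invariant.load is also widened to a
; 32-bit scalar load.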
define amdgpu_kernel void @widen_i16_global_invariant_load(ptr addrspace(1) %arg) {
; SI-LABEL: widen_i16_global_invariant_load:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x9
; SI-NEXT: s_mov_b32 s3, 0xf000
; SI-NEXT: s_mov_b32 s2, -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_load_dword s1, s[0:1], 0x0
; SI-NEXT: s_mov_b32 s0, 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_addk_i32 s1, 0x3e7
; SI-NEXT: s_or_b32 s4, s1, 1
; SI-NEXT: s_mov_b32 s1, s0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: buffer_store_short v0, off, s[0:3], 0
; SI-NEXT: s_endpgm
;
; VI-LABEL: widen_i16_global_invariant_load:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[2:3], 0x24
; VI-NEXT: v_mov_b32_e32 v0, 0
; VI-NEXT: v_mov_b32_e32 v1, 0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dword s0, s[0:1], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_addk_i32 s0, 0x3e7
; VI-NEXT: s_or_b32 s0, s0, 1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
;
; GFX11-LABEL: widen_i16_global_invariant_load:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_load_b64 s[0:1], s[2:3], 0x24
; GFX11-NEXT: v_mov_b32_e32 v0, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_load_b32 s0, s[0:1], 0x0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_addk_i32 s0, 0x3e7
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b32 s0, s0, 1
; GFX11-NEXT: v_dual_mov_b32 v1, 0 :: v_dual_mov_b32 v2, s0
; GFX11-NEXT: global_store_b16 v[0:1], v2, off
; GFX11-NEXT: s_nop 0
; GFX11-NEXT: s_sendmsg sendmsg(MSG_DEALLOC_VGPRS)
; GFX11-NEXT: s_endpgm
  %load = load i16, ptr addrspace(1) %arg, align 4, !invariant.load !0
  %add = add i16 %load, 999
  %or = or i16 %add, 1
  store i16 %or, ptr addrspace(1) null
  ret void
}
declare i32 @llvm.amdgcn.workitem.id.x()

!0 = !{}