; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn-amd-amdhsa -mcpu=kaveri -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=GCN,SI %s
; RUN: llc -amdgpu-scalarize-global-loads=false -mtriple=amdgcn-amd-amdhsa -mcpu=tonga -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -enable-var-scope -check-prefixes=GCN,VI %s
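
; GCN check lines are shared by both runs; SI- and VI-only lines capture
; per-target differences, e.g. scalar kernarg load offsets (dword-based on
; SI/CI, byte-based on VI) and the availability of native f16 operations.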
; half args should be promoted to float for SI and lower.
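; (The 16-bit value is still read back with a 32-bit scalar load on those
; targets; the v_cvt_f32_f16 conversions this implies are visible in the
; extload tests below.)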
define amdgpu_kernel void @load_f16_arg(half addrspace(1)* %out, half %arg) #0 {
; SI-LABEL: load_f16_arg:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; SI-NEXT: s_load_dword s2, s[4:5], 0x2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: flat_store_short v[0:1], v2
; SI-NEXT: s_endpgm
;
; VI-LABEL: load_f16_arg:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: s_load_dword s2, s[4:5], 0x8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
  store half %arg, half addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @load_v2f16_arg(<2 x half> addrspace(1)* %out, <2 x half> %arg) #0 {
; SI-LABEL: load_v2f16_arg:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; SI-NEXT: s_load_dword s2, s[4:5], 0x2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: flat_store_dword v[0:1], v2
; SI-NEXT: s_endpgm
;
; VI-LABEL: load_v2f16_arg:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: s_load_dword s2, s[4:5], 0x8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
  store <2 x half> %arg, <2 x half> addrspace(1)* %out
  ret void
}
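; v3f16 legalizes to a dword store of the first two elements plus a short
; store of the third, rather than a single 48-bit access.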
define amdgpu_kernel void @load_v3f16_arg(<3 x half> addrspace(1)* %out, <3 x half> %arg) #0 {
; SI-LABEL: load_v3f16_arg:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; SI-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_add_u32 s4, s0, 4
; SI-NEXT: s_addc_u32 s5, s1, 0
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v4, s3
; SI-NEXT: v_mov_b32_e32 v3, s5
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: v_mov_b32_e32 v5, s2
; SI-NEXT: flat_store_short v[2:3], v4
; SI-NEXT: flat_store_dword v[0:1], v5
; SI-NEXT: s_endpgm
;
; VI-LABEL: load_v3f16_arg:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s4, s0, 4
; VI-NEXT: s_addc_u32 s5, s1, 0
; VI-NEXT: v_mov_b32_e32 v2, s4
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v4, s3
; VI-NEXT: v_mov_b32_e32 v3, s5
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v5, s2
; VI-NEXT: flat_store_short v[2:3], v4
; VI-NEXT: flat_store_dword v[0:1], v5
; VI-NEXT: s_endpgm
  store <3 x half> %arg, <3 x half> addrspace(1)* %out
  ret void
}
; FIXME: Why not one load?
define amdgpu_kernel void @load_v4f16_arg(<4 x half> addrspace(1)* %out, <4 x half> %arg) #0 {
; SI-LABEL: load_v4f16_arg:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; SI-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: v_mov_b32_e32 v3, s3
; SI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; SI-NEXT: s_endpgm
;
; VI-LABEL: load_v4f16_arg:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v3, s3
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
  store <4 x half> %arg, <4 x half> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @load_v8f16_arg(<8 x half> addrspace(1)* %out, <8 x half> %arg) #0 {
; SI-LABEL: load_v8f16_arg:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x4
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v4, s6
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v5, s7
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: v_mov_b32_e32 v2, s2
; SI-NEXT: v_mov_b32_e32 v3, s3
; SI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; SI-NEXT: s_endpgm
;
; VI-LABEL: load_v8f16_arg:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, s6
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v5, s7
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_mov_b32_e32 v2, s2
; VI-NEXT: v_mov_b32_e32 v3, s3
; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; VI-NEXT: s_endpgm
  store <8 x half> %arg, <8 x half> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @extload_v2f16_arg(<2 x float> addrspace(1)* %out, <2 x half> %in) #0 {
; SI-LABEL: extload_v2f16_arg:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s0, s[4:5], 0x2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s1, s0, 16
; SI-NEXT: v_cvt_f32_f16_e32 v0, s0
; SI-NEXT: v_cvt_f32_f16_e32 v1, s1
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, s1
; SI-NEXT: v_mov_b32_e32 v2, s0
; SI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; SI-NEXT: s_endpgm
;
; VI-LABEL: extload_v2f16_arg:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s0, s[4:5], 0x8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s1, s0, 16
; VI-NEXT: v_cvt_f32_f16_e32 v0, s0
; VI-NEXT: v_cvt_f32_f16_e32 v1, s1
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: s_endpgm
  %fpext = fpext <2 x half> %in to <2 x float>
  store <2 x float> %fpext, <2 x float> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @extload_f16_to_f32_arg(float addrspace(1)* %out, half %arg) #0 {
; SI-LABEL: extload_f16_to_f32_arg:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s0, s[4:5], 0x2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v2, s0
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: flat_store_dword v[0:1], v2
; SI-NEXT: s_endpgm
;
; VI-LABEL: extload_f16_to_f32_arg:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s0, s[4:5], 0x8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cvt_f32_f16_e32 v2, s0
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
  %ext = fpext half %arg to float
  store float %ext, float addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @extload_v2f16_to_v2f32_arg(<2 x float> addrspace(1)* %out, <2 x half> %arg) #0 {
; SI-LABEL: extload_v2f16_to_v2f32_arg:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s0, s[4:5], 0x2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s1, s0, 16
; SI-NEXT: v_cvt_f32_f16_e32 v0, s0
; SI-NEXT: v_cvt_f32_f16_e32 v1, s1
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, s1
; SI-NEXT: v_mov_b32_e32 v2, s0
; SI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; SI-NEXT: s_endpgm
;
; VI-LABEL: extload_v2f16_to_v2f32_arg:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s0, s[4:5], 0x8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s1, s0, 16
; VI-NEXT: v_cvt_f32_f16_e32 v0, s0
; VI-NEXT: v_cvt_f32_f16_e32 v1, s1
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: s_endpgm
  %ext = fpext <2 x half> %arg to <2 x float>
  store <2 x float> %ext, <2 x float> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @extload_v3f16_to_v3f32_arg(<3 x float> addrspace(1)* %out, <3 x half> %arg) #0 {
; SI-LABEL: extload_v3f16_to_v3f32_arg:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s2, s0, 16
; SI-NEXT: v_cvt_f32_f16_e32 v2, s1
; SI-NEXT: v_cvt_f32_f16_e32 v0, s0
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; SI-NEXT: v_cvt_f32_f16_e32 v1, s2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v4, s1
; SI-NEXT: v_mov_b32_e32 v3, s0
; SI-NEXT: flat_store_dwordx3 v[3:4], v[0:2]
; SI-NEXT: s_endpgm
;
; VI-LABEL: extload_v3f16_to_v3f32_arg:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s2, s0, 16
; VI-NEXT: v_cvt_f32_f16_e32 v0, s0
; VI-NEXT: v_cvt_f32_f16_e32 v2, s1
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: v_cvt_f32_f16_e32 v1, s2
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v4, s1
; VI-NEXT: v_mov_b32_e32 v3, s0
; VI-NEXT: flat_store_dwordx3 v[3:4], v[0:2]
; VI-NEXT: s_endpgm
  %ext = fpext <3 x half> %arg to <3 x float>
  store <3 x float> %ext, <3 x float> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @extload_v4f16_to_v4f32_arg(<4 x float> addrspace(1)* %out, <4 x half> %arg) #0 {
; SI-LABEL: extload_v4f16_to_v4f32_arg:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s2, s1, 16
; SI-NEXT: s_lshr_b32 s3, s0, 16
; SI-NEXT: v_cvt_f32_f16_e32 v2, s1
; SI-NEXT: v_cvt_f32_f16_e32 v0, s0
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; SI-NEXT: v_cvt_f32_f16_e32 v3, s2
; SI-NEXT: v_cvt_f32_f16_e32 v1, s3
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, s1
; SI-NEXT: v_mov_b32_e32 v4, s0
; SI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; SI-NEXT: s_endpgm
;
; VI-LABEL: extload_v4f16_to_v4f32_arg:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s2, s1, 16
; VI-NEXT: s_lshr_b32 s3, s0, 16
; VI-NEXT: v_cvt_f32_f16_e32 v0, s0
; VI-NEXT: v_cvt_f32_f16_e32 v2, s1
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: v_cvt_f32_f16_e32 v3, s2
; VI-NEXT: v_cvt_f32_f16_e32 v1, s3
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; VI-NEXT: s_endpgm
  %ext = fpext <4 x half> %arg to <4 x float>
  store <4 x float> %ext, <4 x float> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @extload_v8f16_to_v8f32_arg(<8 x float> addrspace(1)* %out, <8 x half> %arg) #0 {
; SI-LABEL: extload_v8f16_to_v8f32_arg:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x4
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s4, s1, 16
; SI-NEXT: s_lshr_b32 s5, s0, 16
; SI-NEXT: s_lshr_b32 s8, s3, 16
; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
; SI-NEXT: s_lshr_b32 s4, s2, 16
; SI-NEXT: v_cvt_f32_f16_e32 v0, s0
; SI-NEXT: v_cvt_f32_f16_e32 v7, s8
; SI-NEXT: v_cvt_f32_f16_e32 v5, s4
; SI-NEXT: v_cvt_f32_f16_e32 v6, s3
; SI-NEXT: v_cvt_f32_f16_e32 v4, s2
; SI-NEXT: s_add_u32 s0, s6, 16
; SI-NEXT: v_cvt_f32_f16_e32 v2, s1
; SI-NEXT: s_addc_u32 s1, s7, 0
; SI-NEXT: v_mov_b32_e32 v9, s1
; SI-NEXT: v_cvt_f32_f16_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v8, s0
; SI-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
; SI-NEXT: s_nop 0
; SI-NEXT: v_mov_b32_e32 v4, s6
; SI-NEXT: v_mov_b32_e32 v5, s7
; SI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; SI-NEXT: s_endpgm
;
; VI-LABEL: extload_v8f16_to_v8f32_arg:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s4, s1, 16
; VI-NEXT: s_lshr_b32 s5, s0, 16
; VI-NEXT: s_lshr_b32 s8, s3, 16
; VI-NEXT: v_cvt_f32_f16_e32 v3, s4
; VI-NEXT: s_lshr_b32 s4, s2, 16
; VI-NEXT: v_cvt_f32_f16_e32 v0, s0
; VI-NEXT: v_cvt_f32_f16_e32 v7, s8
; VI-NEXT: v_cvt_f32_f16_e32 v5, s4
; VI-NEXT: v_cvt_f32_f16_e32 v6, s3
; VI-NEXT: v_cvt_f32_f16_e32 v4, s2
; VI-NEXT: s_add_u32 s0, s6, 16
; VI-NEXT: v_cvt_f32_f16_e32 v2, s1
; VI-NEXT: s_addc_u32 s1, s7, 0
; VI-NEXT: v_mov_b32_e32 v9, s1
; VI-NEXT: v_cvt_f32_f16_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v8, s0
; VI-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
; VI-NEXT: s_nop 0
; VI-NEXT: v_mov_b32_e32 v4, s6
; VI-NEXT: v_mov_b32_e32 v5, s7
; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; VI-NEXT: s_endpgm
  %ext = fpext <8 x half> %arg to <8 x float>
  store <8 x float> %ext, <8 x float> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @extload_f16_to_f64_arg(double addrspace(1)* %out, half %arg) #0 {
; SI-LABEL: extload_f16_to_f64_arg:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s0, s[4:5], 0x2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, s0
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; SI-NEXT: v_cvt_f64_f32_e32 v[0:1], v0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, s1
; SI-NEXT: v_mov_b32_e32 v2, s0
; SI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; SI-NEXT: s_endpgm
;
; VI-LABEL: extload_f16_to_f64_arg:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s0, s[4:5], 0x8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cvt_f32_f16_e32 v0, s0
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: v_cvt_f64_f32_e32 v[0:1], v0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: s_endpgm
  %ext = fpext half %arg to double
  store double %ext, double addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @extload_v2f16_to_v2f64_arg(<2 x double> addrspace(1)* %out, <2 x half> %arg) #0 {
; SI-LABEL: extload_v2f16_to_v2f64_arg:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s0, s[4:5], 0x2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s1, s0, 16
; SI-NEXT: v_cvt_f32_f16_e32 v0, s0
; SI-NEXT: v_cvt_f32_f16_e32 v2, s1
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; SI-NEXT: v_cvt_f64_f32_e32 v[0:1], v0
; SI-NEXT: v_cvt_f64_f32_e32 v[2:3], v2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v5, s1
; SI-NEXT: v_mov_b32_e32 v4, s0
; SI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; SI-NEXT: s_endpgm
;
; VI-LABEL: extload_v2f16_to_v2f64_arg:
; VI: ; %bb.0:
; VI-NEXT: s_load_dword s0, s[4:5], 0x8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s1, s0, 16
; VI-NEXT: v_cvt_f32_f16_e32 v0, s0
; VI-NEXT: v_cvt_f32_f16_e32 v2, s1
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: v_cvt_f64_f32_e32 v[0:1], v0
; VI-NEXT: v_cvt_f64_f32_e32 v[2:3], v2
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; VI-NEXT: s_endpgm
  %ext = fpext <2 x half> %arg to <2 x double>
  store <2 x double> %ext, <2 x double> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @extload_v3f16_to_v3f64_arg(<3 x double> addrspace(1)* %out, <3 x half> %arg) #0 {
; SI-LABEL: extload_v3f16_to_v3f64_arg:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; SI-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, s3
; SI-NEXT: s_lshr_b32 s4, s2, 16
; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
; SI-NEXT: v_cvt_f32_f16_e32 v1, s2
; SI-NEXT: s_add_u32 s2, s0, 16
; SI-NEXT: v_cvt_f64_f32_e32 v[4:5], v0
; SI-NEXT: s_addc_u32 s3, s1, 0
; SI-NEXT: v_mov_b32_e32 v7, s3
; SI-NEXT: v_cvt_f64_f32_e32 v[0:1], v1
; SI-NEXT: v_cvt_f64_f32_e32 v[2:3], v2
; SI-NEXT: v_mov_b32_e32 v6, s2
; SI-NEXT: flat_store_dwordx2 v[6:7], v[4:5]
; SI-NEXT: v_mov_b32_e32 v5, s1
; SI-NEXT: v_mov_b32_e32 v4, s0
; SI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; SI-NEXT: s_endpgm
;
; VI-LABEL: extload_v3f16_to_v3f64_arg:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_cvt_f32_f16_e32 v1, s3
; VI-NEXT: s_lshr_b32 s4, s2, 16
; VI-NEXT: v_cvt_f32_f16_e32 v2, s4
; VI-NEXT: v_cvt_f32_f16_e32 v0, s2
; VI-NEXT: s_add_u32 s2, s0, 16
; VI-NEXT: v_cvt_f64_f32_e32 v[4:5], v1
; VI-NEXT: s_addc_u32 s3, s1, 0
; VI-NEXT: v_mov_b32_e32 v7, s3
; VI-NEXT: v_cvt_f64_f32_e32 v[0:1], v0
; VI-NEXT: v_cvt_f64_f32_e32 v[2:3], v2
; VI-NEXT: v_mov_b32_e32 v6, s2
; VI-NEXT: flat_store_dwordx2 v[6:7], v[4:5]
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; VI-NEXT: s_endpgm
  %ext = fpext <3 x half> %arg to <3 x double>
  store <3 x double> %ext, <3 x double> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @extload_v4f16_to_v4f64_arg(<4 x double> addrspace(1)* %out, <4 x half> %arg) #0 {
; SI-LABEL: extload_v4f16_to_v4f64_arg:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; SI-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s4, s3, 16
; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
; SI-NEXT: v_cvt_f32_f16_e32 v5, s3
; SI-NEXT: s_lshr_b32 s5, s2, 16
; SI-NEXT: v_cvt_f32_f16_e32 v0, s2
; SI-NEXT: v_cvt_f32_f16_e32 v2, s5
; SI-NEXT: s_add_u32 s2, s0, 16
; SI-NEXT: v_cvt_f64_f32_e32 v[6:7], v4
; SI-NEXT: v_cvt_f64_f32_e32 v[4:5], v5
; SI-NEXT: s_addc_u32 s3, s1, 0
; SI-NEXT: v_mov_b32_e32 v9, s3
; SI-NEXT: v_cvt_f64_f32_e32 v[0:1], v0
; SI-NEXT: v_cvt_f64_f32_e32 v[2:3], v2
; SI-NEXT: v_mov_b32_e32 v8, s2
; SI-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
; SI-NEXT: s_nop 0
; SI-NEXT: v_mov_b32_e32 v5, s1
; SI-NEXT: v_mov_b32_e32 v4, s0
; SI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; SI-NEXT: s_endpgm
;
; VI-LABEL: extload_v4f16_to_v4f64_arg:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s5, s3, 16
; VI-NEXT: v_cvt_f32_f16_e32 v4, s5
; VI-NEXT: v_cvt_f32_f16_e32 v5, s3
; VI-NEXT: s_lshr_b32 s4, s2, 16
; VI-NEXT: v_cvt_f32_f16_e32 v0, s2
; VI-NEXT: v_cvt_f32_f16_e32 v2, s4
; VI-NEXT: s_add_u32 s2, s0, 16
; VI-NEXT: v_cvt_f64_f32_e32 v[6:7], v4
; VI-NEXT: v_cvt_f64_f32_e32 v[4:5], v5
; VI-NEXT: s_addc_u32 s3, s1, 0
; VI-NEXT: v_mov_b32_e32 v9, s3
; VI-NEXT: v_cvt_f64_f32_e32 v[0:1], v0
; VI-NEXT: v_cvt_f64_f32_e32 v[2:3], v2
; VI-NEXT: v_mov_b32_e32 v8, s2
; VI-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
; VI-NEXT: s_nop 0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; VI-NEXT: s_endpgm
  %ext = fpext <4 x half> %arg to <4 x double>
  store <4 x double> %ext, <4 x double> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @extload_v8f16_to_v8f64_arg(<8 x double> addrspace(1)* %out, <8 x half> %arg) #0 {
; SI-LABEL: extload_v8f16_to_v8f64_arg:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x4
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s4, s3, 16
; SI-NEXT: v_cvt_f32_f16_e32 v0, s4
; SI-NEXT: v_cvt_f32_f16_e32 v12, s3
; SI-NEXT: s_lshr_b32 s5, s2, 16
; SI-NEXT: s_lshr_b32 s8, s1, 16
; SI-NEXT: s_lshr_b32 s4, s0, 16
; SI-NEXT: v_cvt_f32_f16_e32 v1, s5
; SI-NEXT: v_cvt_f32_f16_e32 v9, s0
; SI-NEXT: v_cvt_f32_f16_e32 v8, s2
; SI-NEXT: s_add_u32 s0, s6, 48
; SI-NEXT: v_cvt_f32_f16_e32 v5, s1
; SI-NEXT: s_addc_u32 s1, s7, 0
; SI-NEXT: v_cvt_f64_f32_e32 v[14:15], v0
; SI-NEXT: v_cvt_f64_f32_e32 v[12:13], v12
; SI-NEXT: v_cvt_f32_f16_e32 v4, s8
; SI-NEXT: v_mov_b32_e32 v17, s1
; SI-NEXT: v_mov_b32_e32 v16, s0
; SI-NEXT: s_add_u32 s0, s6, 32
; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
; SI-NEXT: v_cvt_f64_f32_e32 v[10:11], v1
; SI-NEXT: v_cvt_f64_f32_e32 v[0:1], v9
; SI-NEXT: v_cvt_f64_f32_e32 v[8:9], v8
; SI-NEXT: s_addc_u32 s1, s7, 0
; SI-NEXT: flat_store_dwordx4 v[16:17], v[12:15]
; SI-NEXT: v_cvt_f64_f32_e32 v[6:7], v4
; SI-NEXT: v_mov_b32_e32 v13, s1
; SI-NEXT: v_mov_b32_e32 v12, s0
; SI-NEXT: s_add_u32 s0, s6, 16
; SI-NEXT: v_cvt_f64_f32_e32 v[4:5], v5
; SI-NEXT: s_addc_u32 s1, s7, 0
; SI-NEXT: flat_store_dwordx4 v[12:13], v[8:11]
; SI-NEXT: v_cvt_f64_f32_e32 v[2:3], v2
; SI-NEXT: v_mov_b32_e32 v9, s1
; SI-NEXT: v_mov_b32_e32 v8, s0
; SI-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
; SI-NEXT: s_nop 0
; SI-NEXT: v_mov_b32_e32 v4, s6
; SI-NEXT: v_mov_b32_e32 v5, s7
; SI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; SI-NEXT: s_endpgm
;
; VI-LABEL: extload_v8f16_to_v8f64_arg:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x0
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s4, s0, 16
; VI-NEXT: s_lshr_b32 s8, s2, 16
; VI-NEXT: s_lshr_b32 s9, s3, 16
; VI-NEXT: v_cvt_f32_f16_e32 v0, s4
; VI-NEXT: v_cvt_f32_f16_e32 v12, s3
; VI-NEXT: v_cvt_f32_f16_e32 v4, s8
; VI-NEXT: v_cvt_f32_f16_e32 v5, s9
; VI-NEXT: s_lshr_b32 s5, s1, 16
; VI-NEXT: v_cvt_f32_f16_e32 v8, s2
; VI-NEXT: v_cvt_f64_f32_e32 v[2:3], v0
; VI-NEXT: v_cvt_f32_f16_e32 v0, s0
; VI-NEXT: s_add_u32 s0, s6, 48
; VI-NEXT: v_cvt_f64_f32_e32 v[10:11], v4
; VI-NEXT: v_cvt_f32_f16_e32 v4, s1
; VI-NEXT: s_addc_u32 s1, s7, 0
; VI-NEXT: v_cvt_f64_f32_e32 v[14:15], v5
; VI-NEXT: v_cvt_f64_f32_e32 v[12:13], v12
; VI-NEXT: v_cvt_f32_f16_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v17, s1
; VI-NEXT: v_mov_b32_e32 v16, s0
; VI-NEXT: s_add_u32 s0, s6, 32
; VI-NEXT: v_cvt_f64_f32_e32 v[8:9], v8
; VI-NEXT: s_addc_u32 s1, s7, 0
; VI-NEXT: flat_store_dwordx4 v[16:17], v[12:15]
; VI-NEXT: v_cvt_f64_f32_e32 v[6:7], v1
; VI-NEXT: v_mov_b32_e32 v13, s1
; VI-NEXT: v_mov_b32_e32 v12, s0
; VI-NEXT: s_add_u32 s0, s6, 16
; VI-NEXT: v_cvt_f64_f32_e32 v[4:5], v4
; VI-NEXT: s_addc_u32 s1, s7, 0
; VI-NEXT: flat_store_dwordx4 v[12:13], v[8:11]
; VI-NEXT: v_cvt_f64_f32_e32 v[0:1], v0
; VI-NEXT: v_mov_b32_e32 v9, s1
; VI-NEXT: v_mov_b32_e32 v8, s0
; VI-NEXT: flat_store_dwordx4 v[8:9], v[4:7]
; VI-NEXT: s_nop 0
; VI-NEXT: v_mov_b32_e32 v4, s6
; VI-NEXT: v_mov_b32_e32 v5, s7
; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; VI-NEXT: s_endpgm
  %ext = fpext <8 x half> %arg to <8 x double>
  store <8 x double> %ext, <8 x double> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @global_load_store_f16(half addrspace(1)* %out, half addrspace(1)* %in) #0 {
; GCN-LABEL: global_load_store_f16:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v2, s2
; GCN-NEXT: v_mov_b32_e32 v3, s3
; GCN-NEXT: flat_load_ushort v2, v[2:3]
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v1, s1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: flat_store_short v[0:1], v2
; GCN-NEXT: s_endpgm
  %val = load half, half addrspace(1)* %in
  store half %val, half addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @global_load_store_v2f16(<2 x half> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
; GCN-LABEL: global_load_store_v2f16:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v2, s2
; GCN-NEXT: v_mov_b32_e32 v3, s3
; GCN-NEXT: flat_load_dword v2, v[2:3]
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v1, s1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: flat_store_dword v[0:1], v2
; GCN-NEXT: s_endpgm
  %val = load <2 x half>, <2 x half> addrspace(1)* %in
  store <2 x half> %val, <2 x half> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @global_load_store_v4f16(<4 x half> addrspace(1)* %in, <4 x half> addrspace(1)* %out) #0 {
; GCN-LABEL: global_load_store_v4f16:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v1, s1
; GCN-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; GCN-NEXT: v_mov_b32_e32 v2, s2
; GCN-NEXT: v_mov_b32_e32 v3, s3
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN-NEXT: s_endpgm
  %val = load <4 x half>, <4 x half> addrspace(1)* %in
  store <4 x half> %val, <4 x half> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @global_load_store_v8f16(<8 x half> addrspace(1)* %out, <8 x half> addrspace(1)* %in) #0 {
; GCN-LABEL: global_load_store_v8f16:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s2
; GCN-NEXT: v_mov_b32_e32 v1, s3
; GCN-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; GCN-NEXT: v_mov_b32_e32 v4, s0
; GCN-NEXT: v_mov_b32_e32 v5, s1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; GCN-NEXT: s_endpgm
  %val = load <8 x half>, <8 x half> addrspace(1)* %in
  store <8 x half> %val, <8 x half> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @global_extload_f16_to_f32(float addrspace(1)* %out, half addrspace(1)* %in) #0 {
; GCN-LABEL: global_extload_f16_to_f32:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s2
; GCN-NEXT: v_mov_b32_e32 v1, s3
; GCN-NEXT: flat_load_ushort v0, v[0:1]
; GCN-NEXT: v_mov_b32_e32 v1, s1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_cvt_f32_f16_e32 v2, v0
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: flat_store_dword v[0:1], v2
; GCN-NEXT: s_endpgm
  %val = load half, half addrspace(1)* %in
  %cvt = fpext half %val to float
  store float %cvt, float addrspace(1)* %out
  ret void
}
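; For extending loads of packed halves, SI extracts the high half with
; v_lshrrev_b32 before converting, while VI converts it directly with
; v_cvt_f32_f16_sdwa src0_sel:WORD_1.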
define amdgpu_kernel void @global_extload_v2f16_to_v2f32(<2 x float> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
; SI-LABEL: global_extload_v2f16_to_v2f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: flat_load_dword v1, v[0:1]
; SI-NEXT: v_mov_b32_e32 v2, s0
; SI-NEXT: v_mov_b32_e32 v3, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; SI-NEXT: s_endpgm
;
; VI-LABEL: global_extload_v2f16_to_v2f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_load_dword v1, v[0:1]
; VI-NEXT: v_mov_b32_e32 v2, s0
; VI-NEXT: v_mov_b32_e32 v3, s1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cvt_f32_f16_e32 v0, v1
; VI-NEXT: v_cvt_f32_f16_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; VI-NEXT: s_endpgm
  %val = load <2 x half>, <2 x half> addrspace(1)* %in
  %cvt = fpext <2 x half> %val to <2 x float>
  store <2 x float> %cvt, <2 x float> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @global_extload_v3f16_to_v3f32(<3 x float> addrspace(1)* %out, <3 x half> addrspace(1)* %in) #0 {
; SI-LABEL: global_extload_v3f16_to_v3f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: flat_load_dwordx2 v[1:2], v[0:1]
; SI-NEXT: v_mov_b32_e32 v3, s0
; SI-NEXT: v_mov_b32_e32 v4, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: flat_store_dwordx3 v[3:4], v[0:2]
; SI-NEXT: s_endpgm
;
; VI-LABEL: global_extload_v3f16_to_v3f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_load_dwordx2 v[1:2], v[0:1]
; VI-NEXT: v_mov_b32_e32 v3, s0
; VI-NEXT: v_mov_b32_e32 v4, s1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cvt_f32_f16_e32 v0, v1
; VI-NEXT: v_cvt_f32_f16_e32 v2, v2
; VI-NEXT: v_cvt_f32_f16_sdwa v1, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: flat_store_dwordx3 v[3:4], v[0:2]
; VI-NEXT: s_endpgm
  %val = load <3 x half>, <3 x half> addrspace(1)* %in
  %cvt = fpext <3 x half> %val to <3 x float>
  store <3 x float> %cvt, <3 x float> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @global_extload_v4f16_to_v4f32(<4 x float> addrspace(1)* %out, <4 x half> addrspace(1)* %in) #0 {
; SI-LABEL: global_extload_v4f16_to_v4f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: flat_load_dwordx2 v[3:4], v[0:1]
; SI-NEXT: v_mov_b32_e32 v5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v2, v4
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v4
; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v3
; SI-NEXT: v_cvt_f32_f16_e32 v0, v3
; SI-NEXT: v_cvt_f32_f16_e32 v3, v1
; SI-NEXT: v_cvt_f32_f16_e32 v1, v4
; SI-NEXT: v_mov_b32_e32 v4, s0
; SI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; SI-NEXT: s_endpgm
;
; VI-LABEL: global_extload_v4f16_to_v4f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_load_dwordx2 v[4:5], v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cvt_f32_f16_e32 v0, v4
; VI-NEXT: v_cvt_f32_f16_sdwa v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f32_f16_e32 v2, v5
; VI-NEXT: v_cvt_f32_f16_sdwa v3, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; VI-NEXT: s_endpgm
  %val = load <4 x half>, <4 x half> addrspace(1)* %in
  %cvt = fpext <4 x half> %val to <4 x float>
  store <4 x float> %cvt, <4 x float> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @global_extload_v8f16_to_v8f32(<8 x float> addrspace(1)* %out, <8 x half> addrspace(1)* %in) #0 {
; SI-LABEL: global_extload_v8f16_to_v8f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; SI-NEXT: s_add_u32 s2, s0, 16
; SI-NEXT: s_addc_u32 s3, s1, 0
; SI-NEXT: v_mov_b32_e32 v13, s1
; SI-NEXT: v_mov_b32_e32 v12, s0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v10, v3
; SI-NEXT: v_cvt_f32_f16_e32 v8, v2
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; SI-NEXT: v_cvt_f32_f16_e32 v6, v1
; SI-NEXT: v_cvt_f32_f16_e32 v4, v0
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; SI-NEXT: v_cvt_f32_f16_e32 v11, v3
; SI-NEXT: v_cvt_f32_f16_e32 v9, v2
; SI-NEXT: v_cvt_f32_f16_e32 v7, v1
; SI-NEXT: v_cvt_f32_f16_e32 v5, v0
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
; SI-NEXT: flat_store_dwordx4 v[12:13], v[4:7]
; SI-NEXT: s_endpgm
;
; VI-LABEL: global_extload_v8f16_to_v8f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; VI-NEXT: s_add_u32 s2, s0, 16
; VI-NEXT: s_addc_u32 s3, s1, 0
; VI-NEXT: v_mov_b32_e32 v13, s1
; VI-NEXT: v_mov_b32_e32 v12, s0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cvt_f32_f16_e32 v10, v3
; VI-NEXT: v_cvt_f32_f16_e32 v8, v2
; VI-NEXT: v_cvt_f32_f16_sdwa v11, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f32_f16_sdwa v9, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f32_f16_e32 v6, v1
; VI-NEXT: v_cvt_f32_f16_e32 v4, v0
; VI-NEXT: v_cvt_f32_f16_sdwa v7, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f32_f16_sdwa v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
; VI-NEXT: flat_store_dwordx4 v[12:13], v[4:7]
; VI-NEXT: s_endpgm
  %val = load <8 x half>, <8 x half> addrspace(1)* %in
  %cvt = fpext <8 x half> %val to <8 x float>
  store <8 x float> %cvt, <8 x float> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @global_extload_v16f16_to_v16f32(<16 x float> addrspace(1)* %out, <16 x half> addrspace(1)* %in) #0 {
; SI-LABEL: global_extload_v16f16_to_v16f32:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_add_u32 s4, s2, 16
; SI-NEXT: v_mov_b32_e32 v5, s3
; SI-NEXT: s_addc_u32 s5, s3, 0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v4, s2
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; SI-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
; SI-NEXT: s_add_u32 s2, s0, 16
; SI-NEXT: s_addc_u32 s3, s1, 0
; SI-NEXT: v_mov_b32_e32 v14, s3
; SI-NEXT: v_mov_b32_e32 v13, s2
; SI-NEXT: s_add_u32 s2, s0, 48
; SI-NEXT: s_addc_u32 s3, s1, 0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v8, v1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v11, v7
; SI-NEXT: v_cvt_f32_f16_e32 v9, v6
; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6
; SI-NEXT: v_cvt_f32_f16_e32 v12, v7
; SI-NEXT: v_cvt_f32_f16_e32 v10, v6
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v5
; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v4
; SI-NEXT: flat_store_dwordx4 v[13:14], v[9:12]
; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v0
; SI-NEXT: v_cvt_f32_f16_e32 v12, v3
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v2
; SI-NEXT: v_cvt_f32_f16_e32 v6, v0
; SI-NEXT: v_cvt_f32_f16_e32 v10, v2
; SI-NEXT: v_cvt_f32_f16_e32 v9, v1
; SI-NEXT: v_cvt_f32_f16_e32 v13, v3
; SI-NEXT: v_cvt_f32_f16_e32 v2, v5
; SI-NEXT: v_cvt_f32_f16_e32 v0, v4
; SI-NEXT: v_mov_b32_e32 v5, s1
; SI-NEXT: v_cvt_f32_f16_e32 v3, v16
; SI-NEXT: v_cvt_f32_f16_e32 v1, v17
; SI-NEXT: v_mov_b32_e32 v4, s0
; SI-NEXT: s_add_u32 s0, s0, 32
; SI-NEXT: v_cvt_f32_f16_e32 v11, v11
; SI-NEXT: s_addc_u32 s1, s1, 0
; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
; SI-NEXT: v_mov_b32_e32 v15, s3
; SI-NEXT: v_mov_b32_e32 v17, s1
; SI-NEXT: v_mov_b32_e32 v14, s2
; SI-NEXT: v_mov_b32_e32 v16, s0
; SI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; SI-NEXT: flat_store_dwordx4 v[14:15], v[10:13]
; SI-NEXT: flat_store_dwordx4 v[16:17], v[6:9]
; SI-NEXT: s_endpgm
;
; VI-LABEL: global_extload_v16f16_to_v16f32:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: s_add_u32 s2, s2, 16
; VI-NEXT: s_addc_u32 s3, s3, 0
; VI-NEXT: v_mov_b32_e32 v5, s3
; VI-NEXT: v_mov_b32_e32 v4, s2
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; VI-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
; VI-NEXT: s_add_u32 s2, s0, 16
; VI-NEXT: s_addc_u32 s3, s1, 0
; VI-NEXT: v_mov_b32_e32 v19, s3
; VI-NEXT: v_mov_b32_e32 v18, s2
; VI-NEXT: s_add_u32 s2, s0, 48
; VI-NEXT: s_addc_u32 s3, s1, 0
; VI-NEXT: v_mov_b32_e32 v17, s1
; VI-NEXT: v_mov_b32_e32 v16, s0
; VI-NEXT: s_add_u32 s0, s0, 32
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v21, s3
; VI-NEXT: v_mov_b32_e32 v20, s2
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_cvt_f32_f16_e32 v14, v3
; VI-NEXT: v_cvt_f32_f16_sdwa v15, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f32_f16_e32 v12, v2
; VI-NEXT: v_cvt_f32_f16_sdwa v13, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f32_f16_e32 v10, v1
; VI-NEXT: v_cvt_f32_f16_sdwa v11, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f32_f16_e32 v8, v0
; VI-NEXT: v_cvt_f32_f16_sdwa v9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: flat_store_dwordx4 v[18:19], v[12:15]
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_cvt_f32_f16_e32 v2, v5
; VI-NEXT: v_cvt_f32_f16_e32 v14, v7
; VI-NEXT: v_cvt_f32_f16_e32 v12, v6
; VI-NEXT: v_cvt_f32_f16_sdwa v15, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f32_f16_sdwa v13, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f32_f16_e32 v0, v4
; VI-NEXT: v_cvt_f32_f16_sdwa v3, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f32_f16_sdwa v1, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: flat_store_dwordx4 v[16:17], v[8:11]
; VI-NEXT: flat_store_dwordx4 v[20:21], v[12:15]
; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; VI-NEXT: s_endpgm
  %val = load <16 x half>, <16 x half> addrspace(1)* %in
  %cvt = fpext <16 x half> %val to <16 x float>
  store <16 x float> %cvt, <16 x float> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @global_extload_f16_to_f64(double addrspace(1)* %out, half addrspace(1)* %in) #0 {
; GCN-LABEL: global_extload_f16_to_f64:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s2
; GCN-NEXT: v_mov_b32_e32 v1, s3
; GCN-NEXT: flat_load_ushort v0, v[0:1]
; GCN-NEXT: v_mov_b32_e32 v2, s0
; GCN-NEXT: v_mov_b32_e32 v3, s1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_cvt_f32_f16_e32 v0, v0
; GCN-NEXT: v_cvt_f64_f32_e32 v[0:1], v0
; GCN-NEXT: flat_store_dwordx2 v[2:3], v[0:1]
; GCN-NEXT: s_endpgm
  %val = load half, half addrspace(1)* %in
  %cvt = fpext half %val to double
  store double %cvt, double addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @global_extload_v2f16_to_v2f64(<2 x double> addrspace(1)* %out, <2 x half> addrspace(1)* %in) #0 {
; SI-LABEL: global_extload_v2f16_to_v2f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: flat_load_dword v0, v[0:1]
; SI-NEXT: v_mov_b32_e32 v4, s0
; SI-NEXT: v_mov_b32_e32 v5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; SI-NEXT: v_cvt_f32_f16_e32 v2, v1
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f64_f32_e32 v[2:3], v2
; SI-NEXT: v_cvt_f64_f32_e32 v[0:1], v0
; SI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; SI-NEXT: s_endpgm
;
; VI-LABEL: global_extload_v2f16_to_v2f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_load_dword v0, v[0:1]
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cvt_f32_f16_sdwa v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f32_f16_e32 v1, v0
; VI-NEXT: v_cvt_f64_f32_e32 v[2:3], v2
; VI-NEXT: v_cvt_f64_f32_e32 v[0:1], v1
; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; VI-NEXT: s_endpgm
  %val = load <2 x half>, <2 x half> addrspace(1)* %in
  %cvt = fpext <2 x half> %val to <2 x double>
  store <2 x double> %cvt, <2 x double> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @global_extload_v3f16_to_v3f64(<3 x double> addrspace(1)* %out, <3 x half> addrspace(1)* %in) #0 {
; SI-LABEL: global_extload_v3f16_to_v3f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; SI-NEXT: s_add_u32 s2, s0, 16
; SI-NEXT: s_addc_u32 s3, s1, 0
; SI-NEXT: v_mov_b32_e32 v7, s3
; SI-NEXT: v_mov_b32_e32 v6, s2
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v0
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f64_f32_e32 v[4:5], v1
; SI-NEXT: v_cvt_f64_f32_e32 v[2:3], v2
; SI-NEXT: v_cvt_f64_f32_e32 v[0:1], v0
; SI-NEXT: flat_store_dwordx2 v[6:7], v[4:5]
; SI-NEXT: v_mov_b32_e32 v5, s1
; SI-NEXT: v_mov_b32_e32 v4, s0
; SI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; SI-NEXT: s_endpgm
;
; VI-LABEL: global_extload_v3f16_to_v3f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; VI-NEXT: s_add_u32 s2, s0, 16
; VI-NEXT: s_addc_u32 s3, s1, 0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cvt_f32_f16_e32 v3, v1
; VI-NEXT: v_cvt_f32_f16_e32 v2, v0
; VI-NEXT: v_cvt_f32_f16_sdwa v8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f64_f32_e32 v[6:7], v3
; VI-NEXT: v_cvt_f64_f32_e32 v[0:1], v2
; VI-NEXT: v_cvt_f64_f32_e32 v[2:3], v8
; VI-NEXT: v_mov_b32_e32 v9, s3
; VI-NEXT: v_mov_b32_e32 v8, s2
; VI-NEXT: flat_store_dwordx2 v[8:9], v[6:7]
; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; VI-NEXT: s_endpgm
  %val = load <3 x half>, <3 x half> addrspace(1)* %in
  %cvt = fpext <3 x half> %val to <3 x double>
  store <3 x double> %cvt, <3 x double> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @global_extload_v4f16_to_v4f64(<4 x double> addrspace(1)* %out, <4 x half> addrspace(1)* %in) #0 {
; SI-LABEL: global_extload_v4f16_to_v4f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; SI-NEXT: s_add_u32 s2, s0, 16
; SI-NEXT: s_addc_u32 s3, s1, 0
; SI-NEXT: v_mov_b32_e32 v9, s1
; SI-NEXT: v_mov_b32_e32 v8, s0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1
; SI-NEXT: v_cvt_f32_f16_e32 v3, v1
; SI-NEXT: v_cvt_f32_f16_e32 v1, v0
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: v_cvt_f32_f16_e32 v10, v0
; SI-NEXT: v_cvt_f64_f32_e32 v[4:5], v3
; SI-NEXT: v_cvt_f64_f32_e32 v[0:1], v1
; SI-NEXT: v_cvt_f64_f32_e32 v[6:7], v2
; SI-NEXT: v_cvt_f64_f32_e32 v[2:3], v10
; SI-NEXT: v_mov_b32_e32 v11, s3
; SI-NEXT: v_mov_b32_e32 v10, s2
; SI-NEXT: flat_store_dwordx4 v[10:11], v[4:7]
; SI-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
; SI-NEXT: s_endpgm
;
; VI-LABEL: global_extload_v4f16_to_v4f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; VI-NEXT: s_add_u32 s2, s0, 16
; VI-NEXT: s_addc_u32 s3, s1, 0
; VI-NEXT: v_mov_b32_e32 v9, s1
; VI-NEXT: v_mov_b32_e32 v8, s0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cvt_f32_f16_sdwa v6, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f32_f16_e32 v3, v1
; VI-NEXT: v_cvt_f32_f16_e32 v2, v0
; VI-NEXT: v_cvt_f32_f16_sdwa v10, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f64_f32_e32 v[6:7], v6
; VI-NEXT: v_cvt_f64_f32_e32 v[4:5], v3
; VI-NEXT: v_cvt_f64_f32_e32 v[0:1], v2
; VI-NEXT: v_cvt_f64_f32_e32 v[2:3], v10
; VI-NEXT: v_mov_b32_e32 v11, s3
; VI-NEXT: v_mov_b32_e32 v10, s2
; VI-NEXT: flat_store_dwordx4 v[10:11], v[4:7]
; VI-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
; VI-NEXT: s_endpgm
  %val = load <4 x half>, <4 x half> addrspace(1)* %in
  %cvt = fpext <4 x half> %val to <4 x double>
  store <4 x double> %cvt, <4 x double> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @global_extload_v8f16_to_v8f64(<8 x double> addrspace(1)* %out, <8 x half> addrspace(1)* %in) #0 {
; SI-LABEL: global_extload_v8f16_to_v8f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; SI-NEXT: s_add_u32 s2, s0, 48
; SI-NEXT: s_addc_u32 s3, s1, 0
; SI-NEXT: v_mov_b32_e32 v7, s3
; SI-NEXT: v_mov_b32_e32 v6, s2
; SI-NEXT: s_add_u32 s2, s0, 32
; SI-NEXT: s_addc_u32 s3, s1, 0
; SI-NEXT: v_mov_b32_e32 v13, s1
; SI-NEXT: v_mov_b32_e32 v12, s0
; SI-NEXT: s_add_u32 s0, s0, 16
; SI-NEXT: v_mov_b32_e32 v15, s3
; SI-NEXT: s_addc_u32 s1, s1, 0
; SI-NEXT: v_mov_b32_e32 v14, s2
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v2
; SI-NEXT: v_cvt_f32_f16_e32 v8, v2
; SI-NEXT: v_cvt_f32_f16_e32 v2, v4
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v1
; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v0
; SI-NEXT: v_cvt_f32_f16_e32 v10, v1
; SI-NEXT: v_cvt_f32_f16_e32 v4, v0
; SI-NEXT: v_cvt_f32_f16_e32 v16, v5
; SI-NEXT: v_cvt_f64_f32_e32 v[0:1], v3
; SI-NEXT: v_cvt_f64_f32_e32 v[2:3], v2
; SI-NEXT: v_cvt_f32_f16_e32 v17, v9
; SI-NEXT: v_cvt_f32_f16_e32 v18, v11
; SI-NEXT: v_cvt_f64_f32_e32 v[8:9], v8
; SI-NEXT: flat_store_dwordx4 v[6:7], v[0:3]
; SI-NEXT: v_cvt_f64_f32_e32 v[4:5], v4
; SI-NEXT: v_cvt_f64_f32_e32 v[0:1], v10
; SI-NEXT: v_cvt_f64_f32_e32 v[10:11], v16
; SI-NEXT: v_cvt_f64_f32_e32 v[2:3], v17
; SI-NEXT: v_cvt_f64_f32_e32 v[6:7], v18
; SI-NEXT: v_mov_b32_e32 v17, s1
; SI-NEXT: v_mov_b32_e32 v16, s0
; SI-NEXT: flat_store_dwordx4 v[14:15], v[8:11]
; SI-NEXT: flat_store_dwordx4 v[16:17], v[0:3]
; SI-NEXT: flat_store_dwordx4 v[12:13], v[4:7]
; SI-NEXT: s_endpgm
;
; VI-LABEL: global_extload_v8f16_to_v8f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; VI-NEXT: s_add_u32 s2, s0, 48
; VI-NEXT: s_addc_u32 s3, s1, 0
; VI-NEXT: v_mov_b32_e32 v8, s3
; VI-NEXT: v_mov_b32_e32 v7, s2
; VI-NEXT: s_add_u32 s2, s0, 32
; VI-NEXT: s_addc_u32 s3, s1, 0
; VI-NEXT: v_mov_b32_e32 v13, s1
; VI-NEXT: v_mov_b32_e32 v12, s0
; VI-NEXT: s_add_u32 s0, s0, 16
; VI-NEXT: v_mov_b32_e32 v15, s3
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_mov_b32_e32 v14, s2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cvt_f32_f16_sdwa v5, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f32_f16_e32 v9, v0
; VI-NEXT: v_cvt_f32_f16_sdwa v16, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f32_f16_e32 v0, v3
; VI-NEXT: v_cvt_f32_f16_e32 v11, v2
; VI-NEXT: v_cvt_f32_f16_e32 v10, v1
; VI-NEXT: v_cvt_f64_f32_e32 v[5:6], v5
; VI-NEXT: v_cvt_f64_f32_e32 v[3:4], v0
; VI-NEXT: v_cvt_f32_f16_sdwa v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f32_f16_sdwa v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f64_f32_e32 v[0:1], v9
; VI-NEXT: flat_store_dwordx4 v[7:8], v[3:6]
; VI-NEXT: v_cvt_f64_f32_e32 v[8:9], v11
; VI-NEXT: v_cvt_f64_f32_e32 v[4:5], v10
; VI-NEXT: v_cvt_f64_f32_e32 v[10:11], v2
; VI-NEXT: v_cvt_f64_f32_e32 v[6:7], v17
; VI-NEXT: v_cvt_f64_f32_e32 v[2:3], v16
; VI-NEXT: v_mov_b32_e32 v17, s1
; VI-NEXT: v_mov_b32_e32 v16, s0
; VI-NEXT: flat_store_dwordx4 v[14:15], v[8:11]
; VI-NEXT: flat_store_dwordx4 v[16:17], v[4:7]
; VI-NEXT: flat_store_dwordx4 v[12:13], v[0:3]
; VI-NEXT: s_endpgm
  %val = load <8 x half>, <8 x half> addrspace(1)* %in
  %cvt = fpext <8 x half> %val to <8 x double>
  store <8 x double> %cvt, <8 x double> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @global_extload_v16f16_to_v16f64(<16 x double> addrspace(1)* %out, <16 x half> addrspace(1)* %in) #0 {
; SI-LABEL: global_extload_v16f16_to_v16f64:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: s_add_u32 s2, s2, 16
; SI-NEXT: s_addc_u32 s3, s3, 0
; SI-NEXT: v_mov_b32_e32 v5, s3
; SI-NEXT: v_mov_b32_e32 v4, s2
; SI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; SI-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
; SI-NEXT: s_add_u32 s2, s0, 48
; SI-NEXT: s_addc_u32 s3, s1, 0
; SI-NEXT: v_mov_b32_e32 v15, s3
; SI-NEXT: v_mov_b32_e32 v14, s2
; SI-NEXT: s_add_u32 s2, s0, 32
; SI-NEXT: s_addc_u32 s3, s1, 0
; SI-NEXT: v_mov_b32_e32 v17, s3
; SI-NEXT: v_mov_b32_e32 v16, s2
; SI-NEXT: s_add_u32 s2, s0, 16
; SI-NEXT: s_addc_u32 s3, s1, 0
; SI-NEXT: v_mov_b32_e32 v19, s3
; SI-NEXT: v_mov_b32_e32 v18, s2
; SI-NEXT: s_add_u32 s2, s0, 0x70
; SI-NEXT: s_addc_u32 s3, s1, 0
; SI-NEXT: v_mov_b32_e32 v13, s1
; SI-NEXT: v_mov_b32_e32 v12, s0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v3
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_cvt_f32_f16_e32 v10, v8
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v5
; SI-NEXT: v_cvt_f32_f16_e32 v21, v5
; SI-NEXT: v_cvt_f64_f32_e32 v[8:9], v3
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; SI-NEXT: v_cvt_f64_f32_e32 v[10:11], v10
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v4
; SI-NEXT: flat_store_dwordx4 v[14:15], v[8:11]
; SI-NEXT: v_mov_b32_e32 v15, s3
; SI-NEXT: v_cvt_f64_f32_e32 v[8:9], v2
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1
; SI-NEXT: v_cvt_f64_f32_e32 v[10:11], v3
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_mov_b32_e32 v14, s2
; SI-NEXT: flat_store_dwordx4 v[16:17], v[8:11]
; SI-NEXT: v_cvt_f64_f32_e32 v[2:3], v2
; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v0
; SI-NEXT: v_cvt_f32_f16_e32 v9, v0
; SI-NEXT: v_cvt_f64_f32_e32 v[0:1], v1
; SI-NEXT: v_cvt_f32_f16_e32 v8, v8
; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v7
; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
; SI-NEXT: flat_store_dwordx4 v[18:19], v[0:3]
; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v6
; SI-NEXT: v_cvt_f64_f32_e32 v[2:3], v8
; SI-NEXT: v_cvt_f64_f32_e32 v[0:1], v9
; SI-NEXT: v_cvt_f32_f16_e32 v8, v10
; SI-NEXT: v_cvt_f32_f16_e32 v10, v11
; SI-NEXT: s_add_u32 s2, s0, 0x60
; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
; SI-NEXT: flat_store_dwordx4 v[12:13], v[0:3]
; SI-NEXT: s_addc_u32 s3, s1, 0
; SI-NEXT: v_cvt_f64_f32_e32 v[0:1], v7
; SI-NEXT: v_cvt_f64_f32_e32 v[2:3], v8
; SI-NEXT: v_cvt_f32_f16_e32 v7, v20
; SI-NEXT: v_mov_b32_e32 v17, s3
; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
; SI-NEXT: v_cvt_f32_f16_e32 v12, v5
; SI-NEXT: v_mov_b32_e32 v16, s2
; SI-NEXT: s_add_u32 s2, s0, 0x50
; SI-NEXT: s_addc_u32 s3, s1, 0
; SI-NEXT: v_cvt_f64_f32_e32 v[8:9], v6
; SI-NEXT: v_cvt_f64_f32_e32 v[10:11], v10
; SI-NEXT: s_add_u32 s0, s0, 64
; SI-NEXT: flat_store_dwordx4 v[14:15], v[0:3]
; SI-NEXT: s_addc_u32 s1, s1, 0
; SI-NEXT: v_cvt_f64_f32_e32 v[2:3], v7
; SI-NEXT: v_cvt_f64_f32_e32 v[0:1], v21
; SI-NEXT: v_cvt_f64_f32_e32 v[6:7], v12
; SI-NEXT: v_cvt_f64_f32_e32 v[4:5], v4
; SI-NEXT: v_mov_b32_e32 v19, s3
; SI-NEXT: v_mov_b32_e32 v13, s1
; SI-NEXT: v_mov_b32_e32 v18, s2
; SI-NEXT: v_mov_b32_e32 v12, s0
; SI-NEXT: flat_store_dwordx4 v[16:17], v[8:11]
; SI-NEXT: flat_store_dwordx4 v[18:19], v[0:3]
; SI-NEXT: flat_store_dwordx4 v[12:13], v[4:7]
; SI-NEXT: s_endpgm
;
; VI-LABEL: global_extload_v16f16_to_v16f64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: s_add_u32 s2, s2, 16
; VI-NEXT: s_addc_u32 s3, s3, 0
; VI-NEXT: v_mov_b32_e32 v5, s3
; VI-NEXT: v_mov_b32_e32 v4, s2
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; VI-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
; VI-NEXT: s_add_u32 s2, s0, 48
; VI-NEXT: s_addc_u32 s3, s1, 0
; VI-NEXT: v_mov_b32_e32 v15, s3
; VI-NEXT: v_mov_b32_e32 v14, s2
; VI-NEXT: s_add_u32 s2, s0, 32
; VI-NEXT: s_addc_u32 s3, s1, 0
; VI-NEXT: v_mov_b32_e32 v17, s3
; VI-NEXT: v_mov_b32_e32 v16, s2
; VI-NEXT: s_add_u32 s2, s0, 16
; VI-NEXT: s_addc_u32 s3, s1, 0
; VI-NEXT: v_mov_b32_e32 v19, s3
; VI-NEXT: v_mov_b32_e32 v18, s2
; VI-NEXT: s_add_u32 s2, s0, 0x70
; VI-NEXT: v_mov_b32_e32 v13, s1
; VI-NEXT: s_addc_u32 s3, s1, 0
; VI-NEXT: v_mov_b32_e32 v12, s0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_cvt_f32_f16_e32 v8, v3
; VI-NEXT: v_cvt_f32_f16_sdwa v3, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f64_f32_e32 v[8:9], v8
; VI-NEXT: v_cvt_f64_f32_e32 v[10:11], v3
; VI-NEXT: v_cvt_f32_f16_e32 v3, v2
; VI-NEXT: v_cvt_f32_f16_sdwa v2, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: flat_store_dwordx4 v[14:15], v[8:11]
; VI-NEXT: v_mov_b32_e32 v15, s3
; VI-NEXT: v_cvt_f64_f32_e32 v[8:9], v3
; VI-NEXT: v_cvt_f64_f32_e32 v[10:11], v2
; VI-NEXT: v_cvt_f32_f16_e32 v2, v1
; VI-NEXT: v_cvt_f32_f16_sdwa v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_mov_b32_e32 v14, s2
; VI-NEXT: flat_store_dwordx4 v[16:17], v[8:11]
; VI-NEXT: s_add_u32 s2, s0, 0x60
; VI-NEXT: v_cvt_f32_f16_e32 v8, v0
; VI-NEXT: v_cvt_f32_f16_sdwa v9, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f64_f32_e32 v[0:1], v2
; VI-NEXT: v_cvt_f64_f32_e32 v[2:3], v3
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_cvt_f32_f16_e32 v10, v4
; VI-NEXT: s_addc_u32 s3, s1, 0
; VI-NEXT: v_mov_b32_e32 v17, s3
; VI-NEXT: flat_store_dwordx4 v[18:19], v[0:3]
; VI-NEXT: v_cvt_f32_f16_sdwa v18, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f64_f32_e32 v[0:1], v8
; VI-NEXT: v_cvt_f64_f32_e32 v[2:3], v9
; VI-NEXT: v_cvt_f32_f16_e32 v4, v7
; VI-NEXT: v_cvt_f32_f16_sdwa v7, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f32_f16_e32 v8, v5
; VI-NEXT: flat_store_dwordx4 v[12:13], v[0:3]
; VI-NEXT: v_cvt_f32_f16_sdwa v12, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_cvt_f64_f32_e32 v[0:1], v4
; VI-NEXT: v_cvt_f64_f32_e32 v[2:3], v7
; VI-NEXT: v_cvt_f32_f16_e32 v7, v6
; VI-NEXT: v_cvt_f32_f16_sdwa v6, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1
; VI-NEXT: v_mov_b32_e32 v16, s2
; VI-NEXT: s_add_u32 s2, s0, 0x50
; VI-NEXT: s_addc_u32 s3, s1, 0
; VI-NEXT: flat_store_dwordx4 v[14:15], v[0:3]
; VI-NEXT: v_cvt_f64_f32_e32 v[4:5], v10
; VI-NEXT: v_cvt_f64_f32_e32 v[0:1], v8
; VI-NEXT: v_cvt_f64_f32_e32 v[8:9], v7
; VI-NEXT: v_cvt_f64_f32_e32 v[10:11], v6
; VI-NEXT: s_add_u32 s0, s0, 64
; VI-NEXT: v_cvt_f64_f32_e32 v[2:3], v12
; VI-NEXT: s_addc_u32 s1, s1, 0
; VI-NEXT: v_cvt_f64_f32_e32 v[6:7], v18
; VI-NEXT: v_mov_b32_e32 v21, s3
; VI-NEXT: v_mov_b32_e32 v13, s1
; VI-NEXT: v_mov_b32_e32 v20, s2
; VI-NEXT: v_mov_b32_e32 v12, s0
; VI-NEXT: flat_store_dwordx4 v[16:17], v[8:11]
; VI-NEXT: flat_store_dwordx4 v[20:21], v[0:3]
; VI-NEXT: flat_store_dwordx4 v[12:13], v[4:7]
; VI-NEXT: s_endpgm
  %val = load <16 x half>, <16 x half> addrspace(1)* %in
  %cvt = fpext <16 x half> %val to <16 x double>
  store <16 x double> %cvt, <16 x double> addrspace(1)* %out
  ret void
}
define amdgpu_kernel void @global_truncstore_f32_to_f16(half addrspace(1)* %out, float addrspace(1)* %in) #0 {
; GCN-LABEL: global_truncstore_f32_to_f16:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s2
; GCN-NEXT: v_mov_b32_e32 v1, s3
; GCN-NEXT: flat_load_dword v0, v[0:1]
; GCN-NEXT: v_mov_b32_e32 v1, s1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_cvt_f16_f32_e32 v2, v0
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: flat_store_short v[0:1], v2
; GCN-NEXT: s_endpgm
  %val = load float, float addrspace(1)* %in
  %cvt = fptrunc float %val to half
  store half %cvt, half addrspace(1)* %out
  ret void
}

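; Both converted halves should pack into a single dword: SI shifts and ORs them together, while VI writes the high half directly with sdwa.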
define amdgpu_kernel void @global_truncstore_v2f32_to_v2f16(<2 x half> addrspace(1)* %out, <2 x float> addrspace(1)* %in) #0 {
; SI-LABEL: global_truncstore_v2f32_to_v2f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v2, v1
; SI-NEXT: v_cvt_f16_f32_e32 v3, v0
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v2, v3, v2
; SI-NEXT: flat_store_dword v[0:1], v2
; SI-NEXT: s_endpgm
;
; VI-LABEL: global_truncstore_v2f32_to_v2f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cvt_f16_f32_sdwa v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI-NEXT: v_cvt_f16_f32_e32 v3, v0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_or_b32_e32 v2, v3, v2
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
  %val = load <2 x float>, <2 x float> addrspace(1)* %in
  %cvt = fptrunc <2 x float> %val to <2 x half>
  store <2 x half> %cvt, <2 x half> addrspace(1)* %out
  ret void
}

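; The odd element keeps v3f16 from storing in one go: a dword for the low two halves plus a separate short for the third.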
define amdgpu_kernel void @global_truncstore_v3f32_to_v3f16(<3 x half> addrspace(1)* %out, <3 x float> addrspace(1)* %in) #0 {
; SI-LABEL: global_truncstore_v3f32_to_v3f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: flat_load_dwordx3 v[0:2], v[0:1]
; SI-NEXT: s_add_u32 s2, s0, 4
; SI-NEXT: s_addc_u32 s3, s1, 0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v3, v1
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_cvt_f16_f32_e32 v4, v0
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: flat_store_short v[0:1], v2
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_or_b32_e32 v2, v4, v3
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: flat_store_dword v[0:1], v2
; SI-NEXT: s_endpgm
;
; VI-LABEL: global_truncstore_v3f32_to_v3f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_load_dwordx3 v[0:2], v[0:1]
; VI-NEXT: s_add_u32 s2, s0, 4
; VI-NEXT: s_addc_u32 s3, s1, 0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cvt_f16_f32_e32 v2, v2
; VI-NEXT: v_cvt_f16_f32_sdwa v3, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI-NEXT: v_cvt_f16_f32_e32 v4, v0
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_or_b32_e32 v3, v4, v3
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v3
; VI-NEXT: s_endpgm
  %val = load <3 x float>, <3 x float> addrspace(1)* %in
  %cvt = fptrunc <3 x float> %val to <3 x half>
  store <3 x half> %cvt, <3 x half> addrspace(1)* %out
  ret void
}

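; Four halves should pack into two dwords and go out as a single flat_store_dwordx2.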
define amdgpu_kernel void @global_truncstore_v4f32_to_v4f16(<4 x half> addrspace(1)* %out, <4 x float> addrspace(1)* %in) #0 {
; SI-LABEL: global_truncstore_v4f32_to_v4f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; SI-NEXT: v_mov_b32_e32 v4, s0
; SI-NEXT: v_mov_b32_e32 v5, s1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v2, v3
; SI-NEXT: v_or_b32_e32 v0, v0, v6
; SI-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; SI-NEXT: s_endpgm
;
; VI-LABEL: global_truncstore_v4f32_to_v4f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cvt_f16_f32_sdwa v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI-NEXT: v_cvt_f16_f32_e32 v5, v0
; VI-NEXT: v_cvt_f16_f32_sdwa v3, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI-NEXT: v_cvt_f16_f32_e32 v2, v2
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: v_or_b32_e32 v3, v2, v3
; VI-NEXT: v_or_b32_e32 v2, v5, v4
; VI-NEXT: flat_store_dwordx2 v[0:1], v[2:3]
; VI-NEXT: s_endpgm
  %val = load <4 x float>, <4 x float> addrspace(1)* %in
  %cvt = fptrunc <4 x float> %val to <4 x half>
  store <4 x half> %cvt, <4 x half> addrspace(1)* %out
  ret void
}

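; The v8 source needs two dwordx4 loads, but the packed result should still fit a single dwordx4 store.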
define amdgpu_kernel void @global_truncstore_v8f32_to_v8f16(<8 x half> addrspace(1)* %out, <8 x float> addrspace(1)* %in) #0 {
; SI-LABEL: global_truncstore_v8f32_to_v8f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_add_u32 s4, s2, 16
; SI-NEXT: v_mov_b32_e32 v5, s3
; SI-NEXT: s_addc_u32 s5, s3, 0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v4, s2
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; SI-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
; SI-NEXT: v_mov_b32_e32 v8, s0
; SI-NEXT: v_mov_b32_e32 v9, s1
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v10, v0
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v7
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v1, v6, v0
; SI-NEXT: v_or_b32_e32 v3, v2, v3
; SI-NEXT: v_or_b32_e32 v0, v4, v5
; SI-NEXT: v_or_b32_e32 v2, v10, v7
; SI-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
; SI-NEXT: s_endpgm
;
; VI-LABEL: global_truncstore_v8f32_to_v8f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s4, s2, 16
; VI-NEXT: v_mov_b32_e32 v5, s3
; VI-NEXT: s_addc_u32 s5, s3, 0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v4, s2
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; VI-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
; VI-NEXT: v_mov_b32_e32 v8, s0
; VI-NEXT: v_mov_b32_e32 v9, s1
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_cvt_f16_f32_sdwa v3, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI-NEXT: v_cvt_f16_f32_e32 v2, v2
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cvt_f16_f32_sdwa v7, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI-NEXT: v_cvt_f16_f32_e32 v6, v6
; VI-NEXT: v_cvt_f16_f32_sdwa v10, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI-NEXT: v_cvt_f16_f32_sdwa v5, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI-NEXT: v_cvt_f16_f32_e32 v4, v4
; VI-NEXT: v_cvt_f16_f32_e32 v11, v0
; VI-NEXT: v_or_b32_e32 v3, v2, v3
; VI-NEXT: v_or_b32_e32 v1, v6, v7
; VI-NEXT: v_or_b32_e32 v0, v4, v5
; VI-NEXT: v_or_b32_e32 v2, v11, v10
; VI-NEXT: flat_store_dwordx4 v[8:9], v[0:3]
; VI-NEXT: s_endpgm
  %val = load <8 x float>, <8 x float> addrspace(1)* %in
  %cvt = fptrunc <8 x float> %val to <8 x half>
  store <8 x half> %cvt, <8 x half> addrspace(1)* %out
  ret void
}

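; v16 scales the same pattern up: four dwordx4 loads and two dwordx4 stores.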
define amdgpu_kernel void @global_truncstore_v16f32_to_v16f16(<16 x half> addrspace(1)* %out, <16 x float> addrspace(1)* %in) #0 {
; SI-LABEL: global_truncstore_v16f32_to_v16f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_add_u32 s4, s2, 32
; SI-NEXT: s_addc_u32 s5, s3, 0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: s_add_u32 s4, s2, 48
; SI-NEXT: v_mov_b32_e32 v13, s3
; SI-NEXT: s_addc_u32 s5, s3, 0
; SI-NEXT: v_mov_b32_e32 v12, s2
; SI-NEXT: s_add_u32 s2, s2, 16
; SI-NEXT: v_mov_b32_e32 v4, s4
; SI-NEXT: v_mov_b32_e32 v5, s5
; SI-NEXT: s_addc_u32 s3, s3, 0
; SI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; SI-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
; SI-NEXT: v_mov_b32_e32 v9, s3
; SI-NEXT: v_mov_b32_e32 v8, s2
; SI-NEXT: flat_load_dwordx4 v[8:11], v[8:9]
; SI-NEXT: flat_load_dwordx4 v[12:15], v[12:13]
; SI-NEXT: s_add_u32 s2, s0, 16
; SI-NEXT: s_addc_u32 s3, s1, 0
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_cvt_f16_f32_e32 v16, v5
; SI-NEXT: v_cvt_f16_f32_e32 v17, v4
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v1
; SI-NEXT: v_mov_b32_e32 v5, s3
; SI-NEXT: v_or_b32_e32 v1, v2, v3
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v7
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v16
; SI-NEXT: v_or_b32_e32 v3, v6, v2
; SI-NEXT: v_or_b32_e32 v2, v17, v7
; SI-NEXT: v_mov_b32_e32 v4, s2
; SI-NEXT: v_or_b32_e32 v0, v0, v18
; SI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v13
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_mov_b32_e32 v5, s1
; SI-NEXT: v_or_b32_e32 v1, v14, v6
; SI-NEXT: v_or_b32_e32 v0, v12, v7
; SI-NEXT: v_or_b32_e32 v3, v10, v11
; SI-NEXT: v_or_b32_e32 v2, v8, v9
; SI-NEXT: v_mov_b32_e32 v4, s0
; SI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; SI-NEXT: s_endpgm
;
; VI-LABEL: global_truncstore_v16f32_to_v16f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s4, s2, 32
; VI-NEXT: s_addc_u32 s5, s3, 0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: s_add_u32 s4, s2, 48
; VI-NEXT: v_mov_b32_e32 v13, s3
; VI-NEXT: s_addc_u32 s5, s3, 0
; VI-NEXT: v_mov_b32_e32 v12, s2
; VI-NEXT: s_add_u32 s2, s2, 16
; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: v_mov_b32_e32 v5, s5
; VI-NEXT: s_addc_u32 s3, s3, 0
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; VI-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
; VI-NEXT: v_mov_b32_e32 v9, s3
; VI-NEXT: v_mov_b32_e32 v8, s2
; VI-NEXT: flat_load_dwordx4 v[8:11], v[8:9]
; VI-NEXT: flat_load_dwordx4 v[12:15], v[12:13]
; VI-NEXT: s_add_u32 s2, s0, 16
; VI-NEXT: s_addc_u32 s3, s1, 0
; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_cvt_f16_f32_sdwa v3, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI-NEXT: v_cvt_f16_f32_e32 v2, v2
; VI-NEXT: v_cvt_f16_f32_sdwa v16, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI-NEXT: v_cvt_f16_f32_e32 v0, v0
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_cvt_f16_f32_sdwa v7, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI-NEXT: v_cvt_f16_f32_e32 v6, v6
; VI-NEXT: v_cvt_f16_f32_sdwa v17, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI-NEXT: v_cvt_f16_f32_e32 v18, v4
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_cvt_f16_f32_sdwa v15, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI-NEXT: v_cvt_f16_f32_e32 v14, v14
; VI-NEXT: v_cvt_f16_f32_sdwa v13, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI-NEXT: v_cvt_f16_f32_e32 v12, v12
; VI-NEXT: v_cvt_f16_f32_sdwa v11, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI-NEXT: v_cvt_f16_f32_e32 v10, v10
; VI-NEXT: v_cvt_f16_f32_sdwa v9, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD
; VI-NEXT: v_cvt_f16_f32_e32 v8, v8
; VI-NEXT: v_mov_b32_e32 v5, s3
; VI-NEXT: v_or_b32_e32 v1, v2, v3
; VI-NEXT: v_mov_b32_e32 v4, s2
; VI-NEXT: v_or_b32_e32 v0, v0, v16
; VI-NEXT: v_or_b32_e32 v3, v6, v7
; VI-NEXT: v_or_b32_e32 v2, v18, v17
; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: v_or_b32_e32 v1, v14, v15
; VI-NEXT: v_or_b32_e32 v0, v12, v13
; VI-NEXT: v_or_b32_e32 v3, v10, v11
; VI-NEXT: v_or_b32_e32 v2, v8, v9
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; VI-NEXT: s_endpgm
  %val = load <16 x float>, <16 x float> addrspace(1)* %in
  %cvt = fptrunc <16 x float> %val to <16 x half>
  store <16 x half> %cvt, <16 x half> addrspace(1)* %out
  ret void
}

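; SI has no native f16 arithmetic, so fadd should be promoted to f32 and truncated back; VI should use v_add_f16 directly.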
; FIXME: Unsafe math should fold conversions away
define amdgpu_kernel void @fadd_f16(half addrspace(1)* %out, half %a, half %b) #0 {
; SI-LABEL: fadd_f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s0, s[4:5], 0x2
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v0, s0
; SI-NEXT: s_lshr_b32 s0, s0, 16
; SI-NEXT: v_cvt_f32_f16_e32 v1, s0
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; SI-NEXT: v_add_f32_e32 v0, v0, v1
; SI-NEXT: v_cvt_f16_f32_e32 v2, v0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: flat_store_short v[0:1], v2
; SI-NEXT: s_endpgm
;
; VI-LABEL: fadd_f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: s_load_dword s2, s[4:5], 0x8
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s3, s2, 16
; VI-NEXT: v_mov_b32_e32 v0, s3
; VI-NEXT: v_add_f16_e32 v2, s2, v0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_short v[0:1], v2
; VI-NEXT: s_endpgm
  %add = fadd half %a, %b
  store half %add, half addrspace(1)* %out, align 4
  ret void
}

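; For packed pairs, VI should keep both halves in one register and do the high-half add with v_add_f16_sdwa.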
define amdgpu_kernel void @fadd_v2f16(<2 x half> addrspace(1)* %out, <2 x half> %a, <2 x half> %b) #0 {
; SI-LABEL: fadd_v2f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dword s0, s[4:5], 0x2
; SI-NEXT: s_load_dword s1, s[4:5], 0x3
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s2, s0, 16
; SI-NEXT: v_cvt_f32_f16_e32 v0, s0
; SI-NEXT: s_lshr_b32 s0, s1, 16
; SI-NEXT: v_cvt_f32_f16_e32 v1, s1
; SI-NEXT: v_cvt_f32_f16_e32 v3, s0
; SI-NEXT: v_cvt_f32_f16_e32 v2, s2
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; SI-NEXT: v_add_f32_e32 v0, v0, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_add_f32_e32 v1, v2, v3
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v2, v0, v1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s0
; SI-NEXT: v_mov_b32_e32 v1, s1
; SI-NEXT: flat_store_dword v[0:1], v2
; SI-NEXT: s_endpgm
;
; VI-LABEL: fadd_v2f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x0
; VI-NEXT: s_load_dword s2, s[4:5], 0x8
; VI-NEXT: s_load_dword s3, s[4:5], 0xc
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s5, s2, 16
; VI-NEXT: s_lshr_b32 s4, s3, 16
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_add_f16_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_add_f16_e32 v1, s2, v1
; VI-NEXT: v_or_b32_e32 v2, v1, v0
; VI-NEXT: v_mov_b32_e32 v0, s0
; VI-NEXT: v_mov_b32_e32 v1, s1
; VI-NEXT: flat_store_dword v[0:1], v2
; VI-NEXT: s_endpgm
  %add = fadd <2 x half> %a, %b
  store <2 x half> %add, <2 x half> addrspace(1)* %out, align 8
  ret void
}

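; Unlike the kernel-argument cases above, the operands here are loaded from memory.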
define amdgpu_kernel void @fadd_v4f16(<4 x half> addrspace(1)* %out, <4 x half> addrspace(1)* %in) #0 {
; SI-LABEL: fadd_v4f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_add_u32 s4, s2, 8
; SI-NEXT: v_mov_b32_e32 v0, s2
; SI-NEXT: s_addc_u32 s5, s3, 0
; SI-NEXT: v_mov_b32_e32 v2, s4
; SI-NEXT: v_mov_b32_e32 v1, s3
; SI-NEXT: v_mov_b32_e32 v3, s5
; SI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; SI-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
; SI-NEXT: v_mov_b32_e32 v4, s0
; SI-NEXT: v_mov_b32_e32 v5, s1
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v6, v0
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v8, v2
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; SI-NEXT: v_cvt_f32_f16_e32 v7, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_cvt_f32_f16_e32 v9, v3
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_add_f32_e32 v7, v7, v9
; SI-NEXT: v_add_f32_e32 v0, v0, v2
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_add_f32_e32 v1, v1, v3
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_add_f32_e32 v6, v6, v8
; SI-NEXT: v_cvt_f16_f32_e32 v2, v7
; SI-NEXT: v_cvt_f16_f32_e32 v3, v6
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v3, v0
; SI-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; SI-NEXT: s_endpgm
;
; VI-LABEL: fadd_v4f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_add_u32 s4, s2, 8
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: s_addc_u32 s5, s3, 0
; VI-NEXT: v_mov_b32_e32 v2, s4
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_mov_b32_e32 v3, s5
; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
; VI-NEXT: flat_load_dwordx2 v[2:3], v[2:3]
; VI-NEXT: v_mov_b32_e32 v4, s0
; VI-NEXT: v_mov_b32_e32 v5, s1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_f16_sdwa v6, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_add_f16_e32 v1, v1, v3
; VI-NEXT: v_add_f16_sdwa v3, v0, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:WORD_1
; VI-NEXT: v_add_f16_e32 v0, v0, v2
; VI-NEXT: v_or_b32_e32 v1, v1, v6
; VI-NEXT: v_or_b32_e32 v0, v0, v3
; VI-NEXT: flat_store_dwordx2 v[4:5], v[0:1]
; VI-NEXT: s_endpgm
  %b_ptr = getelementptr <4 x half>, <4 x half> addrspace(1)* %in, i32 1
  %a = load <4 x half>, <4 x half> addrspace(1)* %in, align 16
  %b = load <4 x half>, <4 x half> addrspace(1)* %b_ptr, align 16
  %result = fadd <4 x half> %a, %b
  store <4 x half> %result, <4 x half> addrspace(1)* %out, align 16
  ret void
}

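; The v8 case should decompose into four packed adds before the dwordx4 store.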
define amdgpu_kernel void @fadd_v8f16(<8 x half> addrspace(1)* %out, <8 x half> %a, <8 x half> %b) #0 {
; SI-LABEL: fadd_v8f16:
; SI: ; %bb.0:
; SI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x4
; SI-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x8
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s10, s0, 16
; SI-NEXT: v_cvt_f32_f16_e32 v4, s0
; SI-NEXT: s_lshr_b32 s0, s4, 16
; SI-NEXT: v_cvt_f32_f16_e32 v8, s0
; SI-NEXT: s_lshr_b32 s0, s5, 16
; SI-NEXT: v_cvt_f32_f16_e32 v0, s10
; SI-NEXT: s_lshr_b32 s11, s1, 16
; SI-NEXT: s_lshr_b32 s10, s2, 16
; SI-NEXT: v_cvt_f32_f16_e32 v9, s0
; SI-NEXT: s_lshr_b32 s0, s6, 16
; SI-NEXT: v_cvt_f32_f16_e32 v1, s11
; SI-NEXT: v_cvt_f32_f16_e32 v2, s10
; SI-NEXT: v_cvt_f32_f16_e32 v10, s0
; SI-NEXT: s_lshr_b32 s10, s3, 16
; SI-NEXT: s_lshr_b32 s0, s7, 16
; SI-NEXT: v_cvt_f32_f16_e32 v12, s4
; SI-NEXT: v_cvt_f32_f16_e32 v3, s10
; SI-NEXT: v_cvt_f32_f16_e32 v11, s0
; SI-NEXT: v_cvt_f32_f16_e32 v5, s1
; SI-NEXT: v_cvt_f32_f16_e32 v13, s5
; SI-NEXT: v_cvt_f32_f16_e32 v6, s2
; SI-NEXT: v_cvt_f32_f16_e32 v15, s6
; SI-NEXT: v_cvt_f32_f16_e32 v7, s3
; SI-NEXT: v_cvt_f32_f16_e32 v14, s7
; SI-NEXT: v_add_f32_e32 v1, v1, v9
; SI-NEXT: v_add_f32_e32 v0, v0, v8
; SI-NEXT: v_add_f32_e32 v3, v3, v11
; SI-NEXT: v_add_f32_e32 v2, v2, v10
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_add_f32_e32 v5, v5, v13
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_add_f32_e32 v4, v4, v12
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_add_f32_e32 v7, v7, v14
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_add_f32_e32 v6, v6, v15
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v1, v5, v1
; SI-NEXT: v_or_b32_e32 v0, v4, v0
; SI-NEXT: v_mov_b32_e32 v4, s8
; SI-NEXT: v_or_b32_e32 v3, v7, v3
; SI-NEXT: v_or_b32_e32 v2, v6, v2
; SI-NEXT: v_mov_b32_e32 v5, s9
; SI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; SI-NEXT: s_endpgm
;
; VI-LABEL: fadd_v8f16:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx2 s[8:9], s[4:5], 0x0
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x10
; VI-NEXT: s_load_dwordx4 s[4:7], s[4:5], 0x20
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s11, s3, 16
; VI-NEXT: s_lshr_b32 s10, s7, 16
; VI-NEXT: v_mov_b32_e32 v0, s10
; VI-NEXT: v_mov_b32_e32 v1, s11
; VI-NEXT: v_add_f16_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_add_f16_e32 v1, s3, v1
; VI-NEXT: s_lshr_b32 s3, s6, 16
; VI-NEXT: s_lshr_b32 s7, s2, 16
; VI-NEXT: v_or_b32_e32 v3, v1, v0
; VI-NEXT: v_mov_b32_e32 v0, s3
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_add_f16_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v1, s6
; VI-NEXT: v_add_f16_e32 v1, s2, v1
; VI-NEXT: s_lshr_b32 s2, s5, 16
; VI-NEXT: s_lshr_b32 s3, s1, 16
; VI-NEXT: v_or_b32_e32 v2, v1, v0
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: v_add_f16_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_add_f16_e32 v1, s1, v1
; VI-NEXT: s_lshr_b32 s2, s0, 16
; VI-NEXT: s_lshr_b32 s1, s4, 16
; VI-NEXT: v_or_b32_e32 v1, v1, v0
; VI-NEXT: v_mov_b32_e32 v0, s1
; VI-NEXT: v_mov_b32_e32 v4, s2
; VI-NEXT: v_add_f16_sdwa v0, v4, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v4, s4
; VI-NEXT: v_add_f16_e32 v4, s0, v4
; VI-NEXT: v_or_b32_e32 v0, v4, v0
; VI-NEXT: v_mov_b32_e32 v4, s8
; VI-NEXT: v_mov_b32_e32 v5, s9
; VI-NEXT: flat_store_dwordx4 v[4:5], v[0:3]
; VI-NEXT: s_endpgm
  %add = fadd <8 x half> %a, %b
  store <8 x half> %add, <8 x half> addrspace(1)* %out, align 32
  ret void
}

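; Bitcasts between half and i16 should be free: just a 16-bit load and store, with no conversion instructions.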
define amdgpu_kernel void @test_bitcast_from_half(half addrspace(1)* %in, i16 addrspace(1)* %out) #0 {
; GCN-LABEL: test_bitcast_from_half:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v1, s1
; GCN-NEXT: flat_load_ushort v0, v[0:1]
; GCN-NEXT: v_mov_b32_e32 v2, s2
; GCN-NEXT: v_mov_b32_e32 v3, s3
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: flat_store_short v[2:3], v0
; GCN-NEXT: s_endpgm
  %val = load half, half addrspace(1)* %in
  %val_int = bitcast half %val to i16
  store i16 %val_int, i16 addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @test_bitcast_to_half(half addrspace(1)* %out, i16 addrspace(1)* %in) #0 {
; GCN-LABEL: test_bitcast_to_half:
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: v_mov_b32_e32 v2, s2
; GCN-NEXT: v_mov_b32_e32 v3, s3
; GCN-NEXT: flat_load_ushort v2, v[2:3]
; GCN-NEXT: v_mov_b32_e32 v0, s0
; GCN-NEXT: v_mov_b32_e32 v1, s1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: flat_store_short v[0:1], v2
; GCN-NEXT: s_endpgm
  %val = load i16, i16 addrspace(1)* %in
  %val_fp = bitcast i16 %val to half
  store half %val_fp, half addrspace(1)* %out
  ret void
}

attributes #0 = { nounwind }