; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -amdgpu-scalarize-global-loads=false -march=amdgcn -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s -check-prefixes=GCN,SI
; RUN: llc < %s -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs | FileCheck %s -check-prefixes=GCN,VI

; XXX - Why the packing?
define amdgpu_kernel void @scalar_to_vector_v2i32(<4 x i16> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
; SI-LABEL: scalar_to_vector_v2i32:
; SI:       ; %bb.0:
; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    s_mov_b32 s10, s6
; SI-NEXT:    s_mov_b32 s11, s7
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s8, s2
; SI-NEXT:    s_mov_b32 s9, s3
; SI-NEXT:    buffer_load_dword v0, off, s[8:11], 0
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
; SI-NEXT:    v_or_b32_e32 v0, v0, v1
; SI-NEXT:    s_mov_b32 s4, s0
; SI-NEXT:    s_mov_b32 s5, s1
; SI-NEXT:    v_mov_b32_e32 v1, v0
; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT:    s_endpgm
;
; VI-LABEL: scalar_to_vector_v2i32:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT:    s_mov_b32 s3, 0xf000
; VI-NEXT:    s_mov_b32 s2, -1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s0, s4
; VI-NEXT:    s_mov_b32 s1, s5
; VI-NEXT:    s_mov_b32 s4, s6
; VI-NEXT:    s_mov_b32 s5, s7
; VI-NEXT:    s_mov_b32 s6, s2
; VI-NEXT:    s_mov_b32 s7, s3
; VI-NEXT:    buffer_load_dword v0, off, s[4:7], 0
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
; VI-NEXT:    v_or_b32_e32 v0, v0, v1
; VI-NEXT:    v_mov_b32_e32 v1, v0
; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %tmp1 = load i32, i32 addrspace(1)* %in, align 4
  %bc = bitcast i32 %tmp1 to <2 x i16>
  %tmp2 = shufflevector <2 x i16> %bc, <2 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  store <4 x i16> %tmp2, <4 x i16> addrspace(1)* %out, align 8
  ret void
}

define amdgpu_kernel void @scalar_to_vector_v2f32(<4 x i16> addrspace(1)* %out, float addrspace(1)* %in) nounwind {
; SI-LABEL: scalar_to_vector_v2f32:
; SI:       ; %bb.0:
; SI-NEXT:    s_load_dwordx4 s[0:3], s[0:1], 0x9
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    s_mov_b32 s10, s6
; SI-NEXT:    s_mov_b32 s11, s7
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_mov_b32 s8, s2
; SI-NEXT:    s_mov_b32 s9, s3
; SI-NEXT:    buffer_load_dword v0, off, s[8:11], 0
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
; SI-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
; SI-NEXT:    v_or_b32_e32 v0, v0, v1
; SI-NEXT:    s_mov_b32 s4, s0
; SI-NEXT:    s_mov_b32 s5, s1
; SI-NEXT:    v_mov_b32_e32 v1, v0
; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[4:7], 0
; SI-NEXT:    s_endpgm
;
; VI-LABEL: scalar_to_vector_v2f32:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx4 s[4:7], s[0:1], 0x24
; VI-NEXT:    s_mov_b32 s3, 0xf000
; VI-NEXT:    s_mov_b32 s2, -1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_mov_b32 s0, s4
; VI-NEXT:    s_mov_b32 s1, s5
; VI-NEXT:    s_mov_b32 s4, s6
; VI-NEXT:    s_mov_b32 s5, s7
; VI-NEXT:    s_mov_b32 s6, s2
; VI-NEXT:    s_mov_b32 s7, s3
; VI-NEXT:    buffer_load_dword v0, off, s[4:7], 0
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT:    v_lshlrev_b32_e32 v1, 16, v0
; VI-NEXT:    v_or_b32_e32 v0, v0, v1
; VI-NEXT:    v_mov_b32_e32 v1, v0
; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT:    s_endpgm
  %tmp1 = load float, float addrspace(1)* %in, align 4
  %bc = bitcast float %tmp1 to <2 x i16>
  %tmp2 = shufflevector <2 x i16> %bc, <2 x i16> undef, <4 x i32> <i32 1, i32 1, i32 1, i32 1>
  store <4 x i16> %tmp2, <4 x i16> addrspace(1)* %out, align 8
  ret void
}

define amdgpu_kernel void @scalar_to_vector_v4i16() {
; SI-LABEL: scalar_to_vector_v4i16:
; SI:       ; %bb.0: ; %bb
; SI-NEXT:    s_mov_b32 s3, 0xf000
; SI-NEXT:    s_mov_b32 s2, -1
; SI-NEXT:    buffer_load_ubyte v0, off, s[0:3], 0
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_lshlrev_b32_e32 v1, 8, v0
; SI-NEXT:    v_or_b32_e32 v0, v1, v0
; SI-NEXT:    v_lshrrev_b32_e32 v1, 8, v0
; SI-NEXT:    v_lshlrev_b32_e32 v2, 8, v1
; SI-NEXT:    v_or_b32_e32 v1, v1, v2
; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v1
; SI-NEXT:    v_or_b32_e32 v1, v1, v2
; SI-NEXT:    v_or_b32_e32 v0, v0, v2
; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT:    s_endpgm
;
; VI-LABEL: scalar_to_vector_v4i16:
; VI:       ; %bb.0: ; %bb
; VI-NEXT:    s_mov_b32 s3, 0xf000
; VI-NEXT:    s_mov_b32 s2, -1
; VI-NEXT:    buffer_load_ubyte v0, off, s[0:3], 0
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_lshlrev_b16_e32 v1, 8, v0
; VI-NEXT:    v_or_b32_e32 v0, v1, v0
; VI-NEXT:    v_lshrrev_b16_e32 v1, 8, v0
; VI-NEXT:    v_lshlrev_b16_e32 v2, 8, v1
; VI-NEXT:    v_or_b32_e32 v1, v1, v2
; VI-NEXT:    v_lshlrev_b32_e32 v2, 16, v1
; VI-NEXT:    v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT:    v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT:    s_endpgm
bb:
  %tmp = load <2 x i8>, <2 x i8> addrspace(1)* undef, align 1
  %tmp1 = shufflevector <2 x i8> %tmp, <2 x i8> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 0, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9>
  store <8 x i8> %tmp2, <8 x i8> addrspace(1)* undef, align 8
  ret void
}

define amdgpu_kernel void @scalar_to_vector_v4f16() {
; SI-LABEL: scalar_to_vector_v4f16:
; SI:       ; %bb.0: ; %bb
; SI-NEXT:    s_mov_b32 s3, 0xf000
; SI-NEXT:    s_mov_b32 s2, -1
; SI-NEXT:    buffer_load_ubyte v0, off, s[0:3], 0
; SI-NEXT:    s_waitcnt vmcnt(0)
; SI-NEXT:    v_lshlrev_b32_e32 v1, 8, v0
; SI-NEXT:    v_or_b32_e32 v0, v1, v0
; SI-NEXT:    v_lshrrev_b32_e32 v1, 8, v0
; SI-NEXT:    v_lshlrev_b32_e32 v2, 8, v1
; SI-NEXT:    v_or_b32_e32 v1, v1, v2
; SI-NEXT:    v_lshlrev_b32_e32 v2, 16, v1
; SI-NEXT:    v_or_b32_e32 v1, v1, v2
; SI-NEXT:    v_or_b32_e32 v0, v0, v2
; SI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT:    s_endpgm
;
; VI-LABEL: scalar_to_vector_v4f16:
; VI:       ; %bb.0: ; %bb
; VI-NEXT:    s_mov_b32 s3, 0xf000
; VI-NEXT:    s_mov_b32 s2, -1
; VI-NEXT:    buffer_load_ubyte v0, off, s[0:3], 0
; VI-NEXT:    s_waitcnt vmcnt(0)
; VI-NEXT:    v_lshlrev_b16_e32 v1, 8, v0
; VI-NEXT:    v_or_b32_e32 v0, v1, v0
; VI-NEXT:    v_lshrrev_b16_e32 v1, 8, v0
; VI-NEXT:    v_lshlrev_b16_e32 v2, 8, v1
; VI-NEXT:    v_or_b32_e32 v1, v1, v2
; VI-NEXT:    v_lshlrev_b32_e32 v2, 16, v1
; VI-NEXT:    v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT:    v_or_b32_sdwa v0, v0, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT:    buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT:    s_endpgm
bb:
  %load = load half, half addrspace(1)* undef, align 1
  %tmp = bitcast half %load to <2 x i8>
  %tmp1 = shufflevector <2 x i8> %tmp, <2 x i8> zeroinitializer, <8 x i32> <i32 0, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
  %tmp2 = shufflevector <8 x i8> %tmp1, <8 x i8> undef, <8 x i32> <i32 0, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9, i32 9>
  store <8 x i8> %tmp2, <8 x i8> addrspace(1)* undef, align 8
  ret void
}

; Getting a SCALAR_TO_VECTOR seems to be tricky. These cases managed
; to produce one, but for some reason never made it to selection.

; define amdgpu_kernel void @scalar_to_vector_test2(<8 x i8> addrspace(1)* %out, i32 addrspace(1)* %in) nounwind {
;   %tmp1 = load i32, i32 addrspace(1)* %in, align 4
;   %bc = bitcast i32 %tmp1 to <4 x i8>
;   %tmp2 = shufflevector <4 x i8> %bc, <4 x i8> undef, <8 x i32> <i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1, i32 1>
;   store <8 x i8> %tmp2, <8 x i8> addrspace(1)* %out, align 4
;   ret void
; }

; define amdgpu_kernel void @scalar_to_vector_test3(<4 x i32> addrspace(1)* %out) nounwind {
;   %newvec0 = insertelement <2 x i64> undef, i64 12345, i32 0
;   %newvec1 = insertelement <2 x i64> %newvec0, i64 undef, i32 1
;   %bc = bitcast <2 x i64> %newvec1 to <4 x i32>
;   %add = add <4 x i32> %bc, <i32 1, i32 2, i32 3, i32 4>
;   store <4 x i32> %add, <4 x i32> addrspace(1)* %out, align 16
;   ret void
; }

; define amdgpu_kernel void @scalar_to_vector_test4(<8 x i16> addrspace(1)* %out) nounwind {
;   %newvec0 = insertelement <4 x i32> undef, i32 12345, i32 0
;   %bc = bitcast <4 x i32> %newvec0 to <8 x i16>
;   %add = add <8 x i16> %bc, <i16 1, i16 2, i16 3, i16 4, i16 1, i16 2, i16 3, i16 4>
;   store <8 x i16> %add, <8 x i16> addrspace(1)* %out, align 16
;   ret void
; }

; define amdgpu_kernel void @scalar_to_vector_test5(<4 x i16> addrspace(1)* %out) nounwind {
;   %newvec0 = insertelement <2 x i32> undef, i32 12345, i32 0
;   %bc = bitcast <2 x i32> %newvec0 to <4 x i16>
;   %add = add <4 x i16> %bc, <i16 1, i16 2, i16 3, i16 4>
;   store <4 x i16> %add, <4 x i16> addrspace(1)* %out, align 16
;   ret void
; }

define amdgpu_kernel void @scalar_to_vector_test6(<2 x half> addrspace(1)* %out, i8 zeroext %val) nounwind {
; SI-LABEL: scalar_to_vector_test6:
; SI:       ; %bb.0:
; SI-NEXT:    s_load_dword s2, s[0:1], 0xb
; SI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x9
; SI-NEXT:    s_mov_b32 s7, 0xf000
; SI-NEXT:    s_waitcnt lgkmcnt(0)
; SI-NEXT:    s_and_b32 s0, s2, 0xff
; SI-NEXT:    s_mov_b32 s6, -1
; SI-NEXT:    v_mov_b32_e32 v0, s0
; SI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
; SI-NEXT:    s_endpgm
;
; VI-LABEL: scalar_to_vector_test6:
; VI:       ; %bb.0:
; VI-NEXT:    s_load_dwordx2 s[4:5], s[0:1], 0x24
; VI-NEXT:    s_load_dword s0, s[0:1], 0x2c
; VI-NEXT:    s_mov_b32 s7, 0xf000
; VI-NEXT:    s_mov_b32 s6, -1
; VI-NEXT:    s_waitcnt lgkmcnt(0)
; VI-NEXT:    s_and_b32 s0, s0, 0xff
; VI-NEXT:    v_mov_b32_e32 v0, s0
; VI-NEXT:    buffer_store_dword v0, off, s[4:7], 0
; VI-NEXT:    s_endpgm
  %newvec0 = insertelement <4 x i8> undef, i8 %val, i32 0
  %bc = bitcast <4 x i8> %newvec0 to <2 x half>
  store <2 x half> %bc, <2 x half> addrspace(1)* %out
  ret void
}