; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -O0 -amdgpu-ir-lower-kernel-arguments=0 -stop-after=irtranslator -global-isel %s -o - | FileCheck -check-prefix=HSA-VI %s
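
; Kernel arguments to amdgpu_kernel functions are passed indirectly: the
; kernarg segment pointer arrives live-in in $sgpr4_sgpr5 as a p4 (constant
; address space) pointer, and each argument is fetched with a G_GEP to its
; ABI offset followed by a dereferenceable invariant G_LOAD from addrspace 4.
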
define amdgpu_kernel void @i8_arg(i32 addrspace(1)* nocapture %out, i8 %in) nounwind {
; HSA-VI-LABEL: name: i8_arg
; HSA-VI: bb.1 (%ir-block.0):
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i8 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s8)
; HSA-VI:   G_STORE [[ZEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
  %ext = zext i8 %in to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

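; The zeroext/signext variants below lower identically to the plain i8/i16
; arguments: the attribute does not change how the in-memory kernel argument
; is loaded; the extension comes from the zext/sext in the IR body.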
define amdgpu_kernel void @i8_zext_arg(i32 addrspace(1)* nocapture %out, i8 zeroext %in) nounwind {
; HSA-VI-LABEL: name: i8_zext_arg
; HSA-VI: bb.1 (%ir-block.0):
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i8 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s8)
; HSA-VI:   G_STORE [[ZEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
  %ext = zext i8 %in to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @i8_sext_arg(i32 addrspace(1)* nocapture %out, i8 signext %in) nounwind {
; HSA-VI-LABEL: name: i8_sext_arg
; HSA-VI: bb.1 (%ir-block.0):
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i8 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD1]](s8)
; HSA-VI:   G_STORE [[SEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
  %ext = sext i8 %in to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @i16_arg(i32 addrspace(1)* nocapture %out, i16 %in) nounwind {
; HSA-VI-LABEL: name: i16_arg
; HSA-VI: bb.1 (%ir-block.0):
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 2 from `i16 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s16)
; HSA-VI:   G_STORE [[ZEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
  %ext = zext i16 %in to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @i16_zext_arg(i32 addrspace(1)* nocapture %out, i16 zeroext %in) nounwind {
; HSA-VI-LABEL: name: i16_zext_arg
; HSA-VI: bb.1 (%ir-block.0):
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 2 from `i16 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s16)
; HSA-VI:   G_STORE [[ZEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
  %ext = zext i16 %in to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @i16_sext_arg(i32 addrspace(1)* nocapture %out, i16 signext %in) nounwind {
; HSA-VI-LABEL: name: i16_sext_arg
; HSA-VI: bb.1 (%ir-block.0):
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(s16) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 2 from `i16 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD1]](s16)
; HSA-VI:   G_STORE [[SEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
  %ext = sext i16 %in to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @i32_arg(i32 addrspace(1)* nocapture %out, i32 %in) nounwind {
; HSA-VI-LABEL: name: i32_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 4 from `i32 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store i32 %in, i32 addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @f32_arg(float addrspace(1)* nocapture %out, float %in) nounwind {
; HSA-VI-LABEL: name: f32_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `float addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(s32) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 4 from `float addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store float %in, float addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @v2i8_arg(<2 x i8> addrspace(1)* %out, <2 x i8> %in) {
; HSA-VI-LABEL: name: v2i8_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<2 x i8> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<2 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 2 from `<2 x i8> addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<2 x s8>), [[LOAD]](p1) :: (store 2 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <2 x i8> %in, <2 x i8> addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @v2i16_arg(<2 x i16> addrspace(1)* %out, <2 x i16> %in) {
; HSA-VI-LABEL: name: v2i16_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<2 x i16> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<2 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 4 from `<2 x i16> addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<2 x s16>), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <2 x i16> %in, <2 x i16> addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @v2i32_arg(<2 x i32> addrspace(1)* nocapture %out, <2 x i32> %in) nounwind {
; HSA-VI-LABEL: name: v2i32_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<2 x i32> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8 from `<2 x i32> addrspace(4)* undef`, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<2 x s32>), [[LOAD]](p1) :: (store 8 into %ir.out, align 4, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <2 x i32> %in, <2 x i32> addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @v2f32_arg(<2 x float> addrspace(1)* nocapture %out, <2 x float> %in) nounwind {
; HSA-VI-LABEL: name: v2f32_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<2 x float> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<2 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8 from `<2 x float> addrspace(4)* undef`, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<2 x s32>), [[LOAD]](p1) :: (store 8 into %ir.out, align 4, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <2 x float> %in, <2 x float> addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @v3i8_arg(<3 x i8> addrspace(1)* nocapture %out, <3 x i8> %in) nounwind {
; HSA-VI-LABEL: name: v3i8_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<3 x i8> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<3 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 3 from `<3 x i8> addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<3 x s8>), [[LOAD]](p1) :: (store 3 into %ir.out, align 4, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <3 x i8> %in, <3 x i8> addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @v3i16_arg(<3 x i16> addrspace(1)* nocapture %out, <3 x i16> %in) nounwind {
; HSA-VI-LABEL: name: v3i16_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<3 x i16> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<3 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 6 from `<3 x i16> addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<3 x s16>), [[LOAD]](p1) :: (store 6 into %ir.out, align 4, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <3 x i16> %in, <3 x i16> addrspace(1)* %out, align 4
  ret void
}

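; In the v3i32/v3f32 cases below, the 12-byte vector is ABI-aligned to 16,
; so %in starts at kernarg offset 16 rather than 8 and is read with a single
; 12-byte load.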
define amdgpu_kernel void @v3i32_arg(<3 x i32> addrspace(1)* nocapture %out, <3 x i32> %in) nounwind {
; HSA-VI-LABEL: name: v3i32_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<3 x i32> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 12 from `<3 x i32> addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<3 x s32>), [[LOAD]](p1) :: (store 12 into %ir.out, align 4, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <3 x i32> %in, <3 x i32> addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @v3f32_arg(<3 x float> addrspace(1)* nocapture %out, <3 x float> %in) nounwind {
; HSA-VI-LABEL: name: v3f32_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<3 x float> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<3 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 12 from `<3 x float> addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<3 x s32>), [[LOAD]](p1) :: (store 12 into %ir.out, align 4, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <3 x float> %in, <3 x float> addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @v4i8_arg(<4 x i8> addrspace(1)* %out, <4 x i8> %in) {
; HSA-VI-LABEL: name: v4i8_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<4 x i8> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<4 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 4 from `<4 x i8> addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<4 x s8>), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <4 x i8> %in, <4 x i8> addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @v4i16_arg(<4 x i16> addrspace(1)* %out, <4 x i16> %in) {
; HSA-VI-LABEL: name: v4i16_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<4 x i16> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<4 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8 from `<4 x i16> addrspace(4)* undef`, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<4 x s16>), [[LOAD]](p1) :: (store 8 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <4 x i16> %in, <4 x i16> addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @v4i32_arg(<4 x i32> addrspace(1)* nocapture %out, <4 x i32> %in) nounwind {
; HSA-VI-LABEL: name: v4i32_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<4 x i32> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 16 from `<4 x i32> addrspace(4)* undef`, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<4 x s32>), [[LOAD]](p1) :: (store 16 into %ir.out, align 4, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <4 x i32> %in, <4 x i32> addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @v4f32_arg(<4 x float> addrspace(1)* nocapture %out, <4 x float> %in) nounwind {
; HSA-VI-LABEL: name: v4f32_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<4 x float> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<4 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 16 from `<4 x float> addrspace(4)* undef`, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<4 x s32>), [[LOAD]](p1) :: (store 16 into %ir.out, align 4, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <4 x float> %in, <4 x float> addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @v8i8_arg(<8 x i8> addrspace(1)* %out, <8 x i8> %in) {
; HSA-VI-LABEL: name: v8i8_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<8 x i8> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<8 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8 from `<8 x i8> addrspace(4)* undef`, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<8 x s8>), [[LOAD]](p1) :: (store 8 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <8 x i8> %in, <8 x i8> addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @v8i16_arg(<8 x i16> addrspace(1)* %out, <8 x i16> %in) {
; HSA-VI-LABEL: name: v8i16_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<8 x i16> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<8 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 16 from `<8 x i16> addrspace(4)* undef`, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<8 x s16>), [[LOAD]](p1) :: (store 16 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <8 x i16> %in, <8 x i16> addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @v8i32_arg(<8 x i32> addrspace(1)* nocapture %out, <8 x i32> %in) nounwind {
; HSA-VI-LABEL: name: v8i32_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<8 x i32> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<8 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 32 from `<8 x i32> addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<8 x s32>), [[LOAD]](p1) :: (store 32 into %ir.out, align 4, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <8 x i32> %in, <8 x i32> addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @v8f32_arg(<8 x float> addrspace(1)* nocapture %out, <8 x float> %in) nounwind {
; HSA-VI-LABEL: name: v8f32_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<8 x float> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<8 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 32 from `<8 x float> addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<8 x s32>), [[LOAD]](p1) :: (store 32 into %ir.out, align 4, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <8 x float> %in, <8 x float> addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @v16i8_arg(<16 x i8> addrspace(1)* %out, <16 x i8> %in) {
; HSA-VI-LABEL: name: v16i8_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<16 x i8> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<16 x s8>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 16 from `<16 x i8> addrspace(4)* undef`, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<16 x s8>), [[LOAD]](p1) :: (store 16 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <16 x i8> %in, <16 x i8> addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @v16i16_arg(<16 x i16> addrspace(1)* %out, <16 x i16> %in) {
; HSA-VI-LABEL: name: v16i16_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<16 x i16> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 32
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<16 x s16>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 32 from `<16 x i16> addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<16 x s16>), [[LOAD]](p1) :: (store 32 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <16 x i16> %in, <16 x i16> addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @v16i32_arg(<16 x i32> addrspace(1)* nocapture %out, <16 x i32> %in) nounwind {
; HSA-VI-LABEL: name: v16i32_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<16 x i32> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 64 from `<16 x i32> addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<16 x s32>), [[LOAD]](p1) :: (store 64 into %ir.out, align 4, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <16 x i32> %in, <16 x i32> addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @v16f32_arg(<16 x float> addrspace(1)* nocapture %out, <16 x float> %in) nounwind {
; HSA-VI-LABEL: name: v16f32_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `<16 x float> addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 64
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(<16 x s32>) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 64 from `<16 x float> addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](<16 x s32>), [[LOAD]](p1) :: (store 64 into %ir.out, align 4, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store <16 x float> %in, <16 x float> addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @kernel_arg_i64(i64 addrspace(1)* %out, i64 %a) nounwind {
; HSA-VI-LABEL: name: kernel_arg_i64
; HSA-VI: bb.1 (%ir-block.0):
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i64 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8 from `i64 addrspace(4)* undef`, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](s64), [[LOAD]](p1) :: (store 8 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
  store i64 %a, i64 addrspace(1)* %out, align 8
  ret void
}

define amdgpu_kernel void @f64_kernel_arg(double addrspace(1)* %out, double %in) {
; HSA-VI-LABEL: name: f64_kernel_arg
; HSA-VI: bb.1.entry:
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `double addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(s64) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 8 from `double addrspace(4)* undef`, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](s64), [[LOAD]](p1) :: (store 8 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
entry:
  store double %in, double addrspace(1)* %out
  ret void
}

define amdgpu_kernel void @i1_arg(i1 addrspace(1)* %out, i1 %x) nounwind {
; HSA-VI-LABEL: name: i1_arg
; HSA-VI: bb.1 (%ir-block.0):
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i1 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i1 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI:   G_STORE [[LOAD1]](s1), [[LOAD]](p1) :: (store 1 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
  store i1 %x, i1 addrspace(1)* %out, align 1
  ret void
}

define amdgpu_kernel void @i1_arg_zext_i32(i32 addrspace(1)* %out, i1 %x) nounwind {
; HSA-VI-LABEL: name: i1_arg_zext_i32
; HSA-VI: bb.1 (%ir-block.0):
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i1 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI:   [[ZEXT:%[0-9]+]]:_(s32) = G_ZEXT [[LOAD1]](s1)
; HSA-VI:   G_STORE [[ZEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
  %ext = zext i1 %x to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @i1_arg_zext_i64(i64 addrspace(1)* %out, i1 %x) nounwind {
; HSA-VI-LABEL: name: i1_arg_zext_i64
; HSA-VI: bb.1 (%ir-block.0):
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i64 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i1 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI:   [[ZEXT:%[0-9]+]]:_(s64) = G_ZEXT [[LOAD1]](s1)
; HSA-VI:   G_STORE [[ZEXT]](s64), [[LOAD]](p1) :: (store 8 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
  %ext = zext i1 %x to i64
  store i64 %ext, i64 addrspace(1)* %out, align 8
  ret void
}

define amdgpu_kernel void @i1_arg_sext_i32(i32 addrspace(1)* %out, i1 %x) nounwind {
; HSA-VI-LABEL: name: i1_arg_sext_i32
; HSA-VI: bb.1 (%ir-block.0):
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i32 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i1 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI:   [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[LOAD1]](s1)
; HSA-VI:   G_STORE [[SEXT]](s32), [[LOAD]](p1) :: (store 4 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
  %ext = sext i1 %x to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

define amdgpu_kernel void @i1_arg_sext_i64(i64 addrspace(1)* %out, i1 %x) nounwind {
; HSA-VI-LABEL: name: i1_arg_sext_i64
; HSA-VI: bb.1 (%ir-block.0):
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(p1) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 8 from `i64 addrspace(1)* addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 8
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(s1) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i1 addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI:   [[SEXT:%[0-9]+]]:_(s64) = G_SEXT [[LOAD1]](s1)
; HSA-VI:   G_STORE [[SEXT]](s64), [[LOAD]](p1) :: (store 8 into %ir.out, addrspace 1)
; HSA-VI:   S_ENDPGM 0
  %ext = sext i1 %x to i64
  store i64 %ext, i64 addrspace(1)* %out, align 8
  ret void
}

define amdgpu_kernel void @empty_struct_arg({} %in) nounwind {
; HSA-VI-LABEL: name: empty_struct_arg
; HSA-VI: bb.1 (%ir-block.0):
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   S_ENDPGM 0
  ret void
}

; The correct load offsets for these:
; load 4 from 0,
; load 8 from 8,
; load 4 from 24,
; load 8 from 32
;
; With the SelectionDAG argument lowering, the alignments for the
; struct members are not properly considered, making these wrong.

; FIXME: GlobalISel extractvalue emission broken
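
; Worked layout for { i32, i64 }, i8, { i32, i64 } under natural alignment,
; derived from the checks below: %arg0 occupies bytes 0-15 (i32 at 0, 4
; bytes of padding, i64 at 8), the unnamed i8 sits at byte 16, and %arg1 is
; realigned to 8 so it occupies bytes 24-39. Hence the G_GEP offsets 0, 16
; and 24.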
define amdgpu_kernel void @struct_argument_alignment({i32, i64} %arg0, i8, {i32, i64} %arg1) {
; %val0 = extractvalue {i32, i64} %arg0, 0
; %val1 = extractvalue {i32, i64} %arg0, 1
; %val2 = extractvalue {i32, i64} %arg1, 0
; %val3 = extractvalue {i32, i64} %arg1, 1
; store volatile i32 %val0, i32 addrspace(1)* null
; store volatile i64 %val1, i64 addrspace(1)* null
; store volatile i32 %val2, i32 addrspace(1)* null
; store volatile i64 %val3, i64 addrspace(1)* null
; HSA-VI-LABEL: name: struct_argument_alignment
; HSA-VI: bb.1 (%ir-block.1):
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(s128) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 16 from `{ i32, i64 } addrspace(4)* undef`, addrspace 4)
; HSA-VI:   [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[LOAD]](s128), 0
; HSA-VI:   [[EXTRACT1:%[0-9]+]]:_(s64) = G_EXTRACT [[LOAD]](s128), 64
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 16
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i8 addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 24
; HSA-VI:   [[GEP2:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C2]](s64)
; HSA-VI:   [[LOAD2:%[0-9]+]]:_(s128) = G_LOAD [[GEP2]](p4) :: (dereferenceable invariant load 16 from `{ i32, i64 } addrspace(4)* undef`, align 8, addrspace 4)
; HSA-VI:   [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[LOAD2]](s128), 0
; HSA-VI:   [[EXTRACT3:%[0-9]+]]:_(s64) = G_EXTRACT [[LOAD2]](s128), 64
; HSA-VI:   S_ENDPGM 0
  ret void
}

; No padding between i8 and next struct, but round up at end to 4 byte
; multiple.
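;
; Packed layout, from the checks below: %arg0 occupies bytes 0-11 with no
; internal padding, the unnamed i8 is at byte 12, and %arg1 begins
; immediately at byte 13 (align 1), ending at byte 25, so the total kernarg
; size rounds up to the next 4 byte multiple.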
define amdgpu_kernel void @packed_struct_argument_alignment(<{i32, i64}> %arg0, i8, <{i32, i64}> %arg1) {
; %val0 = extractvalue <{i32, i64}> %arg0, 0
; %val1 = extractvalue <{i32, i64}> %arg0, 1
; %val2 = extractvalue <{i32, i64}> %arg1, 0
; %val3 = extractvalue <{i32, i64}> %arg1, 1
; store volatile i32 %val0, i32 addrspace(1)* null
; store volatile i64 %val1, i64 addrspace(1)* null
; store volatile i32 %val2, i32 addrspace(1)* null
; store volatile i64 %val3, i64 addrspace(1)* null
; HSA-VI-LABEL: name: packed_struct_argument_alignment
; HSA-VI: bb.1 (%ir-block.1):
; HSA-VI:   liveins: $sgpr4_sgpr5
; HSA-VI:   [[COPY:%[0-9]+]]:_(p4) = COPY $sgpr4_sgpr5
; HSA-VI:   [[C:%[0-9]+]]:_(s64) = G_CONSTANT i64 0
; HSA-VI:   [[GEP:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C]](s64)
; HSA-VI:   [[LOAD:%[0-9]+]]:_(s96) = G_LOAD [[GEP]](p4) :: (dereferenceable invariant load 12 from `<{ i32, i64 }> addrspace(4)* undef`, align 16, addrspace 4)
; HSA-VI:   [[EXTRACT:%[0-9]+]]:_(s32) = G_EXTRACT [[LOAD]](s96), 0
; HSA-VI:   [[EXTRACT1:%[0-9]+]]:_(s64) = G_EXTRACT [[LOAD]](s96), 32
; HSA-VI:   [[C1:%[0-9]+]]:_(s64) = G_CONSTANT i64 12
; HSA-VI:   [[GEP1:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C1]](s64)
; HSA-VI:   [[LOAD1:%[0-9]+]]:_(s8) = G_LOAD [[GEP1]](p4) :: (dereferenceable invariant load 1 from `i8 addrspace(4)* undef`, align 4, addrspace 4)
; HSA-VI:   [[C2:%[0-9]+]]:_(s64) = G_CONSTANT i64 13
; HSA-VI:   [[GEP2:%[0-9]+]]:_(p4) = G_GEP [[COPY]], [[C2]](s64)
; HSA-VI:   [[LOAD2:%[0-9]+]]:_(s96) = G_LOAD [[GEP2]](p4) :: (dereferenceable invariant load 12 from `<{ i32, i64 }> addrspace(4)* undef`, align 1, addrspace 4)
; HSA-VI:   [[EXTRACT2:%[0-9]+]]:_(s32) = G_EXTRACT [[LOAD2]](s96), 0
; HSA-VI:   [[EXTRACT3:%[0-9]+]]:_(s64) = G_EXTRACT [[LOAD2]](s96), 32