! RUN: %flang_fc1 -flang-experimental-hlfir -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
! REQUIRES: target=powerpc{{.*}}

!----------------------
! vec_st
!----------------------
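! vec_st(arg1, arg2, arg3) stores all 16 bytes of arg1 at the effective
! address arg3 + arg2. It lowers to @llvm.ppc.altivec.stvx which, per
! AltiVec semantics, ignores the low four bits of the address; payloads
! that are not already <4 x i32> are bitcast to <4 x i32> first.
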
! CHECK-LABEL: vec_st_vi1i2vi1
subroutine vec_st_vi1i2vi1(arg1, arg2, arg3)
  vector(integer(1)) :: arg1, arg3
  integer(2) :: arg2
  call vec_st(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: %[[bcArg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
! LLVMIR: call void @llvm.ppc.altivec.stvx(<4 x i32> %[[bcArg1]], ptr %[[arg3]])
end subroutine vec_st_vi1i2vi1

! CHECK-LABEL: vec_st_vi2i2vi2
subroutine vec_st_vi2i2vi2(arg1, arg2, arg3)
  vector(integer(2)) :: arg1, arg3
  integer(2) :: arg2
  call vec_st(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: %[[bcArg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
! LLVMIR: call void @llvm.ppc.altivec.stvx(<4 x i32> %[[bcArg1]], ptr %[[arg3]])
end subroutine vec_st_vi2i2vi2

! CHECK-LABEL: vec_st_vi4i2vi4
subroutine vec_st_vi4i2vi4(arg1, arg2, arg3)
  vector(integer(4)) :: arg1, arg3
  integer(2) :: arg2
  call vec_st(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvx(<4 x i32> %[[arg1]], ptr %[[arg3]])
end subroutine vec_st_vi4i2vi4

! CHECK-LABEL: vec_st_vu1i4vu1
subroutine vec_st_vu1i4vu1(arg1, arg2, arg3)
  vector(unsigned(1)) :: arg1, arg3
  integer(4) :: arg2
  call vec_st(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %{{.*}}, align 4
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i32 %[[arg2]]
! LLVMIR: %[[bcArg1:.*]] = bitcast <16 x i8> %[[arg1]] to <4 x i32>
! LLVMIR: call void @llvm.ppc.altivec.stvx(<4 x i32> %[[bcArg1]], ptr %[[arg3]])
end subroutine vec_st_vu1i4vu1

! CHECK-LABEL: vec_st_vu2i4vu2
subroutine vec_st_vu2i4vu2(arg1, arg2, arg3)
  vector(unsigned(2)) :: arg1, arg3
  integer(4) :: arg2
  call vec_st(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %{{.*}}, align 4
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i32 %[[arg2]]
! LLVMIR: %[[bcArg1:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
! LLVMIR: call void @llvm.ppc.altivec.stvx(<4 x i32> %[[bcArg1]], ptr %[[arg3]])
end subroutine vec_st_vu2i4vu2

! CHECK-LABEL: vec_st_vu4i4vu4
subroutine vec_st_vu4i4vu4(arg1, arg2, arg3)
  vector(unsigned(4)) :: arg1, arg3
  integer(4) :: arg2
  call vec_st(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %{{.*}}, align 4
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i32 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvx(<4 x i32> %[[arg1]], ptr %[[arg3]])
end subroutine vec_st_vu4i4vu4

! CHECK-LABEL: vec_st_vi4i4via4
subroutine vec_st_vi4i4via4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1, arg3(5)
  integer(4) :: arg2, i
  call vec_st(arg1, arg2, arg3(i))
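
! The checks below trace the address computation for arg3(i): the index is
! sign-extended to i64, rebased to zero, scaled (trivially, by stride
! factors of 1), and used in a getelementptr before arg2 is added as a
! byte offset.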
! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[iextsub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[iextmul:.*]] = mul nsw i64 %[[iextsub]], 1
! LLVMIR: %[[iextmul2:.*]] = mul nsw i64 %[[iextmul]], 1
! LLVMIR: %[[iextadd:.*]] = add nsw i64 %[[iextmul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr <4 x i32>, ptr %2, i64 %[[iextadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i32 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvx(<4 x i32> %[[arg1]], ptr %[[gep2]])
end subroutine vec_st_vi4i4via4

!----------------------
! vec_ste
!----------------------
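! vec_ste stores a single element of arg1 at the effective address
! arg3 + arg2, selecting @llvm.ppc.altivec.stvebx, stvehx, or stvewx by
! element width; real(4) payloads are bitcast to <4 x i32> before stvewx.
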
! CHECK-LABEL: vec_ste_vi1i2i1
subroutine vec_ste_vi1i2i1(arg1, arg2, arg3)
  vector(integer(1)) :: arg1
  integer(2) :: arg2
  integer(1) :: arg3
  call vec_ste(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvebx(<16 x i8> %[[arg1]], ptr %[[arg3]])
end subroutine vec_ste_vi1i2i1

! CHECK-LABEL: vec_ste_vi2i2i2
subroutine vec_ste_vi2i2i2(arg1, arg2, arg3)
  vector(integer(2)) :: arg1
  integer(2) :: arg2
  integer(2) :: arg3
  call vec_ste(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvehx(<8 x i16> %[[arg1]], ptr %[[arg3]])
end subroutine vec_ste_vi2i2i2

! CHECK-LABEL: vec_ste_vi4i2i4
subroutine vec_ste_vi4i2i4(arg1, arg2, arg3)
  vector(integer(4)) :: arg1
  integer(2) :: arg2
  integer(4) :: arg3
  call vec_ste(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvewx(<4 x i32> %[[arg1]], ptr %[[arg3]])
end subroutine vec_ste_vi4i2i4

! CHECK-LABEL: vec_ste_vu1i4u1
subroutine vec_ste_vu1i4u1(arg1, arg2, arg3)
  vector(unsigned(1)) :: arg1
  integer(4) :: arg2
  integer(1) :: arg3
  call vec_ste(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <16 x i8>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %{{.*}}, align 4
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i32 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvebx(<16 x i8> %[[arg1]], ptr %[[arg3]])
end subroutine vec_ste_vu1i4u1

! CHECK-LABEL: vec_ste_vu2i4u2
subroutine vec_ste_vu2i4u2(arg1, arg2, arg3)
  vector(unsigned(2)) :: arg1
  integer(4) :: arg2
  integer(2) :: arg3
  call vec_ste(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %{{.*}}, align 4
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i32 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvehx(<8 x i16> %[[arg1]], ptr %[[arg3]])
end subroutine vec_ste_vu2i4u2

! CHECK-LABEL: vec_ste_vu4i4u4
subroutine vec_ste_vu4i4u4(arg1, arg2, arg3)
  vector(unsigned(4)) :: arg1
  integer(4) :: arg2
  integer(4) :: arg3
  call vec_ste(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %{{.*}}, align 4
! LLVMIR: %[[arg3:.*]] = getelementptr i8, ptr %{{.*}}, i32 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvewx(<4 x i32> %[[arg1]], ptr %[[arg3]])
end subroutine vec_ste_vu4i4u4

! CHECK-LABEL: vec_ste_vr4i4r4
subroutine vec_ste_vr4i4r4(arg1, arg2, arg3)
  vector(real(4)) :: arg1
  integer(4) :: arg2
  real(4) :: arg3
  call vec_ste(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[pos:.*]] = getelementptr i8, ptr %2, i32 %[[arg2]]
! LLVMIR: %[[bc:.*]] = bitcast <4 x float> %[[arg1]] to <4 x i32>
! LLVMIR: call void @llvm.ppc.altivec.stvewx(<4 x i32> %[[bc]], ptr %[[pos]])
end subroutine vec_ste_vr4i4r4

! CHECK-LABEL: vec_ste_vi4i4ia4
subroutine vec_ste_vi4i4ia4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(4) :: arg2, i
  integer(4) :: arg3(5)
  call vec_ste(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr i32, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i32 %[[arg2]]
! LLVMIR: call void @llvm.ppc.altivec.stvewx(<4 x i32> %[[arg1]], ptr %[[gep2]])
end subroutine vec_ste_vi4i4ia4

!----------------------
! vec_stxv
!----------------------
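! vec_stxv stores arg1 at the effective address arg3 + arg2 with no
! address truncation; it lowers to a plain LLVM store rather than an
! AltiVec intrinsic call.
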
! CHECK-LABEL: vec_stxv_test_vr4i2r4
subroutine vec_stxv_test_vr4i2r4(arg1, arg2, arg3)
  vector(real(4)) :: arg1
  integer(2) :: arg2
  real(4) :: arg3
  call vec_stxv(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: store <4 x float> %[[arg1]], ptr %[[addr]], align 16
end subroutine vec_stxv_test_vr4i2r4

! CHECK-LABEL: vec_stxv_test_vi4i8ia4
subroutine vec_stxv_test_vi4i8ia4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(8) :: arg2
  integer(4) :: arg3(10)
  integer(4) :: i
  call vec_stxv(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr i32, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i64, ptr %1, align 8
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i64 %[[arg2]]
! LLVMIR: store <4 x i32> %[[arg1]], ptr %[[gep2]], align 16
end subroutine vec_stxv_test_vi4i8ia4

! CHECK-LABEL: vec_stxv_test_vi2i4vi2
subroutine vec_stxv_test_vi2i4vi2(arg1, arg2, arg3)
  vector(integer(2)) :: arg1
  integer(4) :: arg2
  vector(integer(2)) :: arg3
  call vec_stxv(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[arg2]]
! LLVMIR: store <8 x i16> %[[arg1]], ptr %[[addr]], align 16
end subroutine vec_stxv_test_vi2i4vi2

! CHECK-LABEL: vec_stxv_test_vi4i4vai4
subroutine vec_stxv_test_vi4i4vai4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(4) :: arg2
  vector(integer(4)) :: arg3(20)
  integer(4) :: i
  call vec_stxv(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr <4 x i32>, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i32 %[[arg2]]
! LLVMIR: store <4 x i32> %[[arg1]], ptr %[[gep2]], align 16
end subroutine vec_stxv_test_vi4i4vai4

!----------------------
! vec_xst
!----------------------
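! vec_xst has the same lowering as vec_stxv: arg1 is written to the
! effective address arg3 + arg2 through a plain LLVM store.
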
! CHECK-LABEL: vec_xst_test_vr4i2r4
subroutine vec_xst_test_vr4i2r4(arg1, arg2, arg3)
  vector(real(4)) :: arg1
  integer(2) :: arg2
  real(4) :: arg3
  call vec_xst(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: store <4 x float> %[[arg1]], ptr %[[addr]], align 16
end subroutine vec_xst_test_vr4i2r4

! CHECK-LABEL: vec_xst_test_vi4i8ia4
subroutine vec_xst_test_vi4i8ia4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(8) :: arg2
  integer(4) :: arg3(10)
  integer(4) :: i
  call vec_xst(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr i32, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i64, ptr %1, align 8
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i64 %[[arg2]]
! LLVMIR: store <4 x i32> %[[arg1]], ptr %[[gep2]], align 16
end subroutine vec_xst_test_vi4i8ia4

! CHECK-LABEL: vec_xst_test_vi2i4vi2
subroutine vec_xst_test_vi2i4vi2(arg1, arg2, arg3)
  vector(integer(2)) :: arg1
  integer(4) :: arg2
  vector(integer(2)) :: arg3
  call vec_xst(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[arg2]]
! LLVMIR: store <8 x i16> %[[arg1]], ptr %[[addr]], align 16
end subroutine vec_xst_test_vi2i4vi2

! CHECK-LABEL: vec_xst_test_vi4i4vai4
subroutine vec_xst_test_vi4i4vai4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(4) :: arg2
  vector(integer(4)) :: arg3(20)
  integer(4) :: i
  call vec_xst(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr <4 x i32>, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i32 %[[arg2]]
! LLVMIR: store <4 x i32> %[[arg1]], ptr %[[gep2]], align 16
end subroutine vec_xst_test_vi4i4vai4

!----------------------
! vec_xst_be
!----------------------
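! vec_xst_be stores the elements of arg1 in big-endian element order:
! the lowering reverses the vector with a shufflevector before the store,
! which is what the reversed shuffle masks below check.
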
! CHECK-LABEL: vec_xst_be_test_vr4i2r4
subroutine vec_xst_be_test_vr4i2r4(arg1, arg2, arg3)
  vector(real(4)) :: arg1
  integer(2) :: arg2
  real(4) :: arg3
  call vec_xst_be(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: %[[shf:.*]] = shufflevector <4 x float> %[[arg1]], <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <4 x float> %[[shf]], ptr %[[addr]], align 16
end subroutine vec_xst_be_test_vr4i2r4

! CHECK-LABEL: vec_xst_be_test_vi4i8ia4
subroutine vec_xst_be_test_vi4i8ia4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(8) :: arg2
  integer(4) :: arg3(10)
  integer(4) :: i
  call vec_xst_be(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr i32, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i64, ptr %1, align 8
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i64 %[[arg2]]
! LLVMIR: %[[src:.*]] = shufflevector <4 x i32> %[[arg1]], <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <4 x i32> %[[src]], ptr %[[gep2]], align 16
end subroutine vec_xst_be_test_vi4i8ia4

! CHECK-LABEL: vec_xst_be_test_vi2i4vi2
subroutine vec_xst_be_test_vi2i4vi2(arg1, arg2, arg3)
  vector(integer(2)) :: arg1
  integer(4) :: arg2
  vector(integer(2)) :: arg3
  call vec_xst_be(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[arg2]]
! LLVMIR: %[[src:.*]] = shufflevector <8 x i16> %[[arg1]], <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <8 x i16> %[[src]], ptr %[[addr]], align 16
end subroutine vec_xst_be_test_vi2i4vi2

! CHECK-LABEL: vec_xst_be_test_vi4i4vai4
subroutine vec_xst_be_test_vi4i4vai4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(4) :: arg2
  vector(integer(4)) :: arg3(20)
  integer(4) :: i
  call vec_xst_be(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr <4 x i32>, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i32 %[[arg2]]
! LLVMIR: %[[src:.*]] = shufflevector <4 x i32> %[[arg1]], <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <4 x i32> %[[src]], ptr %[[gep2]], align 16
end subroutine vec_xst_be_test_vi4i4vai4

!----------------------
! vec_xstd2
!----------------------
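! vec_xstd2 stores arg1 as two doublewords: the value is bitcast to
! <2 x i64> before the store, whatever the source element type.
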
! CHECK-LABEL: vec_xstd2_test_vr4i2r4
subroutine vec_xstd2_test_vr4i2r4(arg1, arg2, arg3)
  vector(real(4)) :: arg1
  integer(2) :: arg2
  real(4) :: arg3
  call vec_xstd2(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: %[[src:.*]] = bitcast <4 x float> %[[arg1]] to <2 x i64>
! LLVMIR: store <2 x i64> %[[src]], ptr %[[addr]], align 16
end subroutine vec_xstd2_test_vr4i2r4

! CHECK-LABEL: vec_xstd2_test_vi4i8ia4
subroutine vec_xstd2_test_vi4i8ia4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(8) :: arg2
  integer(4) :: arg3(10)
  integer(4) :: i
  call vec_xstd2(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr i32, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i64, ptr %1, align 8
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i64 %[[arg2]]
! LLVMIR: %[[src:.*]] = bitcast <4 x i32> %[[arg1]] to <2 x i64>
! LLVMIR: store <2 x i64> %[[src]], ptr %[[gep2]], align 16
end subroutine vec_xstd2_test_vi4i8ia4

! CHECK-LABEL: vec_xstd2_test_vi2i4vi2
subroutine vec_xstd2_test_vi2i4vi2(arg1, arg2, arg3)
  vector(integer(2)) :: arg1
  integer(4) :: arg2
  vector(integer(2)) :: arg3
  call vec_xstd2(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[arg2]]
! LLVMIR: %[[src:.*]] = bitcast <8 x i16> %[[arg1]] to <2 x i64>
! LLVMIR: store <2 x i64> %[[src]], ptr %[[addr]], align 16
end subroutine vec_xstd2_test_vi2i4vi2

! CHECK-LABEL: vec_xstd2_test_vi4i4vai4
subroutine vec_xstd2_test_vi4i4vai4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(4) :: arg2
  vector(integer(4)) :: arg3(20)
  integer(4) :: i
  call vec_xstd2(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr <4 x i32>, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i32 %[[arg2]]
! LLVMIR: %[[src:.*]] = bitcast <4 x i32> %[[arg1]] to <2 x i64>
! LLVMIR: store <2 x i64> %[[src]], ptr %[[gep2]], align 16
end subroutine vec_xstd2_test_vi4i4vai4

!----------------------
! vec_xstw4
!----------------------
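! vec_xstw4 stores arg1 as four words: sub-word integer payloads are
! bitcast to <4 x i32> before the store, while <4 x float> and <4 x i32>
! values are stored directly.
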
! CHECK-LABEL: vec_xstw4_test_vr4i2r4
subroutine vec_xstw4_test_vr4i2r4(arg1, arg2, arg3)
  vector(real(4)) :: arg1
  integer(2) :: arg2
  real(4) :: arg3
  call vec_xstw4(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %{{.*}}, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %{{.*}}, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %{{.*}}, i16 %[[arg2]]
! LLVMIR: store <4 x float> %[[arg1]], ptr %[[addr]], align 16
end subroutine vec_xstw4_test_vr4i2r4

! CHECK-LABEL: vec_xstw4_test_vi4i8ia4
subroutine vec_xstw4_test_vi4i8ia4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(8) :: arg2
  integer(4) :: arg3(10)
  integer(4) :: i
  call vec_xstw4(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr i32, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i64, ptr %1, align 8
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i64 %[[arg2]]
! LLVMIR: store <4 x i32> %[[arg1]], ptr %[[gep2]], align 16
end subroutine vec_xstw4_test_vi4i8ia4

! CHECK-LABEL: vec_xstw4_test_vi2i4vi2
subroutine vec_xstw4_test_vi2i4vi2(arg1, arg2, arg3)
  vector(integer(2)) :: arg1
  integer(4) :: arg2
  vector(integer(2)) :: arg3
  call vec_xstw4(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[arg2]]
! LLVMIR: %[[src:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
! LLVMIR: store <4 x i32> %[[src]], ptr %[[addr]], align 16
end subroutine vec_xstw4_test_vi2i4vi2

! CHECK-LABEL: vec_xstw4_test_vi4i4vai4
subroutine vec_xstw4_test_vi4i4vai4(arg1, arg2, arg3, i)
  vector(integer(4)) :: arg1
  integer(4) :: arg2
  vector(integer(4)) :: arg3(20)
  integer(4) :: i
  call vec_xstw4(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub nsw i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul nsw i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul nsw i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add nsw i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr <4 x i32>, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i32 %[[arg2]]
! LLVMIR: store <4 x i32> %[[arg1]], ptr %[[gep2]], align 16
end subroutine vec_xstw4_test_vi4i4vai4