! RUN: %flang_fc1 -flang-experimental-hlfir -triple powerpc64le-unknown-unknown -target-cpu pwr10 -emit-llvm %s -o - | FileCheck --check-prefixes="LLVMIR" %s
! REQUIRES: target=powerpc{{.*}}

!----------------------
! mma_lxvp
!----------------------
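! mma_lxvp(offset, addr) loads a 256-bit __vector_pair from addr plus a byte
! offset; the lowering is checked against the @llvm.ppc.vsx.lxvp intrinsic.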

subroutine mma_lxvp_test_i2(v1, offset, vp)
use, intrinsic :: mma
integer(2) :: offset
vector(integer(2)) :: v1
__vector_pair :: vp
vp = mma_lxvp(offset, v1)
end subroutine mma_lxvp_test_i2

!LLVMIR-LABEL: @mma_lxvp_test_i2_
!LLVMIR: %[[offset:.*]] = load i16, ptr %1, align 2
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %0, i16 %[[offset]]
!LLVMIR: %[[call:.*]] = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %[[addr]])
!LLVMIR: store <256 x i1> %[[call]], ptr %2, align 32
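
!----------------------
! vec_cvspbf16 / vec_cvbf16spn
!----------------------
! Conversions between single-precision and bfloat16 vectors; these lower to
! @llvm.ppc.vsx.xvcvspbf16 and @llvm.ppc.vsx.xvcvbf16spn respectively.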

subroutine test_cvspbf16()
implicit none
vector(unsigned(1)) :: v1, v2
v1 = vec_cvspbf16(v2)
end subroutine test_cvspbf16

!LLVMIR-LABEL: @test_cvspbf16_
!LLVMIR: %1 = alloca <16 x i8>, i64 1, align 16
!LLVMIR: %2 = alloca <16 x i8>, i64 1, align 16
!LLVMIR: %3 = load <16 x i8>, ptr %1, align 16
!LLVMIR: %4 = call <16 x i8> @llvm.ppc.vsx.xvcvspbf16(<16 x i8> %3)
!LLVMIR: store <16 x i8> %4, ptr %2, align 16

subroutine test_cvbf16spn()
implicit none
vector(unsigned(1)) :: v1, v2
v1 = vec_cvbf16spn(v2)
end subroutine test_cvbf16spn

!LLVMIR-LABEL: @test_cvbf16spn_
!LLVMIR: %1 = alloca <16 x i8>, i64 1, align 16
!LLVMIR: %2 = alloca <16 x i8>, i64 1, align 16
!LLVMIR: %3 = load <16 x i8>, ptr %1, align 16
!LLVMIR: %4 = call <16 x i8> @llvm.ppc.vsx.xvcvbf16spn(<16 x i8> %3)
!LLVMIR: store <16 x i8> %4, ptr %2, align 16

!----------------------
! vec_lxvp
!----------------------
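! vec_lxvp accepts scalar and array vector operands of any element kind; the
! kind of the integer offset argument determines the index type of the GEP
! that forms the load address.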

subroutine vec_lxvp_test_i2(v1, offset, vp)
integer(2) :: offset
vector(integer(2)) :: v1
__vector_pair :: vp
vp = vec_lxvp(offset, v1)
end subroutine vec_lxvp_test_i2

!LLVMIR-LABEL: @vec_lxvp_test_i2_
!LLVMIR: %[[offset:.*]] = load i16, ptr %1, align 2
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %0, i16 %[[offset]]
!LLVMIR: %[[call:.*]] = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %[[addr]])
!LLVMIR: store <256 x i1> %[[call]], ptr %2, align 32

subroutine vec_lxvp_test_i4(v1, offset, vp)
integer(2) :: offset
vector(integer(4)) :: v1
__vector_pair :: vp
vp = vec_lxvp(offset, v1)
end subroutine vec_lxvp_test_i4

!LLVMIR-LABEL: @vec_lxvp_test_i4_
!LLVMIR: %[[offset:.*]] = load i16, ptr %1, align 2
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %0, i16 %[[offset]]
!LLVMIR: %[[call:.*]] = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %[[addr]])
!LLVMIR: store <256 x i1> %[[call]], ptr %2, align 32

subroutine vec_lxvp_test_u2(v1, offset, vp)
integer(2) :: offset
vector(unsigned(2)) :: v1
__vector_pair :: vp
vp = vec_lxvp(offset, v1)
end subroutine vec_lxvp_test_u2

!LLVMIR-LABEL: @vec_lxvp_test_u2_
!LLVMIR: %[[offset:.*]] = load i16, ptr %1, align 2
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %0, i16 %[[offset]]
!LLVMIR: %[[call:.*]] = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %[[addr]])
!LLVMIR: store <256 x i1> %[[call]], ptr %2, align 32

subroutine vec_lxvp_test_u4(v1, offset, vp)
integer(2) :: offset
vector(unsigned(4)) :: v1
__vector_pair :: vp
vp = vec_lxvp(offset, v1)
end subroutine vec_lxvp_test_u4

!LLVMIR-LABEL: @vec_lxvp_test_u4_
!LLVMIR: %[[offset:.*]] = load i16, ptr %1, align 2
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %0, i16 %[[offset]]
!LLVMIR: %[[call:.*]] = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %[[addr]])
!LLVMIR: store <256 x i1> %[[call]], ptr %2, align 32

subroutine vec_lxvp_test_r4(v1, offset, vp)
integer(2) :: offset
vector(real(4)) :: v1
__vector_pair :: vp
vp = vec_lxvp(offset, v1)
end subroutine vec_lxvp_test_r4

!LLVMIR-LABEL: @vec_lxvp_test_r4_
!LLVMIR: %[[offset:.*]] = load i16, ptr %1, align 2
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %0, i16 %[[offset]]
!LLVMIR: %[[call:.*]] = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %[[addr]])
!LLVMIR: store <256 x i1> %[[call]], ptr %2, align 32

subroutine vec_lxvp_test_r8(v1, offset, vp)
integer(2) :: offset
vector(real(8)) :: v1
__vector_pair :: vp
vp = vec_lxvp(offset, v1)
end subroutine vec_lxvp_test_r8

!LLVMIR-LABEL: @vec_lxvp_test_r8_
!LLVMIR: %[[offset:.*]] = load i16, ptr %1, align 2
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %0, i16 %[[offset]]
!LLVMIR: %[[call:.*]] = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %[[addr]])
!LLVMIR: store <256 x i1> %[[call]], ptr %2, align 32

subroutine vec_lxvp_test_vp(v1, offset, vp)
integer(2) :: offset
__vector_pair :: v1
__vector_pair :: vp
vp = vec_lxvp(offset, v1)
end subroutine vec_lxvp_test_vp

!LLVMIR-LABEL: @vec_lxvp_test_vp_
!LLVMIR: %[[offset:.*]] = load i16, ptr %1, align 2
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %0, i16 %[[offset]]
!LLVMIR: %[[call:.*]] = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %[[addr]])
!LLVMIR: store <256 x i1> %[[call]], ptr %2, align 32

subroutine vec_lxvp_test_i2_arr(v1, offset, vp)
integer :: offset
vector(integer(2)) :: v1(10)
__vector_pair :: vp
vp = vec_lxvp(offset, v1)
end subroutine vec_lxvp_test_i2_arr

!LLVMIR-LABEL: @vec_lxvp_test_i2_arr_
!LLVMIR: %[[offset:.*]] = load i32, ptr %1, align 4
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %0, i32 %[[offset]]
!LLVMIR: %[[call:.*]] = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %[[addr]])
!LLVMIR: store <256 x i1> %[[call]], ptr %2, align 32

subroutine vec_lxvp_test_i4_arr(v1, offset, vp)
integer :: offset
vector(integer(4)) :: v1(10)
__vector_pair :: vp
vp = vec_lxvp(offset, v1)
end subroutine vec_lxvp_test_i4_arr

!LLVMIR-LABEL: @vec_lxvp_test_i4_arr_
!LLVMIR: %[[offset:.*]] = load i32, ptr %1, align 4
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %0, i32 %[[offset]]
!LLVMIR: %[[call:.*]] = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %[[addr]])
!LLVMIR: store <256 x i1> %[[call]], ptr %2, align 32

subroutine vec_lxvp_test_u2_arr(v1, offset, vp)
integer :: offset
vector(unsigned(2)) :: v1(10)
__vector_pair :: vp
vp = vec_lxvp(offset, v1)
end subroutine vec_lxvp_test_u2_arr

!LLVMIR-LABEL: @vec_lxvp_test_u2_arr_
!LLVMIR: %[[offset:.*]] = load i32, ptr %1, align 4
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %0, i32 %[[offset]]
!LLVMIR: %[[call:.*]] = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %[[addr]])
!LLVMIR: store <256 x i1> %[[call]], ptr %2, align 32

subroutine vec_lxvp_test_u4_arr(v1, offset, vp)
integer :: offset
vector(unsigned(4)) :: v1(10)
__vector_pair :: vp
vp = vec_lxvp(offset, v1)
end subroutine vec_lxvp_test_u4_arr

!LLVMIR-LABEL: @vec_lxvp_test_u4_arr_
!LLVMIR: %[[offset:.*]] = load i32, ptr %1, align 4
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %0, i32 %[[offset]]
!LLVMIR: %[[call:.*]] = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %[[addr]])
!LLVMIR: store <256 x i1> %[[call]], ptr %2, align 32

subroutine vec_lxvp_test_r4_arr(v1, offset, vp)
integer :: offset
vector(real(4)) :: v1(10)
__vector_pair :: vp
vp = vec_lxvp(offset, v1)
end subroutine vec_lxvp_test_r4_arr

!LLVMIR-LABEL: @vec_lxvp_test_r4_arr_
!LLVMIR: %[[offset:.*]] = load i32, ptr %1, align 4
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %0, i32 %[[offset]]
!LLVMIR: %[[call:.*]] = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %[[addr]])
!LLVMIR: store <256 x i1> %[[call]], ptr %2, align 32

subroutine vec_lxvp_test_r8_arr(v1, offset, vp)
integer :: offset
vector(real(8)) :: v1(10)
__vector_pair :: vp
vp = vec_lxvp(offset, v1)
end subroutine vec_lxvp_test_r8_arr

!LLVMIR-LABEL: @vec_lxvp_test_r8_arr_
!LLVMIR: %[[offset:.*]] = load i32, ptr %1, align 4
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %0, i32 %[[offset]]
!LLVMIR: %[[call:.*]] = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %[[addr]])
!LLVMIR: store <256 x i1> %[[call]], ptr %2, align 32

subroutine vec_lxvp_test_vp_arr(v1, offset, vp)
integer(8) :: offset
__vector_pair :: v1(10)
__vector_pair :: vp
vp = vec_lxvp(offset, v1)
end subroutine vec_lxvp_test_vp_arr

!LLVMIR-LABEL: @vec_lxvp_test_vp_arr_
!LLVMIR: %[[offset:.*]] = load i64, ptr %1, align 8
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %0, i64 %[[offset]]
!LLVMIR: %[[call:.*]] = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %[[addr]])
!LLVMIR: store <256 x i1> %[[call]], ptr %2, align 32

!----------------------
! vsx_lxvp
!----------------------
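! vsx_lxvp lowers exactly like vec_lxvp; a few representative operand
! combinations are spot-checked here.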

subroutine vsx_lxvp_test_i4(v1, offset, vp)
integer(2) :: offset
vector(integer(4)) :: v1
__vector_pair :: vp
vp = vsx_lxvp(offset, v1)
end subroutine vsx_lxvp_test_i4

!LLVMIR-LABEL: @vsx_lxvp_test_i4_
!LLVMIR: %[[offset:.*]] = load i16, ptr %1, align 2
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %0, i16 %[[offset]]
!LLVMIR: %[[call:.*]] = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %[[addr]])
!LLVMIR: store <256 x i1> %[[call]], ptr %2, align 32

subroutine vsx_lxvp_test_r8(v1, offset, vp)
integer(2) :: offset
vector(real(8)) :: v1
__vector_pair :: vp
vp = vsx_lxvp(offset, v1)
end subroutine vsx_lxvp_test_r8

!LLVMIR-LABEL: @vsx_lxvp_test_r8_
!LLVMIR: %[[offset:.*]] = load i16, ptr %1, align 2
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %0, i16 %[[offset]]
!LLVMIR: %[[call:.*]] = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %[[addr]])
!LLVMIR: store <256 x i1> %[[call]], ptr %2, align 32

subroutine vsx_lxvp_test_i2_arr(v1, offset, vp)
integer :: offset
vector(integer(2)) :: v1(10)
__vector_pair :: vp
vp = vsx_lxvp(offset, v1)
end subroutine vsx_lxvp_test_i2_arr

!LLVMIR-LABEL: @vsx_lxvp_test_i2_arr_
!LLVMIR: %[[offset:.*]] = load i32, ptr %1, align 4
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %0, i32 %[[offset]]
!LLVMIR: %[[call:.*]] = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %[[addr]])
!LLVMIR: store <256 x i1> %[[call]], ptr %2, align 32

subroutine vsx_lxvp_test_vp_arr(v1, offset, vp)
integer(8) :: offset
__vector_pair :: v1(10)
__vector_pair :: vp
vp = vsx_lxvp(offset, v1)
end subroutine vsx_lxvp_test_vp_arr

!LLVMIR-LABEL: @vsx_lxvp_test_vp_arr_
!LLVMIR: %[[offset:.*]] = load i64, ptr %1, align 8
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %0, i64 %[[offset]]
!LLVMIR: %[[call:.*]] = call <256 x i1> @llvm.ppc.vsx.lxvp(ptr %[[addr]])
!LLVMIR: store <256 x i1> %[[call]], ptr %2, align 32

!----------------------
! mma_stxvp
!----------------------
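! mma_stxvp(vp, offset, addr) stores the 256-bit pair vp to addr plus a byte
! offset; the lowering is checked against the @llvm.ppc.vsx.stxvp intrinsic.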

subroutine test_mma_stxvp_i1(vp, offset, v1)
use, intrinsic :: mma
integer(1) :: offset
vector(integer(2)) :: v1
__vector_pair :: vp
call mma_stxvp(vp, offset, v1)
end subroutine test_mma_stxvp_i1

!LLVMIR-LABEL: @test_mma_stxvp_i1_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i8, ptr %1, align 1
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i8 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

!----------------------
! vec_stxvp
!----------------------
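! vec_stxvp is the store counterpart of vec_lxvp; as above, the kind of the
! integer offset argument determines the GEP index type.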

subroutine test_vec_stxvp_i1(vp, offset, v1)
integer(1) :: offset
vector(integer(2)) :: v1
__vector_pair :: vp
call vec_stxvp(vp, offset, v1)
end subroutine test_vec_stxvp_i1

!LLVMIR-LABEL: @test_vec_stxvp_i1_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i8, ptr %1, align 1
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i8 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

subroutine test_vec_stxvp_i8(vp, offset, v1)
integer(8) :: offset
vector(integer(8)) :: v1
__vector_pair :: vp
call vec_stxvp(vp, offset, v1)
end subroutine test_vec_stxvp_i8

!LLVMIR-LABEL: @test_vec_stxvp_i8_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i64, ptr %1, align 8
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i64 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

subroutine test_vec_stxvp_vi2(vp, offset, v1)
integer(2) :: offset
vector(integer(2)) :: v1
__vector_pair :: vp
call vec_stxvp(vp, offset, v1)
end subroutine test_vec_stxvp_vi2

!LLVMIR-LABEL: @test_vec_stxvp_vi2_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i16, ptr %1, align 2
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i16 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

subroutine test_vec_stxvp_vi4(vp, offset, v1)
integer(2) :: offset
vector(integer(4)) :: v1
__vector_pair :: vp
call vec_stxvp(vp, offset, v1)
end subroutine test_vec_stxvp_vi4

!LLVMIR-LABEL: @test_vec_stxvp_vi4_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i16, ptr %1, align 2
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i16 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

subroutine test_vec_stxvp_vu2(vp, offset, v1)
integer(2) :: offset
vector(unsigned(2)) :: v1
__vector_pair :: vp
call vec_stxvp(vp, offset, v1)
end subroutine test_vec_stxvp_vu2

!LLVMIR-LABEL: @test_vec_stxvp_vu2_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i16, ptr %1, align 2
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i16 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

subroutine test_vec_stxvp_vu4(vp, offset, v1)
integer(2) :: offset
vector(unsigned(4)) :: v1
__vector_pair :: vp
call vec_stxvp(vp, offset, v1)
end subroutine test_vec_stxvp_vu4

!LLVMIR-LABEL: @test_vec_stxvp_vu4_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i16, ptr %1, align 2
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i16 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

subroutine test_vec_stxvp_vr4(vp, offset, v1)
integer(2) :: offset
vector(real(4)) :: v1
__vector_pair :: vp
call vec_stxvp(vp, offset, v1)
end subroutine test_vec_stxvp_vr4

!LLVMIR-LABEL: @test_vec_stxvp_vr4_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i16, ptr %1, align 2
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i16 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

subroutine test_vec_stxvp_vr8(vp, offset, v1)
integer(2) :: offset
vector(real(8)) :: v1
__vector_pair :: vp
call vec_stxvp(vp, offset, v1)
end subroutine test_vec_stxvp_vr8

!LLVMIR-LABEL: @test_vec_stxvp_vr8_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i16, ptr %1, align 2
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i16 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

subroutine test_vec_stxvp_vvp(vp, offset, v1)
integer(2) :: offset
__vector_pair :: v1
__vector_pair :: vp
call vec_stxvp(vp, offset, v1)
end subroutine test_vec_stxvp_vvp

!LLVMIR-LABEL: @test_vec_stxvp_vvp_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i16, ptr %1, align 2
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i16 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

subroutine test_vec_stxvp_vi2_arr(vp, offset, v1)
integer :: offset
vector(integer(2)) :: v1(10)
__vector_pair :: vp
call vec_stxvp(vp, offset, v1)
end subroutine test_vec_stxvp_vi2_arr

!LLVMIR-LABEL: @test_vec_stxvp_vi2_arr_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i32, ptr %1, align 4
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

subroutine test_vec_stxvp_vi4_arr(vp, offset, v1)
integer :: offset
vector(integer(4)) :: v1(10)
__vector_pair :: vp
call vec_stxvp(vp, offset, v1)
end subroutine test_vec_stxvp_vi4_arr

!LLVMIR-LABEL: @test_vec_stxvp_vi4_arr_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i32, ptr %1, align 4
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

subroutine test_vec_stxvp_vu2_arr(vp, offset, v1)
integer :: offset
vector(unsigned(2)) :: v1(11)
__vector_pair :: vp
call vec_stxvp(vp, offset, v1)
end subroutine test_vec_stxvp_vu2_arr

!LLVMIR-LABEL: @test_vec_stxvp_vu2_arr_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i32, ptr %1, align 4
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

subroutine test_vec_stxvp_vu4_arr(vp, offset, v1)
integer(8) :: offset
vector(unsigned(4)) :: v1(11,3)
__vector_pair :: vp
call vec_stxvp(vp, offset, v1)
end subroutine test_vec_stxvp_vu4_arr

!LLVMIR-LABEL: @test_vec_stxvp_vu4_arr_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i64, ptr %1, align 8
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i64 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

subroutine test_vec_stxvp_vr4_arr(vp, offset, v1)
integer :: offset
vector(real(4)) :: v1(10)
__vector_pair :: vp
call vec_stxvp(vp, offset, v1)
end subroutine test_vec_stxvp_vr4_arr

!LLVMIR-LABEL: @test_vec_stxvp_vr4_arr_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i32, ptr %1, align 4
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

subroutine test_vec_stxvp_vr8_arr(vp, offset, v1)
integer :: offset
vector(real(8)) :: v1(10)
__vector_pair :: vp
call vec_stxvp(vp, offset, v1)
end subroutine test_vec_stxvp_vr8_arr

!LLVMIR-LABEL: @test_vec_stxvp_vr8_arr_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i32, ptr %1, align 4
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

subroutine test_vec_stxvp_vp_arr(vp, offset, v1)
integer :: offset
__vector_pair :: v1(10)
__vector_pair :: vp
call vec_stxvp(vp, offset, v1)
end subroutine test_vec_stxvp_vp_arr

!LLVMIR-LABEL: @test_vec_stxvp_vp_arr_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i32, ptr %1, align 4
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

!----------------------
! vsx_stxvp
!----------------------
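! vsx_stxvp lowers exactly like vec_stxvp; spot-checked below.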

subroutine test_vsx_stxvp_i1(vp, offset, v1)
integer(1) :: offset
vector(integer(2)) :: v1
__vector_pair :: vp
call vsx_stxvp(vp, offset, v1)
end subroutine test_vsx_stxvp_i1

!LLVMIR-LABEL: @test_vsx_stxvp_i1_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i8, ptr %1, align 1
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i8 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

subroutine test_vsx_stxvp_vi2(vp, offset, v1)
integer(2) :: offset
vector(integer(2)) :: v1
__vector_pair :: vp
call vsx_stxvp(vp, offset, v1)
end subroutine test_vsx_stxvp_vi2

!LLVMIR-LABEL: @test_vsx_stxvp_vi2_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i16, ptr %1, align 2
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i16 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

subroutine test_vsx_stxvp_vr8_arr(vp, offset, v1)
integer :: offset
vector(real(8)) :: v1(10)
__vector_pair :: vp
call vsx_stxvp(vp, offset, v1)
end subroutine test_vsx_stxvp_vr8_arr

!LLVMIR-LABEL: @test_vsx_stxvp_vr8_arr_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i32, ptr %1, align 4
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])

subroutine test_vsx_stxvp_vp_arr(vp, offset, v1)
integer :: offset
__vector_pair :: v1(10)
__vector_pair :: vp
call vsx_stxvp(vp, offset, v1)
end subroutine test_vsx_stxvp_vp_arr

!LLVMIR-LABEL: @test_vsx_stxvp_vp_arr_
!LLVMIR: %[[vp:.*]] = load <256 x i1>, ptr %0, align 32
!LLVMIR: %[[offset:.*]] = load i32, ptr %1, align 4
!LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[offset]]
!LLVMIR: call void @llvm.ppc.vsx.stxvp(<256 x i1> %[[vp]], ptr %[[addr]])