! flang/test/Lower/PowerPC/ppc-vec-load-elem-order.f90
! RUN: %flang_fc1 -flang-experimental-hlfir -emit-llvm %s -fno-ppc-native-vector-element-order -triple ppc64le-unknown-linux -o - | FileCheck --check-prefixes="LLVMIR" %s
! REQUIRES: target=powerpc{{.*}}
!-------------------
! vec_ld
!-------------------

! Lowers vec_ld of i8-offset/i8-element vectors to lvx plus a reversing shuffle.
! CHECK-LABEL: @vec_ld_testi8
subroutine vec_ld_testi8(arg1, arg2, res)
  integer(1) :: arg1
  vector(integer(1)) :: arg2, res
  res = vec_ld(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i8, ptr %0, align 1
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i8 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call <4 x i32> @llvm.ppc.altivec.lvx(ptr %[[addr]])
! LLVMIR: %[[bc:.*]] = bitcast <4 x i32> %[[ld]] to <16 x i8>
! LLVMIR: %[[shflv:.*]] = shufflevector <16 x i8> %[[bc]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <16 x i8> %[[shflv]], ptr %2, align 16
end subroutine vec_ld_testi8
! CHECK-LABEL: @vec_ld_testi16
subroutine vec_ld_testi16(arg1, arg2, res)
  integer(2) :: arg1
  vector(integer(2)) :: arg2, res
  res = vec_ld(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i16, ptr %0, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i16 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call <4 x i32> @llvm.ppc.altivec.lvx(ptr %[[addr]])
! LLVMIR: %[[bc:.*]] = bitcast <4 x i32> %[[ld]] to <8 x i16>
! LLVMIR: %[[shflv:.*]] = shufflevector <8 x i16> %[[bc]], <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <8 x i16> %[[shflv]], ptr %2, align 16
end subroutine vec_ld_testi16
! CHECK-LABEL: @vec_ld_testi32
subroutine vec_ld_testi32(arg1, arg2, res)
  integer(4) :: arg1
  vector(integer(4)) :: arg2, res
  res = vec_ld(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i32, ptr %0, align 4
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i32 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call <4 x i32> @llvm.ppc.altivec.lvx(ptr %[[addr]])
! LLVMIR: %[[shflv:.*]] = shufflevector <4 x i32> %[[ld]], <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <4 x i32> %[[shflv]], ptr %2, align 16
end subroutine vec_ld_testi32
! Offset is integer(8): checks the trunc-to-i32 path before the lvx address GEP.
! CHECK-LABEL: @vec_ld_testf32
subroutine vec_ld_testf32(arg1, arg2, res)
  integer(8) :: arg1
  vector(real(4)) :: arg2, res
  res = vec_ld(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8
! LLVMIR: %[[i4:.*]] = trunc i64 %[[arg1]] to i32
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i32 %[[i4]]
! LLVMIR: %[[ld:.*]] = call <4 x i32> @llvm.ppc.altivec.lvx(ptr %[[addr]])
! LLVMIR: %[[bc:.*]] = bitcast <4 x i32> %[[ld]] to <4 x float>
! LLVMIR: %[[shflv:.*]] = shufflevector <4 x float> %[[bc]], <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <4 x float> %[[shflv]], ptr %2, align 16
end subroutine vec_ld_testf32
! CHECK-LABEL: @vec_ld_testu32
subroutine vec_ld_testu32(arg1, arg2, res)
  integer(1) :: arg1
  vector(unsigned(4)) :: arg2, res
  res = vec_ld(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i8, ptr %0, align 1
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i8 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call <4 x i32> @llvm.ppc.altivec.lvx(ptr %[[addr]])
! LLVMIR: %[[shflv:.*]] = shufflevector <4 x i32> %[[ld]], <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <4 x i32> %[[shflv]], ptr %2, align 16
end subroutine vec_ld_testu32
! Second argument is a non-vector array: same lowering as the vector case.
! CHECK-LABEL: @vec_ld_testi32a
subroutine vec_ld_testi32a(arg1, arg2, res)
  integer(4) :: arg1
  integer(4) :: arg2(10)
  vector(integer(4)) :: res
  res = vec_ld(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i32, ptr %0, align 4
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i32 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call <4 x i32> @llvm.ppc.altivec.lvx(ptr %[[addr]])
! LLVMIR: %[[shflv:.*]] = shufflevector <4 x i32> %[[ld]], <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <4 x i32> %[[shflv]], ptr %2, align 16
end subroutine vec_ld_testi32a
! Second argument is a rank-3 vector array.
! CHECK-LABEL: @vec_ld_testf32av
subroutine vec_ld_testf32av(arg1, arg2, res)
  integer(8) :: arg1
  vector(real(4)) :: arg2(2, 4, 8)
  vector(real(4)) :: res
  res = vec_ld(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8
! LLVMIR: %[[i4:.*]] = trunc i64 %[[arg1]] to i32
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i32 %[[i4]]
! LLVMIR: %[[ld:.*]] = call <4 x i32> @llvm.ppc.altivec.lvx(ptr %[[addr]])
! LLVMIR: %[[bc:.*]] = bitcast <4 x i32> %[[ld]] to <4 x float>
! LLVMIR: %[[shflv:.*]] = shufflevector <4 x float> %[[bc]], <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <4 x float> %[[shflv]], ptr %2, align 16
end subroutine vec_ld_testf32av
! Second argument is a real scalar.
! CHECK-LABEL: @vec_ld_testi32s
subroutine vec_ld_testi32s(arg1, arg2, res)
  integer(4) :: arg1
  real(4) :: arg2
  vector(real(4)) :: res
  res = vec_ld(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i32, ptr %0, align 4
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i32 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call <4 x i32> @llvm.ppc.altivec.lvx(ptr %[[addr]])
! LLVMIR: %[[bc:.*]] = bitcast <4 x i32> %[[ld]] to <4 x float>
! LLVMIR: %[[shflv:.*]] = shufflevector <4 x float> %[[bc]], <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <4 x float> %[[shflv]], ptr %2, align 16
end subroutine vec_ld_testi32s
!-------------------
! vec_lde
!-------------------

! vec_lde lowers to the element-load intrinsic (lvebx) plus a reversing shuffle.
! CHECK-LABEL: @vec_lde_testi8s
subroutine vec_lde_testi8s(arg1, arg2, res)
  integer(1) :: arg1
  integer(1) :: arg2
  vector(integer(1)) :: res
  res = vec_lde(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i8, ptr %0, align 1
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i8 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call <16 x i8> @llvm.ppc.altivec.lvebx(ptr %[[addr]])
! LLVMIR: %[[shflv:.*]] = shufflevector <16 x i8> %[[ld]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <16 x i8> %[[shflv]], ptr %2, align 16
end subroutine vec_lde_testi8s
! CHECK-LABEL: @vec_lde_testi16a
subroutine vec_lde_testi16a(arg1, arg2, res)
  integer(2) :: arg1
  integer(2) :: arg2(2, 11, 7)
  vector(integer(2)) :: res
  res = vec_lde(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i16, ptr %0, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i16 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call <8 x i16> @llvm.ppc.altivec.lvehx(ptr %[[addr]])
! LLVMIR: %[[shflv:.*]] = shufflevector <8 x i16> %[[ld]], <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <8 x i16> %[[shflv]], ptr %2, align 16
end subroutine vec_lde_testi16a
! CHECK-LABEL: @vec_lde_testi32a
subroutine vec_lde_testi32a(arg1, arg2, res)
  integer(4) :: arg1
  integer(4) :: arg2(5)
  vector(integer(4)) :: res
  res = vec_lde(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i32, ptr %0, align 4
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i32 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call <4 x i32> @llvm.ppc.altivec.lvewx(ptr %[[addr]])
! LLVMIR: %[[shflv:.*]] = shufflevector <4 x i32> %[[ld]], <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <4 x i32> %[[shflv]], ptr %2, align 16
end subroutine vec_lde_testi32a
! CHECK-LABEL: @vec_lde_testf32a
subroutine vec_lde_testf32a(arg1, arg2, res)
  integer(8) :: arg1
  real(4) :: arg2(11)
  vector(real(4)) :: res
  res = vec_lde(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call <4 x i32> @llvm.ppc.altivec.lvewx(ptr %[[addr]])
! LLVMIR: %[[bc:.*]] = bitcast <4 x i32> %[[ld]] to <4 x float>
! LLVMIR: %[[shflv:.*]] = shufflevector <4 x float> %[[bc]], <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <4 x float> %[[shflv]], ptr %2, align 16
end subroutine vec_lde_testf32a
!-------------------
! vec_lvsl
!-------------------

! vec_lvsl: offset is sign-extended to i64 and masked via shl/ashr by 56
! before feeding the lvsl address; no element-order shuffle is emitted.
! CHECK-LABEL: @vec_lvsl_testi8s
subroutine vec_lvsl_testi8s(arg1, arg2, res)
  integer(1) :: arg1
  integer(1) :: arg2
  vector(unsigned(1)) :: res
  res = vec_lvsl(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i8, ptr %0, align 1
! LLVMIR: %[[iext:.*]] = sext i8 %[[arg1]] to i64
! LLVMIR: %[[lshft:.*]] = shl i64 %[[iext]], 56
! LLVMIR: %[[rshft:.*]] = ashr i64 %[[lshft]], 56
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[rshft]]
! LLVMIR: %[[ld:.*]] = call <16 x i8> @llvm.ppc.altivec.lvsl(ptr %[[addr]])
! LLVMIR: store <16 x i8> %[[ld]], ptr %2, align 16
end subroutine vec_lvsl_testi8s
! CHECK-LABEL: @vec_lvsl_testi16a
subroutine vec_lvsl_testi16a(arg1, arg2, res)
  integer(2) :: arg1
  integer(2) :: arg2(4)
  vector(unsigned(1)) :: res
  res = vec_lvsl(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i16, ptr %0, align 2
! LLVMIR: %[[iext:.*]] = sext i16 %[[arg1]] to i64
! LLVMIR: %[[lshft:.*]] = shl i64 %[[iext]], 56
! LLVMIR: %[[rshft:.*]] = ashr i64 %[[lshft]], 56
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[rshft]]
! LLVMIR: %[[ld:.*]] = call <16 x i8> @llvm.ppc.altivec.lvsl(ptr %[[addr]])
! LLVMIR: store <16 x i8> %[[ld]], ptr %2, align 16
end subroutine vec_lvsl_testi16a
! CHECK-LABEL: @vec_lvsl_testi32a
subroutine vec_lvsl_testi32a(arg1, arg2, res)
  integer(4) :: arg1
  integer(4) :: arg2(11, 3, 4)
  vector(unsigned(1)) :: res
  res = vec_lvsl(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i32, ptr %0, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[arg1]] to i64
! LLVMIR: %[[lshft:.*]] = shl i64 %[[iext]], 56
! LLVMIR: %[[rshft:.*]] = ashr i64 %[[lshft]], 56
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[rshft]]
! LLVMIR: %[[ld:.*]] = call <16 x i8> @llvm.ppc.altivec.lvsl(ptr %[[addr]])
! LLVMIR: store <16 x i8> %[[ld]], ptr %2, align 16
end subroutine vec_lvsl_testi32a
! integer(8) offset is already i64, so no sext is expected here.
! CHECK-LABEL: @vec_lvsl_testf32a
subroutine vec_lvsl_testf32a(arg1, arg2, res)
  integer(8) :: arg1
  real(4) :: arg2(51)
  vector(unsigned(1)) :: res
  res = vec_lvsl(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8
! LLVMIR: %[[lshft:.*]] = shl i64 %[[arg1]], 56
! LLVMIR: %[[rshft:.*]] = ashr i64 %[[lshft]], 56
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[rshft]]
! LLVMIR: %[[ld:.*]] = call <16 x i8> @llvm.ppc.altivec.lvsl(ptr %[[addr]])
! LLVMIR: store <16 x i8> %[[ld]], ptr %2, align 16
end subroutine vec_lvsl_testf32a
!-------------------
! vec_lvsr
!-------------------

! Same address computation as vec_lvsl, but lowering targets lvsr.
! CHECK-LABEL: @vec_lvsr_testi8s
subroutine vec_lvsr_testi8s(arg1, arg2, res)
  integer(1) :: arg1
  integer(1) :: arg2
  vector(unsigned(1)) :: res
  res = vec_lvsr(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i8, ptr %0, align 1
! LLVMIR: %[[iext:.*]] = sext i8 %[[arg1]] to i64
! LLVMIR: %[[lshft:.*]] = shl i64 %[[iext]], 56
! LLVMIR: %[[rshft:.*]] = ashr i64 %[[lshft]], 56
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[rshft]]
! LLVMIR: %[[ld:.*]] = call <16 x i8> @llvm.ppc.altivec.lvsr(ptr %[[addr]])
! LLVMIR: store <16 x i8> %[[ld]], ptr %2, align 16
end subroutine vec_lvsr_testi8s
! CHECK-LABEL: @vec_lvsr_testi16a
subroutine vec_lvsr_testi16a(arg1, arg2, res)
  integer(2) :: arg1
  integer(2) :: arg2(41)
  vector(unsigned(1)) :: res
  res = vec_lvsr(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i16, ptr %0, align 2
! LLVMIR: %[[iext:.*]] = sext i16 %[[arg1]] to i64
! LLVMIR: %[[lshft:.*]] = shl i64 %[[iext]], 56
! LLVMIR: %[[rshft:.*]] = ashr i64 %[[lshft]], 56
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[rshft]]
! LLVMIR: %[[ld:.*]] = call <16 x i8> @llvm.ppc.altivec.lvsr(ptr %[[addr]])
! LLVMIR: store <16 x i8> %[[ld]], ptr %2, align 16
end subroutine vec_lvsr_testi16a
! CHECK-LABEL: @vec_lvsr_testi32a
subroutine vec_lvsr_testi32a(arg1, arg2, res)
  integer(4) :: arg1
  integer(4) :: arg2(23, 31, 47)
  vector(unsigned(1)) :: res
  res = vec_lvsr(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i32, ptr %0, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[arg1]] to i64
! LLVMIR: %[[lshft:.*]] = shl i64 %[[iext]], 56
! LLVMIR: %[[rshft:.*]] = ashr i64 %[[lshft]], 56
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[rshft]]
! LLVMIR: %[[ld:.*]] = call <16 x i8> @llvm.ppc.altivec.lvsr(ptr %[[addr]])
! LLVMIR: store <16 x i8> %[[ld]], ptr %2, align 16
end subroutine vec_lvsr_testi32a
! CHECK-LABEL: @vec_lvsr_testf32a
subroutine vec_lvsr_testf32a(arg1, arg2, res)
  integer(8) :: arg1
  real(4) :: arg2
  vector(unsigned(1)) :: res
  res = vec_lvsr(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8
! LLVMIR: %[[lshft:.*]] = shl i64 %[[arg1]], 56
! LLVMIR: %[[rshft:.*]] = ashr i64 %[[lshft]], 56
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[rshft]]
! LLVMIR: %[[ld:.*]] = call <16 x i8> @llvm.ppc.altivec.lvsr(ptr %[[addr]])
! LLVMIR: store <16 x i8> %[[ld]], ptr %2, align 16
end subroutine vec_lvsr_testf32a
!-------------------
! vec_lxv
!-------------------

! vec_lxv lowers to a plain unaligned vector load; no element-order shuffle.
! CHECK-LABEL: @vec_lxv_testi8a
subroutine vec_lxv_testi8a(arg1, arg2, res)
  integer(1) :: arg1
  integer(1) :: arg2(4)
  vector(integer(1)) :: res
  res = vec_lxv(arg1, arg2)

! LLVMIR: %[[offset:.*]] = load i8, ptr %0, align 1
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i8 %[[offset]]
! LLVMIR: %[[res:.*]] = load <16 x i8>, ptr %[[addr]], align 1
! LLVMIR: store <16 x i8> %[[res]], ptr %2, align 16
end subroutine vec_lxv_testi8a
! CHECK-LABEL: @vec_lxv_testi16a
subroutine vec_lxv_testi16a(arg1, arg2, res)
  integer(2) :: arg1
  integer(2) :: arg2(2, 4, 8)
  vector(integer(2)) :: res
  res = vec_lxv(arg1, arg2)

! LLVMIR: %[[offset:.*]] = load i16, ptr %0, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i16 %[[offset]]
! LLVMIR: %[[res:.*]] = load <8 x i16>, ptr %[[addr]], align 1
! LLVMIR: store <8 x i16> %[[res]], ptr %2, align 16
end subroutine vec_lxv_testi16a
! CHECK-LABEL: @vec_lxv_testi32a
subroutine vec_lxv_testi32a(arg1, arg2, res)
  integer(4) :: arg1
  integer(4) :: arg2(2, 4, 8)
  vector(integer(4)) :: res
  res = vec_lxv(arg1, arg2)

! LLVMIR: %[[offset:.*]] = load i32, ptr %0, align 4
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i32 %[[offset]]
! LLVMIR: %[[res:.*]] = load <4 x i32>, ptr %[[addr]], align 1
! LLVMIR: store <4 x i32> %[[res]], ptr %2, align 16
end subroutine vec_lxv_testi32a
! CHECK-LABEL: @vec_lxv_testf32a
subroutine vec_lxv_testf32a(arg1, arg2, res)
  integer(2) :: arg1
  real(4) :: arg2(4)
  vector(real(4)) :: res
  res = vec_lxv(arg1, arg2)

! LLVMIR: %[[offset:.*]] = load i16, ptr %0, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i16 %[[offset]]
! LLVMIR: %[[res:.*]] = load <4 x float>, ptr %[[addr]], align 1
! LLVMIR: store <4 x float> %[[res]], ptr %2, align 16
end subroutine vec_lxv_testf32a
! CHECK-LABEL: @vec_lxv_testf64a
subroutine vec_lxv_testf64a(arg1, arg2, res)
  integer(8) :: arg1
  real(8) :: arg2(4)
  vector(real(8)) :: res
  res = vec_lxv(arg1, arg2)

! LLVMIR: %[[offset:.*]] = load i64, ptr %0, align 8
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[offset]]
! LLVMIR: %[[res:.*]] = load <2 x double>, ptr %[[addr]], align 1
! LLVMIR: store <2 x double> %[[res]], ptr %2, align 16
end subroutine vec_lxv_testf64a
!-------------------
! vec_xl
!-------------------

! vec_xl of i8 elements: unaligned load plus a reversing shuffle.
! CHECK-LABEL: @vec_xl_testi8a
subroutine vec_xl_testi8a(arg1, arg2, res)
  integer(1) :: arg1
  integer(1) :: arg2
  vector(integer(1)) :: res
  res = vec_xl(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i8, ptr %0, align 1
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i8 %[[arg1]]
! LLVMIR: %[[ld:.*]] = load <16 x i8>, ptr %[[addr]], align 1
! LLVMIR: %[[shflv:.*]] = shufflevector <16 x i8> %[[ld]], <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <16 x i8> %[[shflv]], ptr %2, align 16
end subroutine vec_xl_testi8a
! CHECK-LABEL: @vec_xl_testi16a
subroutine vec_xl_testi16a(arg1, arg2, res)
  integer(2) :: arg1
  integer(2) :: arg2(2, 8)
  vector(integer(2)) :: res
  res = vec_xl(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i16, ptr %0, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i16 %[[arg1]]
! LLVMIR: %[[ld:.*]] = load <8 x i16>, ptr %[[addr]], align 1
! LLVMIR: %[[shflv:.*]] = shufflevector <8 x i16> %[[ld]], <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <8 x i16> %[[shflv]], ptr %2, align 16
end subroutine vec_xl_testi16a
! 4-byte elements use the big-endian word-load intrinsic; no shuffle needed.
! CHECK-LABEL: @vec_xl_testi32a
subroutine vec_xl_testi32a(arg1, arg2, res)
  integer(4) :: arg1
  integer(4) :: arg2(2, 4, 8)
  vector(integer(4)) :: res
  res = vec_xl(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i32, ptr %0, align 4
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i32 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(ptr %[[addr]])
! LLVMIR: store <4 x i32> %[[ld]], ptr %2, align 16
end subroutine vec_xl_testi32a
! CHECK-LABEL: @vec_xl_testi64a
subroutine vec_xl_testi64a(arg1, arg2, res)
  integer(8) :: arg1
  integer(8) :: arg2(2, 4, 1)
  vector(integer(8)) :: res
  res = vec_xl(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call contract <2 x double> @llvm.ppc.vsx.lxvd2x.be(ptr %[[addr]])
! LLVMIR: %[[bc:.*]] = bitcast <2 x double> %[[ld]] to <2 x i64>
! LLVMIR: store <2 x i64> %[[bc]], ptr %2, align 16
end subroutine vec_xl_testi64a
! CHECK-LABEL: @vec_xl_testf32a
subroutine vec_xl_testf32a(arg1, arg2, res)
  integer(2) :: arg1
  real(4) :: arg2(4)
  vector(real(4)) :: res
  res = vec_xl(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i16, ptr %0, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i16 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(ptr %[[addr]])
! LLVMIR: %[[bc:.*]] = bitcast <4 x i32> %[[ld]] to <4 x float>
! LLVMIR: store <4 x float> %[[bc]], ptr %2, align 16
end subroutine vec_xl_testf32a
! CHECK-LABEL: @vec_xl_testf64a
subroutine vec_xl_testf64a(arg1, arg2, res)
  integer(8) :: arg1
  real(8) :: arg2(2)
  vector(real(8)) :: res
  res = vec_xl(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call contract <2 x double> @llvm.ppc.vsx.lxvd2x.be(ptr %[[addr]])
! LLVMIR: store <2 x double> %[[ld]], ptr %2, align 16
end subroutine vec_xl_testf64a
!-------------------
! vec_xl_be
!-------------------

! CHECK-LABEL: @vec_xl_be_testi8a
subroutine vec_xl_be_testi8a(arg1, arg2, res)
  integer(1) :: arg1
  integer(1) :: arg2(2, 4, 8)
  vector(integer(1)) :: res
  res = vec_xl_be(arg1, arg2)

! LLVMIR: %4 = load i8, ptr %0, align 1
! LLVMIR: %5 = getelementptr i8, ptr %1, i8 %4
! LLVMIR: %6 = load <16 x i8>, ptr %5, align 1
! LLVMIR: %7 = shufflevector <16 x i8> %6, <16 x i8> undef, <16 x i32> <i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <16 x i8> %7, ptr %2, align 16
end subroutine vec_xl_be_testi8a
! CHECK-LABEL: @vec_xl_be_testi16a
subroutine vec_xl_be_testi16a(arg1, arg2, res)
  integer(2) :: arg1
  integer(2) :: arg2(8,2)
  vector(integer(2)) :: res
  res = vec_xl_be(arg1, arg2)

! LLVMIR: %4 = load i16, ptr %0, align 2
! LLVMIR: %5 = getelementptr i8, ptr %1, i16 %4
! LLVMIR: %6 = load <8 x i16>, ptr %5, align 1
! LLVMIR: %7 = shufflevector <8 x i16> %6, <8 x i16> undef, <8 x i32> <i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <8 x i16> %7, ptr %2, align 16
end subroutine vec_xl_be_testi16a
! CHECK-LABEL: @vec_xl_be_testi32a
subroutine vec_xl_be_testi32a(arg1, arg2, res)
  integer(4) :: arg1
  integer(4) :: arg2(2, 4)
  vector(integer(4)) :: res
  res = vec_xl_be(arg1, arg2)

! LLVMIR: %4 = load i32, ptr %0, align 4
! LLVMIR: %5 = getelementptr i8, ptr %1, i32 %4
! LLVMIR: %6 = load <4 x i32>, ptr %5, align 1
! LLVMIR: %7 = shufflevector <4 x i32> %6, <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <4 x i32> %7, ptr %2, align 16
end subroutine vec_xl_be_testi32a
! CHECK-LABEL: @vec_xl_be_testi64a
subroutine vec_xl_be_testi64a(arg1, arg2, res)
  integer(8) :: arg1
  integer(8) :: arg2(2, 4, 8)
  vector(integer(8)) :: res
  res = vec_xl_be(arg1, arg2)

! LLVMIR: %4 = load i64, ptr %0, align 8
! LLVMIR: %5 = getelementptr i8, ptr %1, i64 %4
! LLVMIR: %6 = load <2 x i64>, ptr %5, align 1
! LLVMIR: %7 = shufflevector <2 x i64> %6, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
! LLVMIR: store <2 x i64> %7, ptr %2, align 16
end subroutine vec_xl_be_testi64a
! CHECK-LABEL: @vec_xl_be_testf32a
subroutine vec_xl_be_testf32a(arg1, arg2, res)
  integer(2) :: arg1
  real(4) :: arg2(4)
  vector(real(4)) :: res
  res = vec_xl_be(arg1, arg2)

! LLVMIR: %4 = load i16, ptr %0, align 2
! LLVMIR: %5 = getelementptr i8, ptr %1, i16 %4
! LLVMIR: %6 = load <4 x float>, ptr %5, align 1
! LLVMIR: %7 = shufflevector <4 x float> %6, <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <4 x float> %7, ptr %2, align 16
end subroutine vec_xl_be_testf32a
! CHECK-LABEL: @vec_xl_be_testf64a
subroutine vec_xl_be_testf64a(arg1, arg2, res)
  integer(8) :: arg1
  real(8) :: arg2(4)
  vector(real(8)) :: res
  res = vec_xl_be(arg1, arg2)

! LLVMIR: %4 = load i64, ptr %0, align 8
! LLVMIR: %5 = getelementptr i8, ptr %1, i64 %4
! LLVMIR: %6 = load <2 x double>, ptr %5, align 1
! LLVMIR: %7 = shufflevector <2 x double> %6, <2 x double> undef, <2 x i32> <i32 1, i32 0>
! LLVMIR: store <2 x double> %7, ptr %2, align 16
end subroutine vec_xl_be_testf64a
!-------------------
! vec_xld2
!-------------------

! vec_xld2 always lowers to lxvd2x.be, then bitcasts to the element type.
! CHECK-LABEL: @vec_xld2_testi8a
subroutine vec_xld2_testi8a(arg1, arg2, res)
  integer(1) :: arg1
  vector(integer(1)) :: arg2(4)
  vector(integer(1)) :: res
  res = vec_xld2(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i8, ptr %0, align 1
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i8 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call contract <2 x double> @llvm.ppc.vsx.lxvd2x.be(ptr %[[addr]])
! LLVMIR: %[[bc:.*]] = bitcast <2 x double> %[[ld]] to <16 x i8>
! LLVMIR: store <16 x i8> %[[bc]], ptr %2, align 16
end subroutine vec_xld2_testi8a
! CHECK-LABEL: @vec_xld2_testi16a
subroutine vec_xld2_testi16a(arg1, arg2, res)
  integer(2) :: arg1
  vector(integer(2)) :: arg2(4)
  vector(integer(2)) :: res
  res = vec_xld2(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i16, ptr %0, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i16 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call contract <2 x double> @llvm.ppc.vsx.lxvd2x.be(ptr %[[addr]])
! LLVMIR: %[[bc:.*]] = bitcast <2 x double> %[[ld]] to <8 x i16>
! LLVMIR: store <8 x i16> %[[bc]], ptr %2, align 16
end subroutine vec_xld2_testi16a
! CHECK-LABEL: @vec_xld2_testi32a
subroutine vec_xld2_testi32a(arg1, arg2, res)
  integer(4) :: arg1
  vector(integer(4)) :: arg2(11)
  vector(integer(4)) :: res
  res = vec_xld2(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i32, ptr %0, align 4
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i32 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call contract <2 x double> @llvm.ppc.vsx.lxvd2x.be(ptr %[[addr]])
! LLVMIR: %[[bc:.*]] = bitcast <2 x double> %[[ld]] to <4 x i32>
! LLVMIR: store <4 x i32> %[[bc]], ptr %2, align 16
end subroutine vec_xld2_testi32a
! CHECK-LABEL: @vec_xld2_testi64a
subroutine vec_xld2_testi64a(arg1, arg2, res)
  integer(8) :: arg1
  vector(integer(8)) :: arg2(31,7)
  vector(integer(8)) :: res
  res = vec_xld2(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call contract <2 x double> @llvm.ppc.vsx.lxvd2x.be(ptr %[[addr]])
! LLVMIR: %[[bc:.*]] = bitcast <2 x double> %[[ld]] to <2 x i64>
! LLVMIR: store <2 x i64> %[[bc]], ptr %2, align 16
end subroutine vec_xld2_testi64a
! CHECK-LABEL: @vec_xld2_testf32a
subroutine vec_xld2_testf32a(arg1, arg2, res)
  integer(2) :: arg1
  vector(real(4)) :: arg2(5)
  vector(real(4)) :: res
  res = vec_xld2(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i16, ptr %0, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i16 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call contract <2 x double> @llvm.ppc.vsx.lxvd2x.be(ptr %[[addr]])
! LLVMIR: %[[bc:.*]] = bitcast <2 x double> %[[ld]] to <4 x float>
! LLVMIR: store <4 x float> %[[bc]], ptr %2, align 16
end subroutine vec_xld2_testf32a
! Element type already matches the intrinsic result: no bitcast expected.
! CHECK-LABEL: @vec_xld2_testf64a
subroutine vec_xld2_testf64a(arg1, arg2, res)
  integer(8) :: arg1
  vector(real(8)) :: arg2(4)
  vector(real(8)) :: res
  res = vec_xld2(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call contract <2 x double> @llvm.ppc.vsx.lxvd2x.be(ptr %[[addr]])
! LLVMIR: store <2 x double> %[[ld]], ptr %2, align 16
end subroutine vec_xld2_testf64a
!-------------------
! vec_xlw4
!-------------------

! vec_xlw4 always lowers to lxvw4x.be, then bitcasts to the element type.
! CHECK-LABEL: @vec_xlw4_testi8a
subroutine vec_xlw4_testi8a(arg1, arg2, res)
  integer(1) :: arg1
  vector(integer(1)) :: arg2(2, 11, 37)
  vector(integer(1)) :: res
  res = vec_xlw4(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i8, ptr %0, align 1
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i8 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(ptr %[[addr]])
! LLVMIR: %[[bc:.*]] = bitcast <4 x i32> %[[ld]] to <16 x i8>
! LLVMIR: store <16 x i8> %[[bc]], ptr %2, align 16
end subroutine vec_xlw4_testi8a
! CHECK-LABEL: @vec_xlw4_testi16a
subroutine vec_xlw4_testi16a(arg1, arg2, res)
  integer(2) :: arg1
  vector(integer(2)) :: arg2(2, 8)
  vector(integer(2)) :: res
  res = vec_xlw4(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i16, ptr %0, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i16 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(ptr %[[addr]])
! LLVMIR: %[[bc:.*]] = bitcast <4 x i32> %[[ld]] to <8 x i16>
! LLVMIR: store <8 x i16> %[[bc]], ptr %2, align 16
end subroutine vec_xlw4_testi16a
! CHECK-LABEL: @vec_xlw4_testu32a
subroutine vec_xlw4_testu32a(arg1, arg2, res)
  integer(4) :: arg1
  vector(unsigned(4)) :: arg2(8, 4)
  vector(unsigned(4)) :: res
  res = vec_xlw4(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i32, ptr %0, align 4
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i32 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(ptr %[[addr]])
! LLVMIR: store <4 x i32> %[[ld]], ptr %2, align 16
end subroutine vec_xlw4_testu32a
! CHECK-LABEL: @vec_xlw4_testf32a
subroutine vec_xlw4_testf32a(arg1, arg2, res)
  integer(2) :: arg1
  vector(real(4)) :: arg2
  vector(real(4)) :: res
  res = vec_xlw4(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i16, ptr %0, align 2
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i16 %[[arg1]]
! LLVMIR: %[[ld:.*]] = call <4 x i32> @llvm.ppc.vsx.lxvw4x.be(ptr %[[addr]])
! LLVMIR: %[[bc:.*]] = bitcast <4 x i32> %[[ld]] to <4 x float>
! LLVMIR: store <4 x float> %[[bc]], ptr %2, align 16
end subroutine vec_xlw4_testf32a
!-------------------
! vec_xlds
!-------------------

! vec_xlds: scalar i64 load splatted via insertelement + zero-mask shuffle.
! CHECK-LABEL: @vec_xlds_testi64a
subroutine vec_xlds_testi64a(arg1, arg2, res)
  integer(8) :: arg1
  vector(integer(8)) :: arg2(4)
  vector(integer(8)) :: res
  res = vec_xlds(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[arg1]]
! LLVMIR: %[[ld:.*]] = load i64, ptr %[[addr]], align 8
! LLVMIR: %[[insrt:.*]] = insertelement <2 x i64> undef, i64 %[[ld]], i32 0
! LLVMIR: %[[shflv:.*]] = shufflevector <2 x i64> %[[insrt]], <2 x i64> undef, <2 x i32> zeroinitializer
! LLVMIR: store <2 x i64> %[[shflv]], ptr %2, align 16
end subroutine vec_xlds_testi64a
! CHECK-LABEL: @vec_xlds_testf64a
subroutine vec_xlds_testf64a(arg1, arg2, res)
  integer(8) :: arg1
  vector(real(8)) :: arg2(4)
  vector(real(8)) :: res
  res = vec_xlds(arg1, arg2)

! LLVMIR: %[[arg1:.*]] = load i64, ptr %0, align 8
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %1, i64 %[[arg1]]
! LLVMIR: %[[ld:.*]] = load i64, ptr %[[addr]], align 8
! LLVMIR: %[[insrt:.*]] = insertelement <2 x i64> undef, i64 %[[ld]], i32 0
! LLVMIR: %[[shflv:.*]] = shufflevector <2 x i64> %[[insrt]], <2 x i64> undef, <2 x i32> zeroinitializer
! LLVMIR: %[[bc:.*]] = bitcast <2 x i64> %[[shflv]] to <2 x double>
! LLVMIR: store <2 x double> %[[bc]], ptr %2, align 16
end subroutine vec_xlds_testf64a