! [flang] Fix for the HLFIR path of PPC Vector type intrinsics (llvm-project #66547).
! File: flang/test/Lower/PowerPC/ppc-vec-store-elem-order.f90
! (web-viewer blob 494ed21f4fe928be92707e4f6e470c65a6856454)
! RUN: %flang_fc1 -flang-experimental-hlfir -emit-llvm %s -fno-ppc-native-vector-element-order -triple ppc64le-unknown-linux -o - | FileCheck --check-prefixes="LLVMIR" %s
! REQUIRES: target=powerpc{{.*}}
!----------------------
! vec_st
!----------------------
! LLVMIR-LABEL: vec_st_test
! vec_st lowers to the AltiVec stvx intrinsic. Under
! -fno-ppc-native-vector-element-order the <4 x i32> payload is
! reversed with a shufflevector (mask <3,2,1,0>) before the store.
subroutine vec_st_test(arg1, arg2, arg3)
  vector(integer(2)) :: arg1
  integer(4) :: arg2
  vector(integer(2)) :: arg3
  call vec_st(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <8 x i16>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[arg2]]
! LLVMIR: %[[bc:.*]] = bitcast <8 x i16> %[[arg1]] to <4 x i32>
! LLVMIR: %[[shf:.*]] = shufflevector <4 x i32> %[[bc]], <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: call void @llvm.ppc.altivec.stvx(<4 x i32> %[[shf]], ptr %[[addr]])
end subroutine vec_st_test
!----------------------
! vec_ste
!----------------------
! LLVMIR-LABEL: vec_ste_test
! vec_ste lowers to the AltiVec stvewx (store vector element word indexed)
! intrinsic, again with the element-order-reversing shuffle applied first.
subroutine vec_ste_test(arg1, arg2, arg3)
  vector(real(4)) :: arg1
  integer(4) :: arg2
  real(4) :: arg3
  call vec_ste(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! NOTE: [[addr]] must use the defining form ":.*" here; the stvewx check
! below references %[[addr]], and FileCheck rejects a use of an
! undefined substitution variable.
! LLVMIR: %[[addr:.*]] = getelementptr i8, ptr %2, i32 %[[arg2]]
! LLVMIR: %[[bc:.*]] = bitcast <4 x float> %[[arg1]] to <4 x i32>
! LLVMIR: %[[shf:.*]] = shufflevector <4 x i32> %[[bc]], <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: call void @llvm.ppc.altivec.stvewx(<4 x i32> %[[shf]], ptr %[[addr]])
end subroutine vec_ste_test
!----------------------
! vec_xst
!----------------------
! LLVMIR-LABEL: vec_xst_test
! vec_xst lowers to a plain (unaligned-capable) vector store; only the
! element-order-reversing shuffle distinguishes the non-native-order path.
subroutine vec_xst_test(arg1, arg2, arg3)
  vector(integer(4)) :: arg1
  integer(4) :: arg2
  vector(integer(4)) :: arg3
  call vec_xst(arg1, arg2, arg3)

! LLVMIR: %[[arg1:.*]] = load <4 x i32>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i32, ptr %1, align 4
! LLVMIR: %[[trg:.*]] = getelementptr i8, ptr %2, i32 %[[arg2]]
! LLVMIR: %[[src:.*]] = shufflevector <4 x i32> %[[arg1]], <4 x i32> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <4 x i32> %[[src]], ptr %[[trg]], align 16
end subroutine vec_xst_test
!----------------------
! vec_xstd2
!----------------------
! LLVMIR-LABEL: vec_xstd2_test
! vec_xstd2 stores the vector reinterpreted as two doublewords (<2 x i64>),
! reversing doubleword order (mask <1,0>). The destination is an element of
! an assumed-size array, so the checks first cover the HLFIR-generated
! one-based index arithmetic for arg3(i) before the byte-offset GEP.
subroutine vec_xstd2_test(arg1, arg2, arg3, i)
  vector(real(4)) :: arg1
  integer(2) :: arg2
  vector(real(4)) :: arg3(*)
  integer(4) :: i
  call vec_xstd2(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr <4 x float>, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %1, align 2
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i16 %[[arg2]]
! LLVMIR: %[[src:.*]] = bitcast <4 x float> %[[arg1]] to <2 x i64>
! LLVMIR: %[[shf:.*]] = shufflevector <2 x i64> %[[src]], <2 x i64> undef, <2 x i32> <i32 1, i32 0>
! LLVMIR: store <2 x i64> %[[shf]], ptr %[[gep2]], align 16
end subroutine vec_xstd2_test
!----------------------
! vec_xstw4
!----------------------
! LLVMIR-LABEL: vec_xstw4_test
! vec_xstw4 stores the vector as four words, reversing word order
! (mask <3,2,1,0>) directly on the <4 x float> value — no bitcast, unlike
! vec_xstd2 above. Index arithmetic for arg3(i) mirrors the previous test.
subroutine vec_xstw4_test(arg1, arg2, arg3, i)
  vector(real(4)) :: arg1
  integer(2) :: arg2
  vector(real(4)) :: arg3(*)
  integer(4) :: i
  call vec_xstw4(arg1, arg2, arg3(i))

! LLVMIR: %[[i:.*]] = load i32, ptr %3, align 4
! LLVMIR: %[[iext:.*]] = sext i32 %[[i]] to i64
! LLVMIR: %[[isub:.*]] = sub i64 %[[iext]], 1
! LLVMIR: %[[imul1:.*]] = mul i64 %[[isub]], 1
! LLVMIR: %[[imul2:.*]] = mul i64 %[[imul1]], 1
! LLVMIR: %[[iadd:.*]] = add i64 %[[imul2]], 0
! LLVMIR: %[[gep1:.*]] = getelementptr <4 x float>, ptr %2, i64 %[[iadd]]
! LLVMIR: %[[arg1:.*]] = load <4 x float>, ptr %0, align 16
! LLVMIR: %[[arg2:.*]] = load i16, ptr %1, align 2
! LLVMIR: %[[gep2:.*]] = getelementptr i8, ptr %[[gep1]], i16 %[[arg2]]
! LLVMIR: %[[src:.*]] = shufflevector <4 x float> %[[arg1]], <4 x float> undef, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
! LLVMIR: store <4 x float> %[[src]], ptr %[[gep2]], align 16
end subroutine vec_xstw4_test