1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // REQUIRES: aarch64-registered-target
3 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2p1 -target-feature +bf16 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
4 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2p1 -target-feature +bf16 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
5 // RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2p1 -target-feature +bf16 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s
6 // RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2p1 -target-feature +bf16 -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -p mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
7 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2p1 -target-feature +bf16 -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
10 #ifdef SVE_OVERLOADED_FORMS
11 // A simple used,unused... macro, long enough to represent any SVE builtin.
12 #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
14 #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
17 // CHECK-LABEL: @test_svst2q_u8(
19 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[ZT:%.*]], i64 0)
20 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[ZT]], i64 16)
21 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i1> [[PG:%.*]], ptr [[BASE:%.*]])
22 // CHECK-NEXT: ret void
24 // CPP-CHECK-LABEL: @_Z14test_svst2q_u8u10__SVBool_tPKh11svuint8x2_t(
25 // CPP-CHECK-NEXT: entry:
26 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[ZT:%.*]], i64 0)
27 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[ZT]], i64 16)
28 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i1> [[PG:%.*]], ptr [[BASE:%.*]])
29 // CPP-CHECK-NEXT: ret void
31 void test_svst2q_u8(svbool_t pg
, const uint8_t *base
, svuint8x2_t zt
)
33 SVE_ACLE_FUNC(svst2q
,,_u8
,)(pg
, base
, zt
);
36 // CHECK-LABEL: @test_svst2q_s8(
38 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[ZT:%.*]], i64 0)
39 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[ZT]], i64 16)
40 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i1> [[PG:%.*]], ptr [[BASE:%.*]])
41 // CHECK-NEXT: ret void
43 // CPP-CHECK-LABEL: @_Z14test_svst2q_s8u10__SVBool_tPKa10svint8x2_t(
44 // CPP-CHECK-NEXT: entry:
45 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[ZT:%.*]], i64 0)
46 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[ZT]], i64 16)
47 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i1> [[PG:%.*]], ptr [[BASE:%.*]])
48 // CPP-CHECK-NEXT: ret void
50 void test_svst2q_s8(svbool_t pg
, const int8_t *base
, svint8x2_t zt
)
52 SVE_ACLE_FUNC(svst2q
,,_s8
,)(pg
, base
, zt
);
54 // CHECK-LABEL: @test_svst2q_u16(
56 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[ZT:%.*]], i64 0)
57 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[ZT]], i64 8)
58 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
59 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i1> [[TMP2]], ptr [[BASE:%.*]])
60 // CHECK-NEXT: ret void
62 // CPP-CHECK-LABEL: @_Z15test_svst2q_u16u10__SVBool_tPKt12svuint16x2_t(
63 // CPP-CHECK-NEXT: entry:
64 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[ZT:%.*]], i64 0)
65 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[ZT]], i64 8)
66 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
67 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i1> [[TMP2]], ptr [[BASE:%.*]])
68 // CPP-CHECK-NEXT: ret void
70 void test_svst2q_u16(svbool_t pg
, const uint16_t *base
, svuint16x2_t zt
)
72 SVE_ACLE_FUNC(svst2q
,,_u16
,)(pg
, base
, zt
);
75 // CHECK-LABEL: @test_svst2q_s16(
77 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[ZT:%.*]], i64 0)
78 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[ZT]], i64 8)
79 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
80 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i1> [[TMP2]], ptr [[BASE:%.*]])
81 // CHECK-NEXT: ret void
83 // CPP-CHECK-LABEL: @_Z15test_svst2q_s16u10__SVBool_tPKs11svint16x2_t(
84 // CPP-CHECK-NEXT: entry:
85 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[ZT:%.*]], i64 0)
86 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[ZT]], i64 8)
87 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
88 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i1> [[TMP2]], ptr [[BASE:%.*]])
89 // CPP-CHECK-NEXT: ret void
91 void test_svst2q_s16(svbool_t pg
, const int16_t *base
, svint16x2_t zt
)
93 SVE_ACLE_FUNC(svst2q
,,_s16
,)(pg
, base
, zt
);
96 // CHECK-LABEL: @test_svst2q_u32(
98 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[ZT:%.*]], i64 0)
99 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[ZT]], i64 4)
100 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
101 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i1> [[TMP2]], ptr [[BASE:%.*]])
102 // CHECK-NEXT: ret void
104 // CPP-CHECK-LABEL: @_Z15test_svst2q_u32u10__SVBool_tPKj12svuint32x2_t(
105 // CPP-CHECK-NEXT: entry:
106 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[ZT:%.*]], i64 0)
107 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[ZT]], i64 4)
108 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
109 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i1> [[TMP2]], ptr [[BASE:%.*]])
110 // CPP-CHECK-NEXT: ret void
112 void test_svst2q_u32(svbool_t pg
, const uint32_t *base
, svuint32x2_t zt
)
114 SVE_ACLE_FUNC(svst2q
,,_u32
,)(pg
, base
, zt
);
117 // CHECK-LABEL: @test_svst2q_s32(
118 // CHECK-NEXT: entry:
119 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[ZT:%.*]], i64 0)
120 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[ZT]], i64 4)
121 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
122 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i1> [[TMP2]], ptr [[BASE:%.*]])
123 // CHECK-NEXT: ret void
125 // CPP-CHECK-LABEL: @_Z15test_svst2q_s32u10__SVBool_tPKi11svint32x2_t(
126 // CPP-CHECK-NEXT: entry:
127 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[ZT:%.*]], i64 0)
128 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[ZT]], i64 4)
129 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
130 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i1> [[TMP2]], ptr [[BASE:%.*]])
131 // CPP-CHECK-NEXT: ret void
133 void test_svst2q_s32(svbool_t pg
, const int32_t *base
, svint32x2_t zt
)
135 SVE_ACLE_FUNC(svst2q
,,_s32
,)(pg
, base
, zt
);
138 // CHECK-LABEL: @test_svst2q_u64(
139 // CHECK-NEXT: entry:
140 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[ZT:%.*]], i64 0)
141 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[ZT]], i64 2)
142 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
143 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i1> [[TMP2]], ptr [[BASE:%.*]])
144 // CHECK-NEXT: ret void
146 // CPP-CHECK-LABEL: @_Z15test_svst2q_u64u10__SVBool_tPKm12svuint64x2_t(
147 // CPP-CHECK-NEXT: entry:
148 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[ZT:%.*]], i64 0)
149 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[ZT]], i64 2)
150 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
151 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i1> [[TMP2]], ptr [[BASE:%.*]])
152 // CPP-CHECK-NEXT: ret void
154 void test_svst2q_u64(svbool_t pg
, const uint64_t *base
, svuint64x2_t zt
)
156 SVE_ACLE_FUNC(svst2q
,,_u64
,)(pg
, base
, zt
);
159 // CHECK-LABEL: @test_svst2q_s64(
160 // CHECK-NEXT: entry:
161 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[ZT:%.*]], i64 0)
162 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[ZT]], i64 2)
163 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
164 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i1> [[TMP2]], ptr [[BASE:%.*]])
165 // CHECK-NEXT: ret void
167 // CPP-CHECK-LABEL: @_Z15test_svst2q_s64u10__SVBool_tPKl11svint64x2_t(
168 // CPP-CHECK-NEXT: entry:
169 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[ZT:%.*]], i64 0)
170 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[ZT]], i64 2)
171 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
172 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i1> [[TMP2]], ptr [[BASE:%.*]])
173 // CPP-CHECK-NEXT: ret void
175 void test_svst2q_s64(svbool_t pg
, const int64_t *base
, svint64x2_t zt
)
177 SVE_ACLE_FUNC(svst2q
,,_s64
,)(pg
, base
, zt
);
180 // CHECK-LABEL: @test_svst2q_f16(
181 // CHECK-NEXT: entry:
182 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv16f16(<vscale x 16 x half> [[ZT:%.*]], i64 0)
183 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv16f16(<vscale x 16 x half> [[ZT]], i64 8)
184 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
185 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8f16(<vscale x 8 x half> [[TMP0]], <vscale x 8 x half> [[TMP1]], <vscale x 8 x i1> [[TMP2]], ptr [[BASE:%.*]])
186 // CHECK-NEXT: ret void
188 // CPP-CHECK-LABEL: @_Z15test_svst2q_f16u10__SVBool_tPKDh13svfloat16x2_t(
189 // CPP-CHECK-NEXT: entry:
190 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv16f16(<vscale x 16 x half> [[ZT:%.*]], i64 0)
191 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv16f16(<vscale x 16 x half> [[ZT]], i64 8)
192 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
193 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8f16(<vscale x 8 x half> [[TMP0]], <vscale x 8 x half> [[TMP1]], <vscale x 8 x i1> [[TMP2]], ptr [[BASE:%.*]])
194 // CPP-CHECK-NEXT: ret void
196 void test_svst2q_f16(svbool_t pg
, const float16_t
*base
, svfloat16x2_t zt
)
198 SVE_ACLE_FUNC(svst2q
,,_f16
,)(pg
, base
, zt
);
201 // CHECK-LABEL: @test_svst2q_bf16(
202 // CHECK-NEXT: entry:
203 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv16bf16(<vscale x 16 x bfloat> [[ZT:%.*]], i64 0)
204 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv16bf16(<vscale x 16 x bfloat> [[ZT]], i64 8)
205 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
206 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8bf16(<vscale x 8 x bfloat> [[TMP0]], <vscale x 8 x bfloat> [[TMP1]], <vscale x 8 x i1> [[TMP2]], ptr [[BASE:%.*]])
207 // CHECK-NEXT: ret void
209 // CPP-CHECK-LABEL: @_Z16test_svst2q_bf16u10__SVBool_tPKu6__bf1614svbfloat16x2_t(
210 // CPP-CHECK-NEXT: entry:
211 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv16bf16(<vscale x 16 x bfloat> [[ZT:%.*]], i64 0)
212 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv16bf16(<vscale x 16 x bfloat> [[ZT]], i64 8)
213 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
214 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8bf16(<vscale x 8 x bfloat> [[TMP0]], <vscale x 8 x bfloat> [[TMP1]], <vscale x 8 x i1> [[TMP2]], ptr [[BASE:%.*]])
215 // CPP-CHECK-NEXT: ret void
217 void test_svst2q_bf16(svbool_t pg
, const bfloat16_t
*base
, svbfloat16x2_t zt
)
219 SVE_ACLE_FUNC(svst2q
,,_bf16
,)(pg
, base
, zt
);
222 // CHECK-LABEL: @test_svst2q_f32(
223 // CHECK-NEXT: entry:
224 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[ZT:%.*]], i64 0)
225 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[ZT]], i64 4)
226 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
227 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv4f32(<vscale x 4 x float> [[TMP0]], <vscale x 4 x float> [[TMP1]], <vscale x 4 x i1> [[TMP2]], ptr [[BASE:%.*]])
228 // CHECK-NEXT: ret void
230 // CPP-CHECK-LABEL: @_Z15test_svst2q_f32u10__SVBool_tPKf13svfloat32x2_t(
231 // CPP-CHECK-NEXT: entry:
232 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[ZT:%.*]], i64 0)
233 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[ZT]], i64 4)
234 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
235 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv4f32(<vscale x 4 x float> [[TMP0]], <vscale x 4 x float> [[TMP1]], <vscale x 4 x i1> [[TMP2]], ptr [[BASE:%.*]])
236 // CPP-CHECK-NEXT: ret void
238 void test_svst2q_f32(svbool_t pg
, const float32_t
*base
, svfloat32x2_t zt
)
240 SVE_ACLE_FUNC(svst2q
,,_f32
,)(pg
, base
, zt
);
243 // CHECK-LABEL: @test_svst2q_f64(
244 // CHECK-NEXT: entry:
245 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[ZT:%.*]], i64 0)
246 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[ZT]], i64 2)
247 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
248 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv2f64(<vscale x 2 x double> [[TMP0]], <vscale x 2 x double> [[TMP1]], <vscale x 2 x i1> [[TMP2]], ptr [[BASE:%.*]])
249 // CHECK-NEXT: ret void
251 // CPP-CHECK-LABEL: @_Z15test_svst2q_f64u10__SVBool_tPKd13svfloat64x2_t(
252 // CPP-CHECK-NEXT: entry:
253 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[ZT:%.*]], i64 0)
254 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[ZT]], i64 2)
255 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
256 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv2f64(<vscale x 2 x double> [[TMP0]], <vscale x 2 x double> [[TMP1]], <vscale x 2 x i1> [[TMP2]], ptr [[BASE:%.*]])
257 // CPP-CHECK-NEXT: ret void
259 void test_svst2q_f64(svbool_t pg
, const float64_t
*base
, svfloat64x2_t zt
)
261 SVE_ACLE_FUNC(svst2q
,,_f64
,)(pg
, base
, zt
);
264 // CHECK-LABEL: @test_svst2q_vnum_u8(
265 // CHECK-NEXT: entry:
266 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[ZT:%.*]], i64 0)
267 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[ZT]], i64 16)
268 // CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 16 x i8>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
269 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i1> [[PG:%.*]], ptr [[TMP2]])
270 // CHECK-NEXT: ret void
272 // CPP-CHECK-LABEL: @_Z19test_svst2q_vnum_u8u10__SVBool_tPKhl11svuint8x2_t(
273 // CPP-CHECK-NEXT: entry:
274 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[ZT:%.*]], i64 0)
275 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[ZT]], i64 16)
276 // CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 16 x i8>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
277 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i1> [[PG:%.*]], ptr [[TMP2]])
278 // CPP-CHECK-NEXT: ret void
280 void test_svst2q_vnum_u8(svbool_t pg
, const uint8_t *base
, int64_t vnum
, svuint8x2_t zt
)
282 SVE_ACLE_FUNC(svst2q_vnum_
,,u8
,)(pg
, base
, vnum
, zt
);
285 // CHECK-LABEL: @test_svst2q_vnum_s8(
286 // CHECK-NEXT: entry:
287 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[ZT:%.*]], i64 0)
288 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[ZT]], i64 16)
289 // CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 16 x i8>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
290 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i1> [[PG:%.*]], ptr [[TMP2]])
291 // CHECK-NEXT: ret void
293 // CPP-CHECK-LABEL: @_Z19test_svst2q_vnum_s8u10__SVBool_tPKal10svint8x2_t(
294 // CPP-CHECK-NEXT: entry:
295 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[ZT:%.*]], i64 0)
296 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv32i8(<vscale x 32 x i8> [[ZT]], i64 16)
297 // CPP-CHECK-NEXT: [[TMP2:%.*]] = getelementptr <vscale x 16 x i8>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
298 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i1> [[PG:%.*]], ptr [[TMP2]])
299 // CPP-CHECK-NEXT: ret void
301 void test_svst2q_vnum_s8(svbool_t pg
, const int8_t *base
, int64_t vnum
, svint8x2_t zt
)
303 SVE_ACLE_FUNC(svst2q_vnum
,,_s8
,)(pg
, base
, vnum
, zt
);
306 // CHECK-LABEL: @test_svst2q_vnum_u16(
307 // CHECK-NEXT: entry:
308 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[ZT:%.*]], i64 0)
309 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[ZT]], i64 8)
310 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
311 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 8 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
312 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i1> [[TMP2]], ptr [[TMP3]])
313 // CHECK-NEXT: ret void
315 // CPP-CHECK-LABEL: @_Z20test_svst2q_vnum_u16u10__SVBool_tPKtl12svuint16x2_t(
316 // CPP-CHECK-NEXT: entry:
317 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[ZT:%.*]], i64 0)
318 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[ZT]], i64 8)
319 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
320 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 8 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
321 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i1> [[TMP2]], ptr [[TMP3]])
322 // CPP-CHECK-NEXT: ret void
324 void test_svst2q_vnum_u16(svbool_t pg
, const uint16_t *base
, int64_t vnum
, svuint16x2_t zt
)
326 SVE_ACLE_FUNC(svst2q_vnum
,,_u16
,)(pg
, base
, vnum
, zt
);
329 // CHECK-LABEL: @test_svst2q_vnum_s16(
330 // CHECK-NEXT: entry:
331 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[ZT:%.*]], i64 0)
332 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[ZT]], i64 8)
333 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
334 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 8 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
335 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i1> [[TMP2]], ptr [[TMP3]])
336 // CHECK-NEXT: ret void
338 // CPP-CHECK-LABEL: @_Z20test_svst2q_vnum_s16u10__SVBool_tPKsl11svint16x2_t(
339 // CPP-CHECK-NEXT: entry:
340 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[ZT:%.*]], i64 0)
341 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv16i16(<vscale x 16 x i16> [[ZT]], i64 8)
342 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
343 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 8 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
344 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i1> [[TMP2]], ptr [[TMP3]])
345 // CPP-CHECK-NEXT: ret void
347 void test_svst2q_vnum_s16(svbool_t pg
, const int16_t *base
, int64_t vnum
, svint16x2_t zt
)
349 SVE_ACLE_FUNC(svst2q_vnum
,,_s16
,)(pg
, base
, vnum
, zt
);
352 // CHECK-LABEL: @test_svst2q_vnum_u32(
353 // CHECK-NEXT: entry:
354 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[ZT:%.*]], i64 0)
355 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[ZT]], i64 4)
356 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
357 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 4 x i32>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
358 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i1> [[TMP2]], ptr [[TMP3]])
359 // CHECK-NEXT: ret void
361 // CPP-CHECK-LABEL: @_Z20test_svst2q_vnum_u32u10__SVBool_tPKjl12svuint32x2_t(
362 // CPP-CHECK-NEXT: entry:
363 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[ZT:%.*]], i64 0)
364 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[ZT]], i64 4)
365 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
366 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 4 x i32>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
367 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i1> [[TMP2]], ptr [[TMP3]])
368 // CPP-CHECK-NEXT: ret void
370 void test_svst2q_vnum_u32(svbool_t pg
, const uint32_t *base
, int64_t vnum
, svuint32x2_t zt
)
372 SVE_ACLE_FUNC(svst2q_vnum
,,_u32
,)(pg
, base
, vnum
, zt
);
375 // CHECK-LABEL: @test_svst2q_vnum_s32(
376 // CHECK-NEXT: entry:
377 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[ZT:%.*]], i64 0)
378 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[ZT]], i64 4)
379 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
380 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 4 x i32>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
381 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i1> [[TMP2]], ptr [[TMP3]])
382 // CHECK-NEXT: ret void
384 // CPP-CHECK-LABEL: @_Z20test_svst2q_vnum_s32u10__SVBool_tPKil11svint32x2_t(
385 // CPP-CHECK-NEXT: entry:
386 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[ZT:%.*]], i64 0)
387 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> [[ZT]], i64 4)
388 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
389 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 4 x i32>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
390 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i1> [[TMP2]], ptr [[TMP3]])
391 // CPP-CHECK-NEXT: ret void
393 void test_svst2q_vnum_s32(svbool_t pg
, const int32_t *base
, int64_t vnum
, svint32x2_t zt
)
395 SVE_ACLE_FUNC(svst2q_vnum
,,_s32
,)(pg
, base
, vnum
, zt
);
398 // CHECK-LABEL: @test_svst2q_vnum_u64(
399 // CHECK-NEXT: entry:
400 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[ZT:%.*]], i64 0)
401 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[ZT]], i64 2)
402 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
403 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 2 x i64>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
404 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i1> [[TMP2]], ptr [[TMP3]])
405 // CHECK-NEXT: ret void
407 // CPP-CHECK-LABEL: @_Z20test_svst2q_vnum_u64u10__SVBool_tPKml12svuint64x2_t(
408 // CPP-CHECK-NEXT: entry:
409 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[ZT:%.*]], i64 0)
410 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[ZT]], i64 2)
411 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
412 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 2 x i64>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
413 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i1> [[TMP2]], ptr [[TMP3]])
414 // CPP-CHECK-NEXT: ret void
416 void test_svst2q_vnum_u64(svbool_t pg
, const uint64_t *base
, int64_t vnum
, svuint64x2_t zt
)
418 SVE_ACLE_FUNC(svst2q_vnum
,,_u64
,)(pg
, base
, vnum
, zt
);
421 // CHECK-LABEL: @test_svst2q_vnum_s64(
422 // CHECK-NEXT: entry:
423 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[ZT:%.*]], i64 0)
424 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[ZT]], i64 2)
425 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
426 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 2 x i64>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
427 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i1> [[TMP2]], ptr [[TMP3]])
428 // CHECK-NEXT: ret void
430 // CPP-CHECK-LABEL: @_Z20test_svst2q_vnum_s64u10__SVBool_tPKll11svint64x2_t(
431 // CPP-CHECK-NEXT: entry:
432 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[ZT:%.*]], i64 0)
433 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv4i64(<vscale x 4 x i64> [[ZT]], i64 2)
434 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
435 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 2 x i64>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
436 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i1> [[TMP2]], ptr [[TMP3]])
437 // CPP-CHECK-NEXT: ret void
439 void test_svst2q_vnum_s64(svbool_t pg
, const int64_t *base
, int64_t vnum
, svint64x2_t zt
)
441 SVE_ACLE_FUNC(svst2q_vnum
,,_s64
,)(pg
, base
, vnum
, zt
);
444 // CHECK-LABEL: @test_svst2q_vnum_f16(
445 // CHECK-NEXT: entry:
446 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv16f16(<vscale x 16 x half> [[ZT:%.*]], i64 0)
447 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv16f16(<vscale x 16 x half> [[ZT]], i64 8)
448 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
449 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 8 x half>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
450 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8f16(<vscale x 8 x half> [[TMP0]], <vscale x 8 x half> [[TMP1]], <vscale x 8 x i1> [[TMP2]], ptr [[TMP3]])
451 // CHECK-NEXT: ret void
453 // CPP-CHECK-LABEL: @_Z20test_svst2q_vnum_f16u10__SVBool_tPKDhl13svfloat16x2_t(
454 // CPP-CHECK-NEXT: entry:
455 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv16f16(<vscale x 16 x half> [[ZT:%.*]], i64 0)
456 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv16f16(<vscale x 16 x half> [[ZT]], i64 8)
457 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
458 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 8 x half>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
459 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8f16(<vscale x 8 x half> [[TMP0]], <vscale x 8 x half> [[TMP1]], <vscale x 8 x i1> [[TMP2]], ptr [[TMP3]])
460 // CPP-CHECK-NEXT: ret void
462 void test_svst2q_vnum_f16(svbool_t pg
, const float16_t
*base
, int64_t vnum
, svfloat16x2_t zt
)
464 SVE_ACLE_FUNC(svst2q_vnum
,,_f16
,)(pg
, base
, vnum
, zt
);
467 // CHECK-LABEL: @test_svst2q_vnum_bf16(
468 // CHECK-NEXT: entry:
469 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv16bf16(<vscale x 16 x bfloat> [[ZT:%.*]], i64 0)
470 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv16bf16(<vscale x 16 x bfloat> [[ZT]], i64 8)
471 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
472 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 8 x bfloat>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
473 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8bf16(<vscale x 8 x bfloat> [[TMP0]], <vscale x 8 x bfloat> [[TMP1]], <vscale x 8 x i1> [[TMP2]], ptr [[TMP3]])
474 // CHECK-NEXT: ret void
476 // CPP-CHECK-LABEL: @_Z21test_svst2q_vnum_bf16u10__SVBool_tPKu6__bf16l14svbfloat16x2_t(
477 // CPP-CHECK-NEXT: entry:
478 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv16bf16(<vscale x 16 x bfloat> [[ZT:%.*]], i64 0)
479 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv16bf16(<vscale x 16 x bfloat> [[ZT]], i64 8)
480 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
481 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 8 x bfloat>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
482 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv8bf16(<vscale x 8 x bfloat> [[TMP0]], <vscale x 8 x bfloat> [[TMP1]], <vscale x 8 x i1> [[TMP2]], ptr [[TMP3]])
483 // CPP-CHECK-NEXT: ret void
485 void test_svst2q_vnum_bf16(svbool_t pg
, const bfloat16_t
*base
, int64_t vnum
, svbfloat16x2_t zt
)
487 SVE_ACLE_FUNC(svst2q_vnum
,,_bf16
,)(pg
, base
, vnum
, zt
);
490 // CHECK-LABEL: @test_svst2q_vnum_f32(
491 // CHECK-NEXT: entry:
492 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[ZT:%.*]], i64 0)
493 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[ZT]], i64 4)
494 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
495 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 4 x float>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
496 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv4f32(<vscale x 4 x float> [[TMP0]], <vscale x 4 x float> [[TMP1]], <vscale x 4 x i1> [[TMP2]], ptr [[TMP3]])
497 // CHECK-NEXT: ret void
499 // CPP-CHECK-LABEL: @_Z20test_svst2q_vnum_f32u10__SVBool_tPKfl13svfloat32x2_t(
500 // CPP-CHECK-NEXT: entry:
501 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[ZT:%.*]], i64 0)
502 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv8f32(<vscale x 8 x float> [[ZT]], i64 4)
503 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
504 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 4 x float>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
505 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv4f32(<vscale x 4 x float> [[TMP0]], <vscale x 4 x float> [[TMP1]], <vscale x 4 x i1> [[TMP2]], ptr [[TMP3]])
506 // CPP-CHECK-NEXT: ret void
508 void test_svst2q_vnum_f32(svbool_t pg
, const float32_t
*base
, int64_t vnum
, svfloat32x2_t zt
)
510 SVE_ACLE_FUNC(svst2q_vnum
,,_f32
,)(pg
, base
, vnum
, zt
);
513 // CHECK-LABEL: @test_svst2q_vnum_f64(
514 // CHECK-NEXT: entry:
515 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[ZT:%.*]], i64 0)
516 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[ZT]], i64 2)
517 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
518 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 2 x double>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
519 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv2f64(<vscale x 2 x double> [[TMP0]], <vscale x 2 x double> [[TMP1]], <vscale x 2 x i1> [[TMP2]], ptr [[TMP3]])
520 // CHECK-NEXT: ret void
522 // CPP-CHECK-LABEL: @_Z20test_svst2q_vnum_f64u10__SVBool_tPKdl13svfloat64x2_t(
523 // CPP-CHECK-NEXT: entry:
524 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[ZT:%.*]], i64 0)
525 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv4f64(<vscale x 4 x double> [[ZT]], i64 2)
526 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
527 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 2 x double>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
528 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st2q.nxv2f64(<vscale x 2 x double> [[TMP0]], <vscale x 2 x double> [[TMP1]], <vscale x 2 x i1> [[TMP2]], ptr [[TMP3]])
529 // CPP-CHECK-NEXT: ret void
531 void test_svst2q_vnum_f64(svbool_t pg
, const float64_t
*base
, int64_t vnum
, svfloat64x2_t zt
)
533 SVE_ACLE_FUNC(svst2q_vnum
,,_f64
,)(pg
, base
, vnum
, zt
);
538 // CHECK-LABEL: @test_svst3q_u8(
539 // CHECK-NEXT: entry:
540 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT:%.*]], i64 0)
541 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT]], i64 16)
542 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT]], i64 32)
543 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], ptr [[BASE:%.*]])
544 // CHECK-NEXT: ret void
546 // CPP-CHECK-LABEL: @_Z14test_svst3q_u8u10__SVBool_tPKh11svuint8x3_t(
547 // CPP-CHECK-NEXT: entry:
548 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT:%.*]], i64 0)
549 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT]], i64 16)
550 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT]], i64 32)
551 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], ptr [[BASE:%.*]])
552 // CPP-CHECK-NEXT: ret void
554 void test_svst3q_u8(svbool_t pg
, const uint8_t *base
, svuint8x3_t zt
)
556 SVE_ACLE_FUNC(svst3q
,,_u8
,)(pg
, base
, zt
);
559 // CHECK-LABEL: @test_svst3q_s8(
560 // CHECK-NEXT: entry:
561 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT:%.*]], i64 0)
562 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT]], i64 16)
563 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT]], i64 32)
564 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], ptr [[BASE:%.*]])
565 // CHECK-NEXT: ret void
567 // CPP-CHECK-LABEL: @_Z14test_svst3q_s8u10__SVBool_tPKa10svint8x3_t(
568 // CPP-CHECK-NEXT: entry:
569 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT:%.*]], i64 0)
570 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT]], i64 16)
571 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT]], i64 32)
572 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], ptr [[BASE:%.*]])
573 // CPP-CHECK-NEXT: ret void
575 void test_svst3q_s8(svbool_t pg
, const int8_t *base
, svint8x3_t zt
)
577 SVE_ACLE_FUNC(svst3q
,,_s8
,)(pg
, base
, zt
);
579 // CHECK-LABEL: @test_svst3q_u16(
580 // CHECK-NEXT: entry:
581 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT:%.*]], i64 0)
582 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT]], i64 8)
583 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT]], i64 16)
584 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
585 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i1> [[TMP3]], ptr [[BASE:%.*]])
586 // CHECK-NEXT: ret void
588 // CPP-CHECK-LABEL: @_Z15test_svst3q_u16u10__SVBool_tPKt12svuint16x3_t(
589 // CPP-CHECK-NEXT: entry:
590 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT:%.*]], i64 0)
591 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT]], i64 8)
592 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT]], i64 16)
593 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
594 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i1> [[TMP3]], ptr [[BASE:%.*]])
595 // CPP-CHECK-NEXT: ret void
597 void test_svst3q_u16(svbool_t pg
, const uint16_t *base
, svuint16x3_t zt
)
599 SVE_ACLE_FUNC(svst3q
,,_u16
,)(pg
, base
, zt
);
602 // CHECK-LABEL: @test_svst3q_s16(
603 // CHECK-NEXT: entry:
604 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT:%.*]], i64 0)
605 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT]], i64 8)
606 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT]], i64 16)
607 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
608 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i1> [[TMP3]], ptr [[BASE:%.*]])
609 // CHECK-NEXT: ret void
611 // CPP-CHECK-LABEL: @_Z15test_svst3q_s16u10__SVBool_tPKs11svint16x3_t(
612 // CPP-CHECK-NEXT: entry:
613 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT:%.*]], i64 0)
614 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT]], i64 8)
615 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT]], i64 16)
616 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
617 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i1> [[TMP3]], ptr [[BASE:%.*]])
618 // CPP-CHECK-NEXT: ret void
620 void test_svst3q_s16(svbool_t pg
, const int16_t *base
, svint16x3_t zt
)
622 SVE_ACLE_FUNC(svst3q
,,_s16
,)(pg
, base
, zt
);
625 // CHECK-LABEL: @test_svst3q_u32(
626 // CHECK-NEXT: entry:
627 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT:%.*]], i64 0)
628 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT]], i64 4)
629 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT]], i64 8)
630 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
631 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i1> [[TMP3]], ptr [[BASE:%.*]])
632 // CHECK-NEXT: ret void
634 // CPP-CHECK-LABEL: @_Z15test_svst3q_u32u10__SVBool_tPKj12svuint32x3_t(
635 // CPP-CHECK-NEXT: entry:
636 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT:%.*]], i64 0)
637 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT]], i64 4)
638 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT]], i64 8)
639 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
640 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i1> [[TMP3]], ptr [[BASE:%.*]])
641 // CPP-CHECK-NEXT: ret void
643 void test_svst3q_u32(svbool_t pg
, const uint32_t *base
, svuint32x3_t zt
)
645 SVE_ACLE_FUNC(svst3q
,,_u32
,)(pg
, base
, zt
);
648 // CHECK-LABEL: @test_svst3q_s32(
649 // CHECK-NEXT: entry:
650 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT:%.*]], i64 0)
651 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT]], i64 4)
652 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT]], i64 8)
653 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
654 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i1> [[TMP3]], ptr [[BASE:%.*]])
655 // CHECK-NEXT: ret void
657 // CPP-CHECK-LABEL: @_Z15test_svst3q_s32u10__SVBool_tPKi11svint32x3_t(
658 // CPP-CHECK-NEXT: entry:
659 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT:%.*]], i64 0)
660 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT]], i64 4)
661 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT]], i64 8)
662 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
663 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i1> [[TMP3]], ptr [[BASE:%.*]])
664 // CPP-CHECK-NEXT: ret void
666 void test_svst3q_s32(svbool_t pg
, const int32_t *base
, svint32x3_t zt
)
668 SVE_ACLE_FUNC(svst3q
,,_s32
,)(pg
, base
, zt
);
671 // CHECK-LABEL: @test_svst3q_u64(
672 // CHECK-NEXT: entry:
673 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT:%.*]], i64 0)
674 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT]], i64 2)
675 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT]], i64 4)
676 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
677 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i1> [[TMP3]], ptr [[BASE:%.*]])
678 // CHECK-NEXT: ret void
680 // CPP-CHECK-LABEL: @_Z15test_svst3q_u64u10__SVBool_tPKm12svuint64x3_t(
681 // CPP-CHECK-NEXT: entry:
682 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT:%.*]], i64 0)
683 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT]], i64 2)
684 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT]], i64 4)
685 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
686 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i1> [[TMP3]], ptr [[BASE:%.*]])
687 // CPP-CHECK-NEXT: ret void
689 void test_svst3q_u64(svbool_t pg
, const uint64_t *base
, svuint64x3_t zt
)
691 SVE_ACLE_FUNC(svst3q
,,_u64
,)(pg
, base
, zt
);
694 // CHECK-LABEL: @test_svst3q_s64(
695 // CHECK-NEXT: entry:
696 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT:%.*]], i64 0)
697 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT]], i64 2)
698 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT]], i64 4)
699 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
700 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i1> [[TMP3]], ptr [[BASE:%.*]])
701 // CHECK-NEXT: ret void
703 // CPP-CHECK-LABEL: @_Z15test_svst3q_s64u10__SVBool_tPKl11svint64x3_t(
704 // CPP-CHECK-NEXT: entry:
705 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT:%.*]], i64 0)
706 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT]], i64 2)
707 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT]], i64 4)
708 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
709 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i1> [[TMP3]], ptr [[BASE:%.*]])
710 // CPP-CHECK-NEXT: ret void
712 void test_svst3q_s64(svbool_t pg
, const int64_t *base
, svint64x3_t zt
)
714 SVE_ACLE_FUNC(svst3q
,,_s64
,)(pg
, base
, zt
);
717 // CHECK-LABEL: @test_svst3q_f16(
718 // CHECK-NEXT: entry:
719 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv24f16(<vscale x 24 x half> [[ZT:%.*]], i64 0)
720 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv24f16(<vscale x 24 x half> [[ZT]], i64 8)
721 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv24f16(<vscale x 24 x half> [[ZT]], i64 16)
722 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
723 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8f16(<vscale x 8 x half> [[TMP0]], <vscale x 8 x half> [[TMP1]], <vscale x 8 x half> [[TMP2]], <vscale x 8 x i1> [[TMP3]], ptr [[BASE:%.*]])
724 // CHECK-NEXT: ret void
726 // CPP-CHECK-LABEL: @_Z15test_svst3q_f16u10__SVBool_tPKDh13svfloat16x3_t(
727 // CPP-CHECK-NEXT: entry:
728 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv24f16(<vscale x 24 x half> [[ZT:%.*]], i64 0)
729 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv24f16(<vscale x 24 x half> [[ZT]], i64 8)
730 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv24f16(<vscale x 24 x half> [[ZT]], i64 16)
731 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
732 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8f16(<vscale x 8 x half> [[TMP0]], <vscale x 8 x half> [[TMP1]], <vscale x 8 x half> [[TMP2]], <vscale x 8 x i1> [[TMP3]], ptr [[BASE:%.*]])
733 // CPP-CHECK-NEXT: ret void
735 void test_svst3q_f16(svbool_t pg
, const float16_t
*base
, svfloat16x3_t zt
)
737 SVE_ACLE_FUNC(svst3q
,,_f16
,)(pg
, base
, zt
);
740 // CHECK-LABEL: @test_svst3q_bf16(
741 // CHECK-NEXT: entry:
742 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv24bf16(<vscale x 24 x bfloat> [[ZT:%.*]], i64 0)
743 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv24bf16(<vscale x 24 x bfloat> [[ZT]], i64 8)
744 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv24bf16(<vscale x 24 x bfloat> [[ZT]], i64 16)
745 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
746 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8bf16(<vscale x 8 x bfloat> [[TMP0]], <vscale x 8 x bfloat> [[TMP1]], <vscale x 8 x bfloat> [[TMP2]], <vscale x 8 x i1> [[TMP3]], ptr [[BASE:%.*]])
747 // CHECK-NEXT: ret void
749 // CPP-CHECK-LABEL: @_Z16test_svst3q_bf16u10__SVBool_tPKu6__bf1614svbfloat16x3_t(
750 // CPP-CHECK-NEXT: entry:
751 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv24bf16(<vscale x 24 x bfloat> [[ZT:%.*]], i64 0)
752 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv24bf16(<vscale x 24 x bfloat> [[ZT]], i64 8)
753 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv24bf16(<vscale x 24 x bfloat> [[ZT]], i64 16)
754 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
755 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8bf16(<vscale x 8 x bfloat> [[TMP0]], <vscale x 8 x bfloat> [[TMP1]], <vscale x 8 x bfloat> [[TMP2]], <vscale x 8 x i1> [[TMP3]], ptr [[BASE:%.*]])
756 // CPP-CHECK-NEXT: ret void
void test_svst3q_bf16(svbool_t pg, const bfloat16_t *base, svbfloat16x3_t zt)
{
  SVE_ACLE_FUNC(svst3q,,_bf16,)(pg, base, zt);
}
763 // CHECK-LABEL: @test_svst3q_f32(
764 // CHECK-NEXT: entry:
765 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv12f32(<vscale x 12 x float> [[ZT:%.*]], i64 0)
766 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv12f32(<vscale x 12 x float> [[ZT]], i64 4)
767 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv12f32(<vscale x 12 x float> [[ZT]], i64 8)
768 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
769 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv4f32(<vscale x 4 x float> [[TMP0]], <vscale x 4 x float> [[TMP1]], <vscale x 4 x float> [[TMP2]], <vscale x 4 x i1> [[TMP3]], ptr [[BASE:%.*]])
770 // CHECK-NEXT: ret void
772 // CPP-CHECK-LABEL: @_Z15test_svst3q_f32u10__SVBool_tPKf13svfloat32x3_t(
773 // CPP-CHECK-NEXT: entry:
774 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv12f32(<vscale x 12 x float> [[ZT:%.*]], i64 0)
775 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv12f32(<vscale x 12 x float> [[ZT]], i64 4)
776 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv12f32(<vscale x 12 x float> [[ZT]], i64 8)
777 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
778 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv4f32(<vscale x 4 x float> [[TMP0]], <vscale x 4 x float> [[TMP1]], <vscale x 4 x float> [[TMP2]], <vscale x 4 x i1> [[TMP3]], ptr [[BASE:%.*]])
779 // CPP-CHECK-NEXT: ret void
void test_svst3q_f32(svbool_t pg, const float32_t *base, svfloat32x3_t zt)
{
  SVE_ACLE_FUNC(svst3q,,_f32,)(pg, base, zt);
}
786 // CHECK-LABEL: @test_svst3q_f64(
787 // CHECK-NEXT: entry:
788 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv6f64(<vscale x 6 x double> [[ZT:%.*]], i64 0)
789 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv6f64(<vscale x 6 x double> [[ZT]], i64 2)
790 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv6f64(<vscale x 6 x double> [[ZT]], i64 4)
791 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
792 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv2f64(<vscale x 2 x double> [[TMP0]], <vscale x 2 x double> [[TMP1]], <vscale x 2 x double> [[TMP2]], <vscale x 2 x i1> [[TMP3]], ptr [[BASE:%.*]])
793 // CHECK-NEXT: ret void
795 // CPP-CHECK-LABEL: @_Z15test_svst3q_f64u10__SVBool_tPKd13svfloat64x3_t(
796 // CPP-CHECK-NEXT: entry:
797 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv6f64(<vscale x 6 x double> [[ZT:%.*]], i64 0)
798 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv6f64(<vscale x 6 x double> [[ZT]], i64 2)
799 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv6f64(<vscale x 6 x double> [[ZT]], i64 4)
800 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
801 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv2f64(<vscale x 2 x double> [[TMP0]], <vscale x 2 x double> [[TMP1]], <vscale x 2 x double> [[TMP2]], <vscale x 2 x i1> [[TMP3]], ptr [[BASE:%.*]])
802 // CPP-CHECK-NEXT: ret void
void test_svst3q_f64(svbool_t pg, const float64_t *base, svfloat64x3_t zt)
{
  SVE_ACLE_FUNC(svst3q,,_f64,)(pg, base, zt);
}
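// Note (editorial comment): the _vnum variants below take an extra int64_t vnum
// argument; the base pointer is advanced by vnum whole vectors, which appears in
// the generated IR as a getelementptr over a scalable vector type ahead of the
// st3q intrinsic call.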
809 // CHECK-LABEL: @test_svst3q_vnum_u8(
810 // CHECK-NEXT: entry:
811 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT:%.*]], i64 0)
812 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT]], i64 16)
813 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT]], i64 32)
814 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 16 x i8>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
815 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], ptr [[TMP3]])
816 // CHECK-NEXT: ret void
818 // CPP-CHECK-LABEL: @_Z19test_svst3q_vnum_u8u10__SVBool_tPKhl11svuint8x3_t(
819 // CPP-CHECK-NEXT: entry:
820 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT:%.*]], i64 0)
821 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT]], i64 16)
822 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT]], i64 32)
823 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 16 x i8>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
824 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], ptr [[TMP3]])
825 // CPP-CHECK-NEXT: ret void
void test_svst3q_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum, svuint8x3_t zt)
{
  SVE_ACLE_FUNC(svst3q_vnum_,,u8,)(pg, base, vnum, zt);
}
832 // CHECK-LABEL: @test_svst3q_vnum_s8(
833 // CHECK-NEXT: entry:
834 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT:%.*]], i64 0)
835 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT]], i64 16)
836 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT]], i64 32)
837 // CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 16 x i8>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
838 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], ptr [[TMP3]])
839 // CHECK-NEXT: ret void
841 // CPP-CHECK-LABEL: @_Z19test_svst3q_vnum_s8u10__SVBool_tPKal10svint8x3_t(
842 // CPP-CHECK-NEXT: entry:
843 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT:%.*]], i64 0)
844 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT]], i64 16)
845 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv48i8(<vscale x 48 x i8> [[ZT]], i64 32)
846 // CPP-CHECK-NEXT: [[TMP3:%.*]] = getelementptr <vscale x 16 x i8>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
847 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i1> [[PG:%.*]], ptr [[TMP3]])
848 // CPP-CHECK-NEXT: ret void
void test_svst3q_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum, svint8x3_t zt)
{
  SVE_ACLE_FUNC(svst3q_vnum,,_s8,)(pg, base, vnum, zt);
}
855 // CHECK-LABEL: @test_svst3q_vnum_u16(
856 // CHECK-NEXT: entry:
857 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT:%.*]], i64 0)
858 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT]], i64 8)
859 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT]], i64 16)
860 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
861 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 8 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
862 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i1> [[TMP3]], ptr [[TMP4]])
863 // CHECK-NEXT: ret void
865 // CPP-CHECK-LABEL: @_Z20test_svst3q_vnum_u16u10__SVBool_tPKtl12svuint16x3_t(
866 // CPP-CHECK-NEXT: entry:
867 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT:%.*]], i64 0)
868 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT]], i64 8)
869 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT]], i64 16)
870 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
871 // CPP-CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 8 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
872 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i1> [[TMP3]], ptr [[TMP4]])
873 // CPP-CHECK-NEXT: ret void
void test_svst3q_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum, svuint16x3_t zt)
{
  SVE_ACLE_FUNC(svst3q_vnum,,_u16,)(pg, base, vnum, zt);
}
880 // CHECK-LABEL: @test_svst3q_vnum_s16(
881 // CHECK-NEXT: entry:
882 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT:%.*]], i64 0)
883 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT]], i64 8)
884 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT]], i64 16)
885 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
886 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 8 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
887 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i1> [[TMP3]], ptr [[TMP4]])
888 // CHECK-NEXT: ret void
890 // CPP-CHECK-LABEL: @_Z20test_svst3q_vnum_s16u10__SVBool_tPKsl11svint16x3_t(
891 // CPP-CHECK-NEXT: entry:
892 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT:%.*]], i64 0)
893 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT]], i64 8)
894 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv24i16(<vscale x 24 x i16> [[ZT]], i64 16)
895 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
896 // CPP-CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 8 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
897 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i1> [[TMP3]], ptr [[TMP4]])
898 // CPP-CHECK-NEXT: ret void
void test_svst3q_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum, svint16x3_t zt)
{
  SVE_ACLE_FUNC(svst3q_vnum,,_s16,)(pg, base, vnum, zt);
}
905 // CHECK-LABEL: @test_svst3q_vnum_u32(
906 // CHECK-NEXT: entry:
907 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT:%.*]], i64 0)
908 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT]], i64 4)
909 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT]], i64 8)
910 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
911 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 4 x i32>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
912 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i1> [[TMP3]], ptr [[TMP4]])
913 // CHECK-NEXT: ret void
915 // CPP-CHECK-LABEL: @_Z20test_svst3q_vnum_u32u10__SVBool_tPKjl12svuint32x3_t(
916 // CPP-CHECK-NEXT: entry:
917 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT:%.*]], i64 0)
918 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT]], i64 4)
919 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT]], i64 8)
920 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
921 // CPP-CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 4 x i32>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
922 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i1> [[TMP3]], ptr [[TMP4]])
923 // CPP-CHECK-NEXT: ret void
void test_svst3q_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum, svuint32x3_t zt)
{
  SVE_ACLE_FUNC(svst3q_vnum,,_u32,)(pg, base, vnum, zt);
}
930 // CHECK-LABEL: @test_svst3q_vnum_s32(
931 // CHECK-NEXT: entry:
932 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT:%.*]], i64 0)
933 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT]], i64 4)
934 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT]], i64 8)
935 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
936 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 4 x i32>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
937 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i1> [[TMP3]], ptr [[TMP4]])
938 // CHECK-NEXT: ret void
940 // CPP-CHECK-LABEL: @_Z20test_svst3q_vnum_s32u10__SVBool_tPKil11svint32x3_t(
941 // CPP-CHECK-NEXT: entry:
942 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT:%.*]], i64 0)
943 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT]], i64 4)
944 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv12i32(<vscale x 12 x i32> [[ZT]], i64 8)
945 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
946 // CPP-CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 4 x i32>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
947 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i1> [[TMP3]], ptr [[TMP4]])
948 // CPP-CHECK-NEXT: ret void
void test_svst3q_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum, svint32x3_t zt)
{
  SVE_ACLE_FUNC(svst3q_vnum,,_s32,)(pg, base, vnum, zt);
}
955 // CHECK-LABEL: @test_svst3q_vnum_u64(
956 // CHECK-NEXT: entry:
957 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT:%.*]], i64 0)
958 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT]], i64 2)
959 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT]], i64 4)
960 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
961 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 2 x i64>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
962 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i1> [[TMP3]], ptr [[TMP4]])
963 // CHECK-NEXT: ret void
965 // CPP-CHECK-LABEL: @_Z20test_svst3q_vnum_u64u10__SVBool_tPKml12svuint64x3_t(
966 // CPP-CHECK-NEXT: entry:
967 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT:%.*]], i64 0)
968 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT]], i64 2)
969 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT]], i64 4)
970 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
971 // CPP-CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 2 x i64>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
972 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i1> [[TMP3]], ptr [[TMP4]])
973 // CPP-CHECK-NEXT: ret void
void test_svst3q_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum, svuint64x3_t zt)
{
  SVE_ACLE_FUNC(svst3q_vnum,,_u64,)(pg, base, vnum, zt);
}
980 // CHECK-LABEL: @test_svst3q_vnum_s64(
981 // CHECK-NEXT: entry:
982 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT:%.*]], i64 0)
983 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT]], i64 2)
984 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT]], i64 4)
985 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
986 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 2 x i64>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
987 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i1> [[TMP3]], ptr [[TMP4]])
988 // CHECK-NEXT: ret void
990 // CPP-CHECK-LABEL: @_Z20test_svst3q_vnum_s64u10__SVBool_tPKll11svint64x3_t(
991 // CPP-CHECK-NEXT: entry:
992 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT:%.*]], i64 0)
993 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT]], i64 2)
994 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv6i64(<vscale x 6 x i64> [[ZT]], i64 4)
995 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
996 // CPP-CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 2 x i64>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
997 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i1> [[TMP3]], ptr [[TMP4]])
998 // CPP-CHECK-NEXT: ret void
void test_svst3q_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum, svint64x3_t zt)
{
  SVE_ACLE_FUNC(svst3q_vnum,,_s64,)(pg, base, vnum, zt);
}
1005 // CHECK-LABEL: @test_svst3q_vnum_f16(
1006 // CHECK-NEXT: entry:
1007 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv24f16(<vscale x 24 x half> [[ZT:%.*]], i64 0)
1008 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv24f16(<vscale x 24 x half> [[ZT]], i64 8)
1009 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv24f16(<vscale x 24 x half> [[ZT]], i64 16)
1010 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1011 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 8 x half>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1012 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8f16(<vscale x 8 x half> [[TMP0]], <vscale x 8 x half> [[TMP1]], <vscale x 8 x half> [[TMP2]], <vscale x 8 x i1> [[TMP3]], ptr [[TMP4]])
1013 // CHECK-NEXT: ret void
1015 // CPP-CHECK-LABEL: @_Z20test_svst3q_vnum_f16u10__SVBool_tPKDhl13svfloat16x3_t(
1016 // CPP-CHECK-NEXT: entry:
1017 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv24f16(<vscale x 24 x half> [[ZT:%.*]], i64 0)
1018 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv24f16(<vscale x 24 x half> [[ZT]], i64 8)
1019 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv24f16(<vscale x 24 x half> [[ZT]], i64 16)
1020 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1021 // CPP-CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 8 x half>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1022 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8f16(<vscale x 8 x half> [[TMP0]], <vscale x 8 x half> [[TMP1]], <vscale x 8 x half> [[TMP2]], <vscale x 8 x i1> [[TMP3]], ptr [[TMP4]])
1023 // CPP-CHECK-NEXT: ret void
void test_svst3q_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum, svfloat16x3_t zt)
{
  SVE_ACLE_FUNC(svst3q_vnum,,_f16,)(pg, base, vnum, zt);
}
1030 // CHECK-LABEL: @test_svst3q_vnum_bf16(
1031 // CHECK-NEXT: entry:
1032 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv24bf16(<vscale x 24 x bfloat> [[ZT:%.*]], i64 0)
1033 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv24bf16(<vscale x 24 x bfloat> [[ZT]], i64 8)
1034 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv24bf16(<vscale x 24 x bfloat> [[ZT]], i64 16)
1035 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1036 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 8 x bfloat>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1037 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8bf16(<vscale x 8 x bfloat> [[TMP0]], <vscale x 8 x bfloat> [[TMP1]], <vscale x 8 x bfloat> [[TMP2]], <vscale x 8 x i1> [[TMP3]], ptr [[TMP4]])
1038 // CHECK-NEXT: ret void
1040 // CPP-CHECK-LABEL: @_Z21test_svst3q_vnum_bf16u10__SVBool_tPKu6__bf16l14svbfloat16x3_t(
1041 // CPP-CHECK-NEXT: entry:
1042 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv24bf16(<vscale x 24 x bfloat> [[ZT:%.*]], i64 0)
1043 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv24bf16(<vscale x 24 x bfloat> [[ZT]], i64 8)
1044 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv24bf16(<vscale x 24 x bfloat> [[ZT]], i64 16)
1045 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1046 // CPP-CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 8 x bfloat>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1047 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv8bf16(<vscale x 8 x bfloat> [[TMP0]], <vscale x 8 x bfloat> [[TMP1]], <vscale x 8 x bfloat> [[TMP2]], <vscale x 8 x i1> [[TMP3]], ptr [[TMP4]])
1048 // CPP-CHECK-NEXT: ret void
void test_svst3q_vnum_bf16(svbool_t pg, const bfloat16_t *base, int64_t vnum, svbfloat16x3_t zt)
{
  SVE_ACLE_FUNC(svst3q_vnum,,_bf16,)(pg, base, vnum, zt);
}
1055 // CHECK-LABEL: @test_svst3q_vnum_f32(
1056 // CHECK-NEXT: entry:
1057 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv12f32(<vscale x 12 x float> [[ZT:%.*]], i64 0)
1058 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv12f32(<vscale x 12 x float> [[ZT]], i64 4)
1059 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv12f32(<vscale x 12 x float> [[ZT]], i64 8)
1060 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
1061 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 4 x float>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1062 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv4f32(<vscale x 4 x float> [[TMP0]], <vscale x 4 x float> [[TMP1]], <vscale x 4 x float> [[TMP2]], <vscale x 4 x i1> [[TMP3]], ptr [[TMP4]])
1063 // CHECK-NEXT: ret void
1065 // CPP-CHECK-LABEL: @_Z20test_svst3q_vnum_f32u10__SVBool_tPKfl13svfloat32x3_t(
1066 // CPP-CHECK-NEXT: entry:
1067 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv12f32(<vscale x 12 x float> [[ZT:%.*]], i64 0)
1068 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv12f32(<vscale x 12 x float> [[ZT]], i64 4)
1069 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv12f32(<vscale x 12 x float> [[ZT]], i64 8)
1070 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
1071 // CPP-CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 4 x float>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1072 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv4f32(<vscale x 4 x float> [[TMP0]], <vscale x 4 x float> [[TMP1]], <vscale x 4 x float> [[TMP2]], <vscale x 4 x i1> [[TMP3]], ptr [[TMP4]])
1073 // CPP-CHECK-NEXT: ret void
void test_svst3q_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum, svfloat32x3_t zt)
{
  SVE_ACLE_FUNC(svst3q_vnum,,_f32,)(pg, base, vnum, zt);
}
1080 // CHECK-LABEL: @test_svst3q_vnum_f64(
1081 // CHECK-NEXT: entry:
1082 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv6f64(<vscale x 6 x double> [[ZT:%.*]], i64 0)
1083 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv6f64(<vscale x 6 x double> [[ZT]], i64 2)
1084 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv6f64(<vscale x 6 x double> [[ZT]], i64 4)
1085 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
1086 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 2 x double>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1087 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv2f64(<vscale x 2 x double> [[TMP0]], <vscale x 2 x double> [[TMP1]], <vscale x 2 x double> [[TMP2]], <vscale x 2 x i1> [[TMP3]], ptr [[TMP4]])
1088 // CHECK-NEXT: ret void
1090 // CPP-CHECK-LABEL: @_Z20test_svst3q_vnum_f64u10__SVBool_tPKdl13svfloat64x3_t(
1091 // CPP-CHECK-NEXT: entry:
1092 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv6f64(<vscale x 6 x double> [[ZT:%.*]], i64 0)
1093 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv6f64(<vscale x 6 x double> [[ZT]], i64 2)
1094 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv6f64(<vscale x 6 x double> [[ZT]], i64 4)
1095 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
1096 // CPP-CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 2 x double>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1097 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st3q.nxv2f64(<vscale x 2 x double> [[TMP0]], <vscale x 2 x double> [[TMP1]], <vscale x 2 x double> [[TMP2]], <vscale x 2 x i1> [[TMP3]], ptr [[TMP4]])
1098 // CPP-CHECK-NEXT: ret void
void test_svst3q_vnum_f64(svbool_t pg, const float64_t *base, int64_t vnum, svfloat64x3_t zt)
{
  SVE_ACLE_FUNC(svst3q_vnum,,_f64,)(pg, base, vnum, zt);
}
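// Note (editorial comment): the svst4q tests below follow the same lowering pattern
// as st3q, except that the x4 tuple is split into four subvectors with
// llvm.vector.extract and passed to llvm.aarch64.sve.st4q; predicate narrowing for
// wider element types is unchanged.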
1107 // CHECK-LABEL: @test_svst4q_u8(
1108 // CHECK-NEXT: entry:
1109 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT:%.*]], i64 0)
1110 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 16)
1111 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 32)
1112 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 48)
1113 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i1> [[PG:%.*]], ptr [[BASE:%.*]])
1114 // CHECK-NEXT: ret void
1116 // CPP-CHECK-LABEL: @_Z14test_svst4q_u8u10__SVBool_tPKh11svuint8x4_t(
1117 // CPP-CHECK-NEXT: entry:
1118 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT:%.*]], i64 0)
1119 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 16)
1120 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 32)
1121 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 48)
1122 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i1> [[PG:%.*]], ptr [[BASE:%.*]])
1123 // CPP-CHECK-NEXT: ret void
void test_svst4q_u8(svbool_t pg, const uint8_t *base, svuint8x4_t zt)
{
  SVE_ACLE_FUNC(svst4q,,_u8,)(pg, base, zt);
}
1130 // CHECK-LABEL: @test_svst4q_s8(
1131 // CHECK-NEXT: entry:
1132 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT:%.*]], i64 0)
1133 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 16)
1134 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 32)
1135 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 48)
1136 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i1> [[PG:%.*]], ptr [[BASE:%.*]])
1137 // CHECK-NEXT: ret void
1139 // CPP-CHECK-LABEL: @_Z14test_svst4q_s8u10__SVBool_tPKa10svint8x4_t(
1140 // CPP-CHECK-NEXT: entry:
1141 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT:%.*]], i64 0)
1142 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 16)
1143 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 32)
1144 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 48)
1145 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i1> [[PG:%.*]], ptr [[BASE:%.*]])
1146 // CPP-CHECK-NEXT: ret void
void test_svst4q_s8(svbool_t pg, const int8_t *base, svint8x4_t zt)
{
  SVE_ACLE_FUNC(svst4q,,_s8,)(pg, base, zt);
}
1152 // CHECK-LABEL: @test_svst4q_u16(
1153 // CHECK-NEXT: entry:
1154 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT:%.*]], i64 0)
1155 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 8)
1156 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 16)
1157 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 24)
1158 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1159 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i1> [[TMP4]], ptr [[BASE:%.*]])
1160 // CHECK-NEXT: ret void
1162 // CPP-CHECK-LABEL: @_Z15test_svst4q_u16u10__SVBool_tPKt12svuint16x4_t(
1163 // CPP-CHECK-NEXT: entry:
1164 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT:%.*]], i64 0)
1165 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 8)
1166 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 16)
1167 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 24)
1168 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1169 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i1> [[TMP4]], ptr [[BASE:%.*]])
1170 // CPP-CHECK-NEXT: ret void
void test_svst4q_u16(svbool_t pg, const uint16_t *base, svuint16x4_t zt)
{
  SVE_ACLE_FUNC(svst4q,,_u16,)(pg, base, zt);
}
1177 // CHECK-LABEL: @test_svst4q_s16(
1178 // CHECK-NEXT: entry:
1179 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT:%.*]], i64 0)
1180 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 8)
1181 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 16)
1182 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 24)
1183 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1184 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i1> [[TMP4]], ptr [[BASE:%.*]])
1185 // CHECK-NEXT: ret void
1187 // CPP-CHECK-LABEL: @_Z15test_svst4q_s16u10__SVBool_tPKs11svint16x4_t(
1188 // CPP-CHECK-NEXT: entry:
1189 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT:%.*]], i64 0)
1190 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 8)
1191 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 16)
1192 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 24)
1193 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1194 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i1> [[TMP4]], ptr [[BASE:%.*]])
1195 // CPP-CHECK-NEXT: ret void
void test_svst4q_s16(svbool_t pg, const int16_t *base, svint16x4_t zt)
{
  SVE_ACLE_FUNC(svst4q,,_s16,)(pg, base, zt);
}
1202 // CHECK-LABEL: @test_svst4q_u32(
1203 // CHECK-NEXT: entry:
1204 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT:%.*]], i64 0)
1205 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 4)
1206 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 8)
1207 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 12)
1208 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
1209 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i1> [[TMP4]], ptr [[BASE:%.*]])
1210 // CHECK-NEXT: ret void
1212 // CPP-CHECK-LABEL: @_Z15test_svst4q_u32u10__SVBool_tPKj12svuint32x4_t(
1213 // CPP-CHECK-NEXT: entry:
1214 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT:%.*]], i64 0)
1215 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 4)
1216 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 8)
1217 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 12)
1218 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
1219 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i1> [[TMP4]], ptr [[BASE:%.*]])
1220 // CPP-CHECK-NEXT: ret void
void test_svst4q_u32(svbool_t pg, const uint32_t *base, svuint32x4_t zt)
{
  SVE_ACLE_FUNC(svst4q,,_u32,)(pg, base, zt);
}
1227 // CHECK-LABEL: @test_svst4q_s32(
1228 // CHECK-NEXT: entry:
1229 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT:%.*]], i64 0)
1230 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 4)
1231 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 8)
1232 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 12)
1233 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
1234 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i1> [[TMP4]], ptr [[BASE:%.*]])
1235 // CHECK-NEXT: ret void
1237 // CPP-CHECK-LABEL: @_Z15test_svst4q_s32u10__SVBool_tPKi11svint32x4_t(
1238 // CPP-CHECK-NEXT: entry:
1239 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT:%.*]], i64 0)
1240 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 4)
1241 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 8)
1242 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 12)
1243 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
1244 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i1> [[TMP4]], ptr [[BASE:%.*]])
1245 // CPP-CHECK-NEXT: ret void
void test_svst4q_s32(svbool_t pg, const int32_t *base, svint32x4_t zt)
{
  SVE_ACLE_FUNC(svst4q,,_s32,)(pg, base, zt);
}
1252 // CHECK-LABEL: @test_svst4q_u64(
1253 // CHECK-NEXT: entry:
1254 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT:%.*]], i64 0)
1255 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 2)
1256 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 4)
1257 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 6)
1258 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
1259 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i1> [[TMP4]], ptr [[BASE:%.*]])
1260 // CHECK-NEXT: ret void
1262 // CPP-CHECK-LABEL: @_Z15test_svst4q_u64u10__SVBool_tPKm12svuint64x4_t(
1263 // CPP-CHECK-NEXT: entry:
1264 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT:%.*]], i64 0)
1265 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 2)
1266 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 4)
1267 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 6)
1268 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
1269 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i1> [[TMP4]], ptr [[BASE:%.*]])
1270 // CPP-CHECK-NEXT: ret void
void test_svst4q_u64(svbool_t pg, const uint64_t *base, svuint64x4_t zt)
{
  SVE_ACLE_FUNC(svst4q,,_u64,)(pg, base, zt);
}
1277 // CHECK-LABEL: @test_svst4q_s64(
1278 // CHECK-NEXT: entry:
1279 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT:%.*]], i64 0)
1280 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 2)
1281 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 4)
1282 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 6)
1283 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
1284 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i1> [[TMP4]], ptr [[BASE:%.*]])
1285 // CHECK-NEXT: ret void
1287 // CPP-CHECK-LABEL: @_Z15test_svst4q_s64u10__SVBool_tPKl11svint64x4_t(
1288 // CPP-CHECK-NEXT: entry:
1289 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT:%.*]], i64 0)
1290 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 2)
1291 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 4)
1292 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 6)
1293 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
1294 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i1> [[TMP4]], ptr [[BASE:%.*]])
1295 // CPP-CHECK-NEXT: ret void
void test_svst4q_s64(svbool_t pg, const int64_t *base, svint64x4_t zt)
{
  SVE_ACLE_FUNC(svst4q,,_s64,)(pg, base, zt);
}
1302 // CHECK-LABEL: @test_svst4q_f16(
1303 // CHECK-NEXT: entry:
1304 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[ZT:%.*]], i64 0)
1305 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[ZT]], i64 8)
1306 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[ZT]], i64 16)
1307 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[ZT]], i64 24)
1308 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1309 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8f16(<vscale x 8 x half> [[TMP0]], <vscale x 8 x half> [[TMP1]], <vscale x 8 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], <vscale x 8 x i1> [[TMP4]], ptr [[BASE:%.*]])
1310 // CHECK-NEXT: ret void
1312 // CPP-CHECK-LABEL: @_Z15test_svst4q_f16u10__SVBool_tPKDh13svfloat16x4_t(
1313 // CPP-CHECK-NEXT: entry:
1314 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[ZT:%.*]], i64 0)
1315 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[ZT]], i64 8)
1316 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[ZT]], i64 16)
1317 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[ZT]], i64 24)
1318 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1319 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8f16(<vscale x 8 x half> [[TMP0]], <vscale x 8 x half> [[TMP1]], <vscale x 8 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], <vscale x 8 x i1> [[TMP4]], ptr [[BASE:%.*]])
1320 // CPP-CHECK-NEXT: ret void
void test_svst4q_f16(svbool_t pg, const float16_t *base, svfloat16x4_t zt)
{
  SVE_ACLE_FUNC(svst4q,,_f16,)(pg, base, zt);
}
1327 // CHECK-LABEL: @test_svst4q_bf16(
1328 // CHECK-NEXT: entry:
1329 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[ZT:%.*]], i64 0)
1330 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[ZT]], i64 8)
1331 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[ZT]], i64 16)
1332 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[ZT]], i64 24)
1333 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1334 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8bf16(<vscale x 8 x bfloat> [[TMP0]], <vscale x 8 x bfloat> [[TMP1]], <vscale x 8 x bfloat> [[TMP2]], <vscale x 8 x bfloat> [[TMP3]], <vscale x 8 x i1> [[TMP4]], ptr [[BASE:%.*]])
1335 // CHECK-NEXT: ret void
1337 // CPP-CHECK-LABEL: @_Z16test_svst4q_bf16u10__SVBool_tPKu6__bf1614svbfloat16x4_t(
1338 // CPP-CHECK-NEXT: entry:
1339 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[ZT:%.*]], i64 0)
1340 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[ZT]], i64 8)
1341 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[ZT]], i64 16)
1342 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[ZT]], i64 24)
1343 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1344 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8bf16(<vscale x 8 x bfloat> [[TMP0]], <vscale x 8 x bfloat> [[TMP1]], <vscale x 8 x bfloat> [[TMP2]], <vscale x 8 x bfloat> [[TMP3]], <vscale x 8 x i1> [[TMP4]], ptr [[BASE:%.*]])
1345 // CPP-CHECK-NEXT: ret void
void test_svst4q_bf16(svbool_t pg, const bfloat16_t *base, svbfloat16x4_t zt)
{
  SVE_ACLE_FUNC(svst4q,,_bf16,)(pg, base, zt);
}
1352 // CHECK-LABEL: @test_svst4q_f32(
1353 // CHECK-NEXT: entry:
1354 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[ZT:%.*]], i64 0)
1355 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[ZT]], i64 4)
1356 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[ZT]], i64 8)
1357 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[ZT]], i64 12)
1358 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
1359 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv4f32(<vscale x 4 x float> [[TMP0]], <vscale x 4 x float> [[TMP1]], <vscale x 4 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], <vscale x 4 x i1> [[TMP4]], ptr [[BASE:%.*]])
1360 // CHECK-NEXT: ret void
1362 // CPP-CHECK-LABEL: @_Z15test_svst4q_f32u10__SVBool_tPKf13svfloat32x4_t(
1363 // CPP-CHECK-NEXT: entry:
1364 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[ZT:%.*]], i64 0)
1365 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[ZT]], i64 4)
1366 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[ZT]], i64 8)
1367 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[ZT]], i64 12)
1368 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
1369 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv4f32(<vscale x 4 x float> [[TMP0]], <vscale x 4 x float> [[TMP1]], <vscale x 4 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], <vscale x 4 x i1> [[TMP4]], ptr [[BASE:%.*]])
1370 // CPP-CHECK-NEXT: ret void
void test_svst4q_f32(svbool_t pg, const float32_t *base, svfloat32x4_t zt)
{
  SVE_ACLE_FUNC(svst4q,,_f32,)(pg, base, zt);
}
1377 // CHECK-LABEL: @test_svst4q_f64(
1378 // CHECK-NEXT: entry:
1379 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[ZT:%.*]], i64 0)
1380 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[ZT]], i64 2)
1381 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[ZT]], i64 4)
1382 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[ZT]], i64 6)
1383 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
1384 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv2f64(<vscale x 2 x double> [[TMP0]], <vscale x 2 x double> [[TMP1]], <vscale x 2 x double> [[TMP2]], <vscale x 2 x double> [[TMP3]], <vscale x 2 x i1> [[TMP4]], ptr [[BASE:%.*]])
1385 // CHECK-NEXT: ret void
1387 // CPP-CHECK-LABEL: @_Z15test_svst4q_f64u10__SVBool_tPKd13svfloat64x4_t(
1388 // CPP-CHECK-NEXT: entry:
1389 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[ZT:%.*]], i64 0)
1390 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[ZT]], i64 2)
1391 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[ZT]], i64 4)
1392 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[ZT]], i64 6)
1393 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
1394 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv2f64(<vscale x 2 x double> [[TMP0]], <vscale x 2 x double> [[TMP1]], <vscale x 2 x double> [[TMP2]], <vscale x 2 x double> [[TMP3]], <vscale x 2 x i1> [[TMP4]], ptr [[BASE:%.*]])
1395 // CPP-CHECK-NEXT: ret void
void test_svst4q_f64(svbool_t pg, const float64_t *base, svfloat64x4_t zt)
{
  SVE_ACLE_FUNC(svst4q,,_f64,)(pg, base, zt);
}
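// A minimal usage sketch rather than a checked test (the helper name
// store_quad_group is made up for illustration): the plain svst4q forms above
// store the four vectors of a tuple to the base address as interleaved
// 128-bit quadwords, under control of the predicate.
void store_quad_group(float64_t *dst, svfloat64x4_t zt)
{
  svbool_t pg = svptrue_b64();                 // all lanes active
  SVE_ACLE_FUNC(svst4q,,_f64,)(pg, dst, zt);   // same form exercised above
}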
1402 // CHECK-LABEL: @test_svst4q_vnum_u8(
1403 // CHECK-NEXT: entry:
1404 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT:%.*]], i64 0)
1405 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 16)
1406 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 32)
1407 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 48)
1408 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 16 x i8>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1409 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i1> [[PG:%.*]], ptr [[TMP4]])
1410 // CHECK-NEXT: ret void
1412 // CPP-CHECK-LABEL: @_Z19test_svst4q_vnum_u8u10__SVBool_tPKhl11svuint8x4_t(
1413 // CPP-CHECK-NEXT: entry:
1414 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT:%.*]], i64 0)
1415 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 16)
1416 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 32)
1417 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 48)
1418 // CPP-CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 16 x i8>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1419 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i1> [[PG:%.*]], ptr [[TMP4]])
1420 // CPP-CHECK-NEXT: ret void
void test_svst4q_vnum_u8(svbool_t pg, const uint8_t *base, int64_t vnum, svuint8x4_t zt)
{
  SVE_ACLE_FUNC(svst4q_vnum_,,u8,)(pg, base, vnum, zt);
}
1427 // CHECK-LABEL: @test_svst4q_vnum_s8(
1428 // CHECK-NEXT: entry:
1429 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT:%.*]], i64 0)
1430 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 16)
1431 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 32)
1432 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 48)
1433 // CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 16 x i8>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1434 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i1> [[PG:%.*]], ptr [[TMP4]])
1435 // CHECK-NEXT: ret void
1437 // CPP-CHECK-LABEL: @_Z19test_svst4q_vnum_s8u10__SVBool_tPKal10svint8x4_t(
1438 // CPP-CHECK-NEXT: entry:
1439 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT:%.*]], i64 0)
1440 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 16)
1441 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 32)
1442 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 16 x i8> @llvm.vector.extract.nxv16i8.nxv64i8(<vscale x 64 x i8> [[ZT]], i64 48)
1443 // CPP-CHECK-NEXT: [[TMP4:%.*]] = getelementptr <vscale x 16 x i8>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1444 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv16i8(<vscale x 16 x i8> [[TMP0]], <vscale x 16 x i8> [[TMP1]], <vscale x 16 x i8> [[TMP2]], <vscale x 16 x i8> [[TMP3]], <vscale x 16 x i1> [[PG:%.*]], ptr [[TMP4]])
1445 // CPP-CHECK-NEXT: ret void
void test_svst4q_vnum_s8(svbool_t pg, const int8_t *base, int64_t vnum, svint8x4_t zt)
{
  SVE_ACLE_FUNC(svst4q_vnum,,_s8,)(pg, base, vnum, zt);
}
1452 // CHECK-LABEL: @test_svst4q_vnum_u16(
1453 // CHECK-NEXT: entry:
1454 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT:%.*]], i64 0)
1455 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 8)
1456 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 16)
1457 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 24)
1458 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1459 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 8 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1460 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i1> [[TMP4]], ptr [[TMP5]])
1461 // CHECK-NEXT: ret void
1463 // CPP-CHECK-LABEL: @_Z20test_svst4q_vnum_u16u10__SVBool_tPKtl12svuint16x4_t(
1464 // CPP-CHECK-NEXT: entry:
1465 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT:%.*]], i64 0)
1466 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 8)
1467 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 16)
1468 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 24)
1469 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1470 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 8 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1471 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i1> [[TMP4]], ptr [[TMP5]])
1472 // CPP-CHECK-NEXT: ret void
void test_svst4q_vnum_u16(svbool_t pg, const uint16_t *base, int64_t vnum, svuint16x4_t zt)
{
  SVE_ACLE_FUNC(svst4q_vnum,,_u16,)(pg, base, vnum, zt);
}
1479 // CHECK-LABEL: @test_svst4q_vnum_s16(
1480 // CHECK-NEXT: entry:
1481 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT:%.*]], i64 0)
1482 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 8)
1483 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 16)
1484 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 24)
1485 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1486 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 8 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1487 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i1> [[TMP4]], ptr [[TMP5]])
1488 // CHECK-NEXT: ret void
1490 // CPP-CHECK-LABEL: @_Z20test_svst4q_vnum_s16u10__SVBool_tPKsl11svint16x4_t(
1491 // CPP-CHECK-NEXT: entry:
1492 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT:%.*]], i64 0)
1493 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 8)
1494 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 16)
1495 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x i16> @llvm.vector.extract.nxv8i16.nxv32i16(<vscale x 32 x i16> [[ZT]], i64 24)
1496 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1497 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 8 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1498 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8i16(<vscale x 8 x i16> [[TMP0]], <vscale x 8 x i16> [[TMP1]], <vscale x 8 x i16> [[TMP2]], <vscale x 8 x i16> [[TMP3]], <vscale x 8 x i1> [[TMP4]], ptr [[TMP5]])
1499 // CPP-CHECK-NEXT: ret void
void test_svst4q_vnum_s16(svbool_t pg, const int16_t *base, int64_t vnum, svint16x4_t zt)
{
  SVE_ACLE_FUNC(svst4q_vnum,,_s16,)(pg, base, vnum, zt);
}
1506 // CHECK-LABEL: @test_svst4q_vnum_u32(
1507 // CHECK-NEXT: entry:
1508 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT:%.*]], i64 0)
1509 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 4)
1510 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 8)
1511 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 12)
1512 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
1513 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 4 x i32>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1514 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i1> [[TMP4]], ptr [[TMP5]])
1515 // CHECK-NEXT: ret void
1517 // CPP-CHECK-LABEL: @_Z20test_svst4q_vnum_u32u10__SVBool_tPKjl12svuint32x4_t(
1518 // CPP-CHECK-NEXT: entry:
1519 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT:%.*]], i64 0)
1520 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 4)
1521 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 8)
1522 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 12)
1523 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
1524 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 4 x i32>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1525 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i1> [[TMP4]], ptr [[TMP5]])
1526 // CPP-CHECK-NEXT: ret void
void test_svst4q_vnum_u32(svbool_t pg, const uint32_t *base, int64_t vnum, svuint32x4_t zt)
{
  SVE_ACLE_FUNC(svst4q_vnum,,_u32,)(pg, base, vnum, zt);
}
1533 // CHECK-LABEL: @test_svst4q_vnum_s32(
1534 // CHECK-NEXT: entry:
1535 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT:%.*]], i64 0)
1536 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 4)
1537 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 8)
1538 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 12)
1539 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
1540 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 4 x i32>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1541 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i1> [[TMP4]], ptr [[TMP5]])
1542 // CHECK-NEXT: ret void
1544 // CPP-CHECK-LABEL: @_Z20test_svst4q_vnum_s32u10__SVBool_tPKil11svint32x4_t(
1545 // CPP-CHECK-NEXT: entry:
1546 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT:%.*]], i64 0)
1547 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 4)
1548 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 8)
1549 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> [[ZT]], i64 12)
1550 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
1551 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 4 x i32>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1552 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv4i32(<vscale x 4 x i32> [[TMP0]], <vscale x 4 x i32> [[TMP1]], <vscale x 4 x i32> [[TMP2]], <vscale x 4 x i32> [[TMP3]], <vscale x 4 x i1> [[TMP4]], ptr [[TMP5]])
1553 // CPP-CHECK-NEXT: ret void
void test_svst4q_vnum_s32(svbool_t pg, const int32_t *base, int64_t vnum, svint32x4_t zt)
{
  SVE_ACLE_FUNC(svst4q_vnum,,_s32,)(pg, base, vnum, zt);
}
1560 // CHECK-LABEL: @test_svst4q_vnum_u64(
1561 // CHECK-NEXT: entry:
1562 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT:%.*]], i64 0)
1563 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 2)
1564 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 4)
1565 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 6)
1566 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
1567 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 2 x i64>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1568 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i1> [[TMP4]], ptr [[TMP5]])
1569 // CHECK-NEXT: ret void
1571 // CPP-CHECK-LABEL: @_Z20test_svst4q_vnum_u64u10__SVBool_tPKml12svuint64x4_t(
1572 // CPP-CHECK-NEXT: entry:
1573 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT:%.*]], i64 0)
1574 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 2)
1575 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 4)
1576 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 6)
1577 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
1578 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 2 x i64>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1579 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i1> [[TMP4]], ptr [[TMP5]])
1580 // CPP-CHECK-NEXT: ret void
void test_svst4q_vnum_u64(svbool_t pg, const uint64_t *base, int64_t vnum, svuint64x4_t zt)
{
  SVE_ACLE_FUNC(svst4q_vnum,,_u64,)(pg, base, vnum, zt);
}
1587 // CHECK-LABEL: @test_svst4q_vnum_s64(
1588 // CHECK-NEXT: entry:
1589 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT:%.*]], i64 0)
1590 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 2)
1591 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 4)
1592 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 6)
1593 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
1594 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 2 x i64>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1595 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i1> [[TMP4]], ptr [[TMP5]])
1596 // CHECK-NEXT: ret void
1598 // CPP-CHECK-LABEL: @_Z20test_svst4q_vnum_s64u10__SVBool_tPKll11svint64x4_t(
1599 // CPP-CHECK-NEXT: entry:
1600 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT:%.*]], i64 0)
1601 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 2)
1602 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 4)
1603 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x i64> @llvm.vector.extract.nxv2i64.nxv8i64(<vscale x 8 x i64> [[ZT]], i64 6)
1604 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
1605 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 2 x i64>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1606 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv2i64(<vscale x 2 x i64> [[TMP0]], <vscale x 2 x i64> [[TMP1]], <vscale x 2 x i64> [[TMP2]], <vscale x 2 x i64> [[TMP3]], <vscale x 2 x i1> [[TMP4]], ptr [[TMP5]])
1607 // CPP-CHECK-NEXT: ret void
void test_svst4q_vnum_s64(svbool_t pg, const int64_t *base, int64_t vnum, svint64x4_t zt)
{
  SVE_ACLE_FUNC(svst4q_vnum,,_s64,)(pg, base, vnum, zt);
}
1614 // CHECK-LABEL: @test_svst4q_vnum_f16(
1615 // CHECK-NEXT: entry:
1616 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[ZT:%.*]], i64 0)
1617 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[ZT]], i64 8)
1618 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[ZT]], i64 16)
1619 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[ZT]], i64 24)
1620 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1621 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 8 x half>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1622 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8f16(<vscale x 8 x half> [[TMP0]], <vscale x 8 x half> [[TMP1]], <vscale x 8 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], <vscale x 8 x i1> [[TMP4]], ptr [[TMP5]])
1623 // CHECK-NEXT: ret void
1625 // CPP-CHECK-LABEL: @_Z20test_svst4q_vnum_f16u10__SVBool_tPKDhl13svfloat16x4_t(
1626 // CPP-CHECK-NEXT: entry:
1627 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[ZT:%.*]], i64 0)
1628 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[ZT]], i64 8)
1629 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[ZT]], i64 16)
1630 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x half> @llvm.vector.extract.nxv8f16.nxv32f16(<vscale x 32 x half> [[ZT]], i64 24)
1631 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1632 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 8 x half>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1633 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8f16(<vscale x 8 x half> [[TMP0]], <vscale x 8 x half> [[TMP1]], <vscale x 8 x half> [[TMP2]], <vscale x 8 x half> [[TMP3]], <vscale x 8 x i1> [[TMP4]], ptr [[TMP5]])
1634 // CPP-CHECK-NEXT: ret void
void test_svst4q_vnum_f16(svbool_t pg, const float16_t *base, int64_t vnum, svfloat16x4_t zt)
{
  SVE_ACLE_FUNC(svst4q_vnum,,_f16,)(pg, base, vnum, zt);
}
1641 // CHECK-LABEL: @test_svst4q_vnum_bf16(
1642 // CHECK-NEXT: entry:
1643 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[ZT:%.*]], i64 0)
1644 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[ZT]], i64 8)
1645 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[ZT]], i64 16)
1646 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[ZT]], i64 24)
1647 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1648 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 8 x bfloat>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1649 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8bf16(<vscale x 8 x bfloat> [[TMP0]], <vscale x 8 x bfloat> [[TMP1]], <vscale x 8 x bfloat> [[TMP2]], <vscale x 8 x bfloat> [[TMP3]], <vscale x 8 x i1> [[TMP4]], ptr [[TMP5]])
1650 // CHECK-NEXT: ret void
1652 // CPP-CHECK-LABEL: @_Z21test_svst4q_vnum_bf16u10__SVBool_tPKu6__bf16l14svbfloat16x4_t(
1653 // CPP-CHECK-NEXT: entry:
1654 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[ZT:%.*]], i64 0)
1655 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[ZT]], i64 8)
1656 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[ZT]], i64 16)
1657 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 8 x bfloat> @llvm.vector.extract.nxv8bf16.nxv32bf16(<vscale x 32 x bfloat> [[ZT]], i64 24)
1658 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
1659 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 8 x bfloat>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1660 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv8bf16(<vscale x 8 x bfloat> [[TMP0]], <vscale x 8 x bfloat> [[TMP1]], <vscale x 8 x bfloat> [[TMP2]], <vscale x 8 x bfloat> [[TMP3]], <vscale x 8 x i1> [[TMP4]], ptr [[TMP5]])
1661 // CPP-CHECK-NEXT: ret void
void test_svst4q_vnum_bf16(svbool_t pg, const bfloat16_t *base, int64_t vnum, svbfloat16x4_t zt)
{
  SVE_ACLE_FUNC(svst4q_vnum,,_bf16,)(pg, base, vnum, zt);
}
1668 // CHECK-LABEL: @test_svst4q_vnum_f32(
1669 // CHECK-NEXT: entry:
1670 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[ZT:%.*]], i64 0)
1671 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[ZT]], i64 4)
1672 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[ZT]], i64 8)
1673 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[ZT]], i64 12)
1674 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
1675 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 4 x float>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1676 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv4f32(<vscale x 4 x float> [[TMP0]], <vscale x 4 x float> [[TMP1]], <vscale x 4 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], <vscale x 4 x i1> [[TMP4]], ptr [[TMP5]])
1677 // CHECK-NEXT: ret void
1679 // CPP-CHECK-LABEL: @_Z20test_svst4q_vnum_f32u10__SVBool_tPKfl13svfloat32x4_t(
1680 // CPP-CHECK-NEXT: entry:
1681 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[ZT:%.*]], i64 0)
1682 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[ZT]], i64 4)
1683 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[ZT]], i64 8)
1684 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 4 x float> @llvm.vector.extract.nxv4f32.nxv16f32(<vscale x 16 x float> [[ZT]], i64 12)
1685 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
1686 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 4 x float>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1687 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv4f32(<vscale x 4 x float> [[TMP0]], <vscale x 4 x float> [[TMP1]], <vscale x 4 x float> [[TMP2]], <vscale x 4 x float> [[TMP3]], <vscale x 4 x i1> [[TMP4]], ptr [[TMP5]])
1688 // CPP-CHECK-NEXT: ret void
void test_svst4q_vnum_f32(svbool_t pg, const float32_t *base, int64_t vnum, svfloat32x4_t zt)
{
  SVE_ACLE_FUNC(svst4q_vnum,,_f32,)(pg, base, vnum, zt);
}
1695 // CHECK-LABEL: @test_svst4q_vnum_f64(
1696 // CHECK-NEXT: entry:
1697 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[ZT:%.*]], i64 0)
1698 // CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[ZT]], i64 2)
1699 // CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[ZT]], i64 4)
1700 // CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[ZT]], i64 6)
1701 // CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
1702 // CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 2 x double>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1703 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv2f64(<vscale x 2 x double> [[TMP0]], <vscale x 2 x double> [[TMP1]], <vscale x 2 x double> [[TMP2]], <vscale x 2 x double> [[TMP3]], <vscale x 2 x i1> [[TMP4]], ptr [[TMP5]])
1704 // CHECK-NEXT: ret void
1706 // CPP-CHECK-LABEL: @_Z20test_svst4q_vnum_f64u10__SVBool_tPKdl13svfloat64x4_t(
1707 // CPP-CHECK-NEXT: entry:
1708 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[ZT:%.*]], i64 0)
1709 // CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[ZT]], i64 2)
1710 // CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[ZT]], i64 4)
1711 // CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call <vscale x 2 x double> @llvm.vector.extract.nxv2f64.nxv8f64(<vscale x 8 x double> [[ZT]], i64 6)
1712 // CPP-CHECK-NEXT: [[TMP4:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
1713 // CPP-CHECK-NEXT: [[TMP5:%.*]] = getelementptr <vscale x 2 x double>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
1714 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st4q.nxv2f64(<vscale x 2 x double> [[TMP0]], <vscale x 2 x double> [[TMP1]], <vscale x 2 x double> [[TMP2]], <vscale x 2 x double> [[TMP3]], <vscale x 2 x i1> [[TMP4]], ptr [[TMP5]])
1715 // CPP-CHECK-NEXT: ret void
void test_svst4q_vnum_f64(svbool_t pg, const float64_t *base, int64_t vnum, svfloat64x4_t zt)
{
  SVE_ACLE_FUNC(svst4q_vnum,,_f64,)(pg, base, vnum, zt);
}
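// A minimal usage sketch, not a checked test (block_store_pair is a made-up
// name): judging from the getelementptr over a single vector type in the
// checks above, the _vnum forms step the base address by vnum whole vector
// registers (vnum * svcntb() bytes), so moving past one four-vector group
// takes vnum += 4.
void block_store_pair(uint32_t *dst, svuint32x4_t zt)
{
  svbool_t pg = svptrue_b32();
  SVE_ACLE_FUNC(svst4q_vnum,,_u32,)(pg, dst, 0, zt);   // group starting at dst
  SVE_ACLE_FUNC(svst4q_vnum,,_u32,)(pg, dst, 4, zt);   // next group, four registers on
}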
1722 // Scatter for 128 bits
1723 // vector base + scalar offset
1724 // CHECK-LABEL: @test_svst1q_scatter_u64base_offset_u64(
1725 // CHECK-NEXT: entry:
1726 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1727 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1728 // CHECK-NEXT: ret void
1730 // CPP-CHECK-LABEL: @_Z38test_svst1q_scatter_u64base_offset_u64u10__SVBool_tu12__SVUint64_tlS0_(
1731 // CPP-CHECK-NEXT: entry:
1732 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1733 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1734 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_offset_u64(svbool_t pg, svuint64_t base, int64_t offset, svuint64_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base, _offset, _u64)(pg, base, offset, data);
}
1741 // CHECK-LABEL: @test_svst1q_scatter_u64base_offset_s64(
1742 // CHECK-NEXT: entry:
1743 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1744 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1745 // CHECK-NEXT: ret void
1747 // CPP-CHECK-LABEL: @_Z38test_svst1q_scatter_u64base_offset_s64u10__SVBool_tu12__SVUint64_tlu11__SVInt64_t(
1748 // CPP-CHECK-NEXT: entry:
1749 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1750 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1751 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_offset_s64(svbool_t pg, svuint64_t base, int64_t offset, svint64_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base, _offset, _s64)(pg, base, offset, data);
}
1759 // CHECK-LABEL: @test_svst1q_scatter_u64base_offset_u32(
1760 // CHECK-NEXT: entry:
1761 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1762 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv4i32.nxv2i64(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1763 // CHECK-NEXT: ret void
1765 // CPP-CHECK-LABEL: @_Z38test_svst1q_scatter_u64base_offset_u32u10__SVBool_tu12__SVUint64_tlu12__SVUint32_t(
1766 // CPP-CHECK-NEXT: entry:
1767 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1768 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv4i32.nxv2i64(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1769 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_offset_u32(svbool_t pg, svuint64_t base, int64_t offset, svuint32_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base, _offset, _u32)(pg, base, offset, data);
}
1776 // CHECK-LABEL: @test_svst1q_scatter_u64base_offset_s32(
1777 // CHECK-NEXT: entry:
1778 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1779 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv4i32.nxv2i64(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1780 // CHECK-NEXT: ret void
1782 // CPP-CHECK-LABEL: @_Z38test_svst1q_scatter_u64base_offset_s32u10__SVBool_tu12__SVUint64_tlu11__SVInt32_t(
1783 // CPP-CHECK-NEXT: entry:
1784 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1785 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv4i32.nxv2i64(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1786 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_offset_s32(svbool_t pg, svuint64_t base, int64_t offset, svint32_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base, _offset, _s32)(pg, base, offset, data);
}
1793 // CHECK-LABEL: @test_svst1q_scatter_u64base_offset_u16(
1794 // CHECK-NEXT: entry:
1795 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1796 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8i16.nxv2i64(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1797 // CHECK-NEXT: ret void
1799 // CPP-CHECK-LABEL: @_Z38test_svst1q_scatter_u64base_offset_u16u10__SVBool_tu12__SVUint64_tlu12__SVUint16_t(
1800 // CPP-CHECK-NEXT: entry:
1801 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1802 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8i16.nxv2i64(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1803 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_offset_u16(svbool_t pg, svuint64_t base, int64_t offset, svuint16_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base, _offset, _u16)(pg, base, offset, data);
}
1810 // CHECK-LABEL: @test_svst1q_scatter_u64base_offset_s16(
1811 // CHECK-NEXT: entry:
1812 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1813 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8i16.nxv2i64(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1814 // CHECK-NEXT: ret void
1816 // CPP-CHECK-LABEL: @_Z38test_svst1q_scatter_u64base_offset_s16u10__SVBool_tu12__SVUint64_tlu11__SVInt16_t(
1817 // CPP-CHECK-NEXT: entry:
1818 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1819 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8i16.nxv2i64(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1820 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_offset_s16(svbool_t pg, svuint64_t base, int64_t offset, svint16_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base, _offset, _s16)(pg, base, offset, data);
}
1827 // CHECK-LABEL: @test_svst1q_scatter_u64base_offset_u8(
1828 // CHECK-NEXT: entry:
1829 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1830 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv16i8.nxv2i64(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1831 // CHECK-NEXT: ret void
1833 // CPP-CHECK-LABEL: @_Z37test_svst1q_scatter_u64base_offset_u8u10__SVBool_tu12__SVUint64_tlu11__SVUint8_t(
1834 // CPP-CHECK-NEXT: entry:
1835 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1836 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv16i8.nxv2i64(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1837 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_offset_u8(svbool_t pg, svuint64_t base, int64_t offset, svuint8_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base, _offset, _u8)(pg, base, offset, data);
}
1844 // CHECK-LABEL: @test_svst1q_scatter_u64base_offset_s8(
1845 // CHECK-NEXT: entry:
1846 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1847 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv16i8.nxv2i64(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1848 // CHECK-NEXT: ret void
1850 // CPP-CHECK-LABEL: @_Z37test_svst1q_scatter_u64base_offset_s8u10__SVBool_tu12__SVUint64_tlu10__SVInt8_t(
1851 // CPP-CHECK-NEXT: entry:
1852 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1853 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv16i8.nxv2i64(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1854 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_offset_s8(svbool_t pg, svuint64_t base, int64_t offset, svint8_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base, _offset, _s8)(pg, base, offset, data);
}
1861 // CHECK-LABEL: @test_svst1q_scatter_u64base_offset_f64(
1862 // CHECK-NEXT: entry:
1863 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1864 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1865 // CHECK-NEXT: ret void
1867 // CPP-CHECK-LABEL: @_Z38test_svst1q_scatter_u64base_offset_f64u10__SVBool_tu12__SVUint64_tlu13__SVFloat64_t(
1868 // CPP-CHECK-NEXT: entry:
1869 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1870 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1871 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_offset_f64(svbool_t pg, svuint64_t base, int64_t offset, svfloat64_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base, _offset, _f64)(pg, base, offset, data);
}
1878 // CHECK-LABEL: @test_svst1q_scatter_u64base_offset_f32(
1879 // CHECK-NEXT: entry:
1880 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1881 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv4f32.nxv2i64(<vscale x 4 x float> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1882 // CHECK-NEXT: ret void
1884 // CPP-CHECK-LABEL: @_Z38test_svst1q_scatter_u64base_offset_f32u10__SVBool_tu12__SVUint64_tlu13__SVFloat32_t(
1885 // CPP-CHECK-NEXT: entry:
1886 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1887 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv4f32.nxv2i64(<vscale x 4 x float> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1888 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_offset_f32(svbool_t pg, svuint64_t base, int64_t offset, svfloat32_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base, _offset, _f32)(pg, base, offset, data);
}
1895 // CHECK-LABEL: @test_svst1q_scatter_u64base_offset_f16(
1896 // CHECK-NEXT: entry:
1897 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1898 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8f16.nxv2i64(<vscale x 8 x half> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1899 // CHECK-NEXT: ret void
1901 // CPP-CHECK-LABEL: @_Z38test_svst1q_scatter_u64base_offset_f16u10__SVBool_tu12__SVUint64_tlu13__SVFloat16_t(
1902 // CPP-CHECK-NEXT: entry:
1903 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1904 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8f16.nxv2i64(<vscale x 8 x half> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1905 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_offset_f16(svbool_t pg, svuint64_t base, int64_t offset, svfloat16_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base, _offset, _f16)(pg, base, offset, data);
}
1912 // CHECK-LABEL: @test_svst1q_scatter_u64base_offset_bf16(
1913 // CHECK-NEXT: entry:
1914 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1915 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8bf16.nxv2i64(<vscale x 8 x bfloat> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1916 // CHECK-NEXT: ret void
1918 // CPP-CHECK-LABEL: @_Z39test_svst1q_scatter_u64base_offset_bf16u10__SVBool_tu12__SVUint64_tlu14__SVBfloat16_t(
1919 // CPP-CHECK-NEXT: entry:
1920 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1921 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8bf16.nxv2i64(<vscale x 8 x bfloat> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[OFFSET:%.*]])
1922 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_offset_bf16(svbool_t pg, svuint64_t base, int64_t offset, svbfloat16_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base, _offset, _bf16)(pg, base, offset, data);
}
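// A minimal usage sketch, not a checked test (scatter_at_addresses is a
// made-up name): the _u64base_offset forms above treat each 64-bit lane of
// the base vector as an absolute address and add the same scalar byte offset
// to every lane before the 128-bit scatter store.
void scatter_at_addresses(svuint64_t addrs, svuint64_t data)
{
  svbool_t pg = svptrue_b64();
  SVE_ACLE_FUNC(svst1q_scatter, _u64base, _offset, _u64)(pg, addrs, 16, data);   // addrs[i] + 16 bytes
}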
1929 // Vector Base and no Offset
1930 // CHECK-LABEL: @test_svst1q_scatter_u64base_u64(
1931 // CHECK-NEXT: entry:
1932 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1933 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
1934 // CHECK-NEXT: ret void
1936 // CPP-CHECK-LABEL: @_Z31test_svst1q_scatter_u64base_u64u10__SVBool_tu12__SVUint64_tS0_(
1937 // CPP-CHECK-NEXT: entry:
1938 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1939 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
1940 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_u64(svbool_t pg, svuint64_t base, svuint64_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base,, _u64)(pg, base, data);
}
1947 // CHECK-LABEL: @test_svst1q_scatter_u64base_s64(
1948 // CHECK-NEXT: entry:
1949 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1950 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
1951 // CHECK-NEXT: ret void
1953 // CPP-CHECK-LABEL: @_Z31test_svst1q_scatter_u64base_s64u10__SVBool_tu12__SVUint64_tu11__SVInt64_t(
1954 // CPP-CHECK-NEXT: entry:
1955 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1956 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
1957 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_s64(svbool_t pg, svuint64_t base, svint64_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base,, _s64)(pg, base, data);
}
1964 // CHECK-LABEL: @test_svst1q_scatter_u64base_u32(
1965 // CHECK-NEXT: entry:
1966 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1967 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv4i32.nxv2i64(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
1968 // CHECK-NEXT: ret void
1970 // CPP-CHECK-LABEL: @_Z31test_svst1q_scatter_u64base_u32u10__SVBool_tu12__SVUint64_tu12__SVUint32_t(
1971 // CPP-CHECK-NEXT: entry:
1972 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1973 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv4i32.nxv2i64(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
1974 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_u32(svbool_t pg, svuint64_t base, svuint32_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base,, _u32)(pg, base, data);
}
1981 // CHECK-LABEL: @test_svst1q_scatter_u64base_s32(
1982 // CHECK-NEXT: entry:
1983 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1984 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv4i32.nxv2i64(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
1985 // CHECK-NEXT: ret void
1987 // CPP-CHECK-LABEL: @_Z31test_svst1q_scatter_u64base_s32u10__SVBool_tu12__SVUint64_tu11__SVInt32_t(
1988 // CPP-CHECK-NEXT: entry:
1989 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
1990 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv4i32.nxv2i64(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
1991 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_s32(svbool_t pg, svuint64_t base, svint32_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base,, _s32)(pg, base, data);
}
1998 // CHECK-LABEL: @test_svst1q_scatter_u64base_u16(
1999 // CHECK-NEXT: entry:
2000 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2001 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8i16.nxv2i64(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
2002 // CHECK-NEXT: ret void
2004 // CPP-CHECK-LABEL: @_Z31test_svst1q_scatter_u64base_u16u10__SVBool_tu12__SVUint64_tu12__SVUint16_t(
2005 // CPP-CHECK-NEXT: entry:
2006 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2007 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8i16.nxv2i64(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
2008 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_u16(svbool_t pg, svuint64_t base, svuint16_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base,, _u16)(pg, base, data);
}
2015 // CHECK-LABEL: @test_svst1q_scatter_u64base_s16(
2016 // CHECK-NEXT: entry:
2017 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2018 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8i16.nxv2i64(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
2019 // CHECK-NEXT: ret void
2021 // CPP-CHECK-LABEL: @_Z31test_svst1q_scatter_u64base_s16u10__SVBool_tu12__SVUint64_tu11__SVInt16_t(
2022 // CPP-CHECK-NEXT: entry:
2023 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2024 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8i16.nxv2i64(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
2025 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_s16(svbool_t pg, svuint64_t base, svint16_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base,, _s16)(pg, base, data);
}
2032 // CHECK-LABEL: @test_svst1q_scatter_u64base_u8(
2033 // CHECK-NEXT: entry:
2034 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2035 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv16i8.nxv2i64(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
2036 // CHECK-NEXT: ret void
2038 // CPP-CHECK-LABEL: @_Z30test_svst1q_scatter_u64base_u8u10__SVBool_tu12__SVUint64_tu11__SVUint8_t(
2039 // CPP-CHECK-NEXT: entry:
2040 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2041 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv16i8.nxv2i64(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
2042 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_u8(svbool_t pg, svuint64_t base, svuint8_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base,, _u8)(pg, base, data);
}
2049 // CHECK-LABEL: @test_svst1q_scatter_u64base_s8(
2050 // CHECK-NEXT: entry:
2051 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2052 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv16i8.nxv2i64(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
2053 // CHECK-NEXT: ret void
2055 // CPP-CHECK-LABEL: @_Z30test_svst1q_scatter_u64base_s8u10__SVBool_tu12__SVUint64_tu10__SVInt8_t(
2056 // CPP-CHECK-NEXT: entry:
2057 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2058 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv16i8.nxv2i64(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
2059 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_s8(svbool_t pg, svuint64_t base, svint8_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base,, _s8)(pg, base, data);
}
2066 // CHECK-LABEL: @test_svst1q_scatter_u64base_f64(
2067 // CHECK-NEXT: entry:
2068 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2069 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
2070 // CHECK-NEXT: ret void
2072 // CPP-CHECK-LABEL: @_Z31test_svst1q_scatter_u64base_f64u10__SVBool_tu12__SVUint64_tu13__SVFloat64_t(
2073 // CPP-CHECK-NEXT: entry:
2074 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2075 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
2076 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_f64(svbool_t pg, svuint64_t base, svfloat64_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base,, _f64)(pg, base, data);
}
2083 // CHECK-LABEL: @test_svst1q_scatter_u64base_f32(
2084 // CHECK-NEXT: entry:
2085 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2086 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv4f32.nxv2i64(<vscale x 4 x float> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
2087 // CHECK-NEXT: ret void
2089 // CPP-CHECK-LABEL: @_Z31test_svst1q_scatter_u64base_f32u10__SVBool_tu12__SVUint64_tu13__SVFloat32_t(
2090 // CPP-CHECK-NEXT: entry:
2091 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2092 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv4f32.nxv2i64(<vscale x 4 x float> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
2093 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_f32(svbool_t pg, svuint64_t base, svfloat32_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base,, _f32)(pg, base, data);
}
2100 // CHECK-LABEL: @test_svst1q_scatter_u64base_f16(
2101 // CHECK-NEXT: entry:
2102 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2103 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8f16.nxv2i64(<vscale x 8 x half> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
2104 // CHECK-NEXT: ret void
2106 // CPP-CHECK-LABEL: @_Z31test_svst1q_scatter_u64base_f16u10__SVBool_tu12__SVUint64_tu13__SVFloat16_t(
2107 // CPP-CHECK-NEXT: entry:
2108 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2109 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8f16.nxv2i64(<vscale x 8 x half> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
2110 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_f16(svbool_t pg, svuint64_t base, svfloat16_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base,, _f16)(pg, base, data);
}
2117 // CHECK-LABEL: @test_svst1q_scatter_u64base_bf16(
2118 // CHECK-NEXT: entry:
2119 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2120 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8bf16.nxv2i64(<vscale x 8 x bfloat> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
2121 // CHECK-NEXT: ret void
2123 // CPP-CHECK-LABEL: @_Z32test_svst1q_scatter_u64base_bf16u10__SVBool_tu12__SVUint64_tu14__SVBfloat16_t(
2124 // CPP-CHECK-NEXT: entry:
2125 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2126 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8bf16.nxv2i64(<vscale x 8 x bfloat> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 0)
2127 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_bf16(svbool_t pg, svuint64_t base, svbfloat16_t data)
{
  SVE_ACLE_FUNC(svst1q_scatter, _u64base,,_bf16)(pg, base, data);
}
2134 // CHECK-LABEL: @test_svst1q_scatter_u64index_s16(
2135 // CHECK-NEXT: entry:
2136 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2137 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2138 // CHECK-NEXT: ret void
2140 // CPP-CHECK-LABEL: @_Z32test_svst1q_scatter_u64index_s16u10__SVBool_tPsu12__SVUint64_tu11__SVInt16_t(
2141 // CPP-CHECK-NEXT: entry:
2142 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2143 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2144 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64index_s16(svbool_t pg, int16_t *base, svuint64_t idx, svint16_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_, u64, index, _s16)(pg, base, idx, data);
}
2150 // CHECK-LABEL: @test_svst1q_scatter_u64index_u16(
2151 // CHECK-NEXT: entry:
2152 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2153 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2154 // CHECK-NEXT: ret void
2156 // CPP-CHECK-LABEL: @_Z32test_svst1q_scatter_u64index_u16u10__SVBool_tPtu12__SVUint64_tu12__SVUint16_t(
2157 // CPP-CHECK-NEXT: entry:
2158 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2159 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2160 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64index_u16(svbool_t pg, uint16_t *base, svuint64_t idx, svuint16_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_, u64, index, _u16)(pg, base, idx, data);
}
2166 // CHECK-LABEL: @test_svst1q_scatter_u64index_s32(
2167 // CHECK-NEXT: entry:
2168 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2169 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2170 // CHECK-NEXT: ret void
2172 // CPP-CHECK-LABEL: @_Z32test_svst1q_scatter_u64index_s32u10__SVBool_tPiu12__SVUint64_tu11__SVInt32_t(
2173 // CPP-CHECK-NEXT: entry:
2174 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2175 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2176 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64index_s32(svbool_t pg, int32_t *base, svuint64_t idx, svint32_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_, u64, index, _s32)(pg, base, idx, data);
}
2182 // CHECK-LABEL: @test_svst1q_scatter_u64index_u32(
2183 // CHECK-NEXT: entry:
2184 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2185 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2186 // CHECK-NEXT: ret void
2188 // CPP-CHECK-LABEL: @_Z32test_svst1q_scatter_u64index_u32u10__SVBool_tPju12__SVUint64_tu12__SVUint32_t(
2189 // CPP-CHECK-NEXT: entry:
2190 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2191 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2192 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64index_u32(svbool_t pg, uint32_t *base, svuint64_t idx, svuint32_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_, u64, index, _u32)(pg, base, idx, data);
}
2198 // CHECK-LABEL: @test_svst1q_scatter_u64index_s64(
2199 // CHECK-NEXT: entry:
2200 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2201 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2202 // CHECK-NEXT: ret void
2204 // CPP-CHECK-LABEL: @_Z32test_svst1q_scatter_u64index_s64u10__SVBool_tPlu12__SVUint64_tu11__SVInt64_t(
2205 // CPP-CHECK-NEXT: entry:
2206 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2207 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2208 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64index_s64(svbool_t pg, int64_t *base, svuint64_t idx, svint64_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_, u64, index, _s64)(pg, base, idx, data);
}
2214 // CHECK-LABEL: @test_svst1q_scatter_u64index_u64(
2215 // CHECK-NEXT: entry:
2216 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2217 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2218 // CHECK-NEXT: ret void
2220 // CPP-CHECK-LABEL: @_Z32test_svst1q_scatter_u64index_u64u10__SVBool_tPmu12__SVUint64_tS1_(
2221 // CPP-CHECK-NEXT: entry:
2222 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2223 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2224 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64index_u64(svbool_t pg, uint64_t *base, svuint64_t idx, svuint64_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_, u64, index, _u64)(pg, base, idx, data);
}
2230 // CHECK-LABEL: @test_svst1q_scatter_u64index_bf16(
2231 // CHECK-NEXT: entry:
2232 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2233 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv8bf16(<vscale x 8 x bfloat> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2234 // CHECK-NEXT: ret void
2236 // CPP-CHECK-LABEL: @_Z33test_svst1q_scatter_u64index_bf16u10__SVBool_tPu6__bf16u12__SVUint64_tu14__SVBfloat16_t(
2237 // CPP-CHECK-NEXT: entry:
2238 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2239 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv8bf16(<vscale x 8 x bfloat> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2240 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64index_bf16(svbool_t pg, bfloat16_t *base, svuint64_t idx, svbfloat16_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_, u64, index, _bf16)(pg, base, idx, data);
}
2246 // CHECK-LABEL: @test_svst1q_scatter_u64index_f16(
2247 // CHECK-NEXT: entry:
2248 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2249 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv8f16(<vscale x 8 x half> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2250 // CHECK-NEXT: ret void
2252 // CPP-CHECK-LABEL: @_Z32test_svst1q_scatter_u64index_f16u10__SVBool_tPDhu12__SVUint64_tu13__SVFloat16_t(
2253 // CPP-CHECK-NEXT: entry:
2254 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2255 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv8f16(<vscale x 8 x half> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2256 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64index_f16(svbool_t pg, float16_t *base, svuint64_t idx, svfloat16_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_, u64, index, _f16)(pg, base, idx, data);
}
2262 // CHECK-LABEL: @test_svst1q_scatter_u64index_f32(
2263 // CHECK-NEXT: entry:
2264 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2265 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv4f32(<vscale x 4 x float> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2266 // CHECK-NEXT: ret void
2268 // CPP-CHECK-LABEL: @_Z32test_svst1q_scatter_u64index_f32u10__SVBool_tPfu12__SVUint64_tu13__SVFloat32_t(
2269 // CPP-CHECK-NEXT: entry:
2270 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2271 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv4f32(<vscale x 4 x float> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2272 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64index_f32(svbool_t pg, float32_t *base, svuint64_t idx, svfloat32_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_, u64, index, _f32)(pg, base, idx, data);
}
2278 // CHECK-LABEL: @test_svst1q_scatter_u64index_f64(
2279 // CHECK-NEXT: entry:
2280 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2281 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv2f64(<vscale x 2 x double> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2282 // CHECK-NEXT: ret void
2284 // CPP-CHECK-LABEL: @_Z32test_svst1q_scatter_u64index_f64u10__SVBool_tPdu12__SVUint64_tu13__SVFloat64_t(
2285 // CPP-CHECK-NEXT: entry:
2286 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2287 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.index.nxv2f64(<vscale x 2 x double> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[IDX:%.*]])
2288 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64index_f64(svbool_t pg, float64_t *base, svuint64_t idx, svfloat64_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_, u64, index, _f64)(pg, base, idx, data);
}
2294 // CHECK-LABEL: @test_svst1q_scatter_u64base_index_s16(
2295 // CHECK-NEXT: entry:
2296 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2297 // CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 1
2298 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8i16.nxv2i64(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2299 // CHECK-NEXT: ret void
2301 // CPP-CHECK-LABEL: @_Z37test_svst1q_scatter_u64base_index_s16u10__SVBool_tu12__SVUint64_tlu11__SVInt16_t(
2302 // CPP-CHECK-NEXT: entry:
2303 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2304 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 1
2305 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8i16.nxv2i64(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2306 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_index_s16(svbool_t pg, svuint64_t base, int64_t idx, svint16_t data) {
  SVE_ACLE_FUNC(svst1q_scatter,_u64base,_index,_s16)(pg, base, idx, data);
}
2312 // CHECK-LABEL: @test_svst1q_scatter_u64base_index_u16(
2313 // CHECK-NEXT: entry:
2314 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2315 // CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 1
2316 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8i16.nxv2i64(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2317 // CHECK-NEXT: ret void
2319 // CPP-CHECK-LABEL: @_Z37test_svst1q_scatter_u64base_index_u16u10__SVBool_tu12__SVUint64_tlu12__SVUint16_t(
2320 // CPP-CHECK-NEXT: entry:
2321 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2322 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 1
2323 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8i16.nxv2i64(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2324 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_index_u16(svbool_t pg, svuint64_t base, int64_t idx, svuint16_t data) {
  SVE_ACLE_FUNC(svst1q_scatter,_u64base,_index,_u16)(pg, base, idx, data);
}
2330 // CHECK-LABEL: @test_svst1q_scatter_u64base_index_s32(
2331 // CHECK-NEXT: entry:
2332 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2333 // CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 2
2334 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv4i32.nxv2i64(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2335 // CHECK-NEXT: ret void
2337 // CPP-CHECK-LABEL: @_Z37test_svst1q_scatter_u64base_index_s32u10__SVBool_tu12__SVUint64_tlu11__SVInt32_t(
2338 // CPP-CHECK-NEXT: entry:
2339 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2340 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 2
2341 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv4i32.nxv2i64(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2342 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_index_s32(svbool_t pg, svuint64_t base, int64_t idx, svint32_t data) {
  SVE_ACLE_FUNC(svst1q_scatter,_u64base,_index,_s32)(pg, base, idx, data);
}
2348 // CHECK-LABEL: @test_svst1q_scatter_u64base_index_u32(
2349 // CHECK-NEXT: entry:
2350 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2351 // CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 2
2352 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv4i32.nxv2i64(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2353 // CHECK-NEXT: ret void
2355 // CPP-CHECK-LABEL: @_Z37test_svst1q_scatter_u64base_index_u32u10__SVBool_tu12__SVUint64_tlu12__SVUint32_t(
2356 // CPP-CHECK-NEXT: entry:
2357 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2358 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 2
2359 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv4i32.nxv2i64(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2360 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_index_u32(svbool_t pg, svuint64_t base, int64_t idx, svuint32_t data) {
  SVE_ACLE_FUNC(svst1q_scatter,_u64base,_index,_u32)(pg, base, idx, data);
}
2366 // CHECK-LABEL: @test_svst1q_scatter_u64base_index_s64(
2367 // CHECK-NEXT: entry:
2368 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2369 // CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 3
2370 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2371 // CHECK-NEXT: ret void
2373 // CPP-CHECK-LABEL: @_Z37test_svst1q_scatter_u64base_index_s64u10__SVBool_tu12__SVUint64_tlu11__SVInt64_t(
2374 // CPP-CHECK-NEXT: entry:
2375 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2376 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 3
2377 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2378 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_index_s64(svbool_t pg, svuint64_t base, int64_t idx, svint64_t data) {
  SVE_ACLE_FUNC(svst1q_scatter,_u64base,_index,_s64)(pg, base, idx, data);
}
2384 // CHECK-LABEL: @test_svst1q_scatter_u64base_index_u64(
2385 // CHECK-NEXT: entry:
2386 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2387 // CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 3
2388 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2389 // CHECK-NEXT: ret void
2391 // CPP-CHECK-LABEL: @_Z37test_svst1q_scatter_u64base_index_u64u10__SVBool_tu12__SVUint64_tlS0_(
2392 // CPP-CHECK-NEXT: entry:
2393 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2394 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 3
2395 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2396 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_index_u64(svbool_t pg, svuint64_t base, int64_t idx, svuint64_t data) {
  SVE_ACLE_FUNC(svst1q_scatter,_u64base,_index,_u64)(pg, base, idx, data);
}
2402 // CHECK-LABEL: @test_svst1q_scatter_u64base_index_bf16(
2403 // CHECK-NEXT: entry:
2404 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2405 // CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 1
2406 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8bf16.nxv2i64(<vscale x 8 x bfloat> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2407 // CHECK-NEXT: ret void
2409 // CPP-CHECK-LABEL: @_Z38test_svst1q_scatter_u64base_index_bf16u10__SVBool_tu12__SVUint64_tlu14__SVBfloat16_t(
2410 // CPP-CHECK-NEXT: entry:
2411 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2412 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 1
2413 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8bf16.nxv2i64(<vscale x 8 x bfloat> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2414 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_index_bf16(svbool_t pg, svuint64_t base, int64_t idx, svbfloat16_t data) {
  SVE_ACLE_FUNC(svst1q_scatter,_u64base,_index,_bf16)(pg, base, idx, data);
}
2420 // CHECK-LABEL: @test_svst1q_scatter_u64base_index_f16(
2421 // CHECK-NEXT: entry:
2422 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2423 // CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 1
2424 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8f16.nxv2i64(<vscale x 8 x half> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2425 // CHECK-NEXT: ret void
2427 // CPP-CHECK-LABEL: @_Z37test_svst1q_scatter_u64base_index_f16u10__SVBool_tu12__SVUint64_tlu13__SVFloat16_t(
2428 // CPP-CHECK-NEXT: entry:
2429 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2430 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 1
2431 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv8f16.nxv2i64(<vscale x 8 x half> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2432 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_index_f16(svbool_t pg, svuint64_t base, int64_t idx, svfloat16_t data) {
  SVE_ACLE_FUNC(svst1q_scatter,_u64base,_index,_f16)(pg, base, idx, data);
}
2438 // CHECK-LABEL: @test_svst1q_scatter_u64base_index_f32(
2439 // CHECK-NEXT: entry:
2440 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2441 // CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 2
2442 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv4f32.nxv2i64(<vscale x 4 x float> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2443 // CHECK-NEXT: ret void
2445 // CPP-CHECK-LABEL: @_Z37test_svst1q_scatter_u64base_index_f32u10__SVBool_tu12__SVUint64_tlu13__SVFloat32_t(
2446 // CPP-CHECK-NEXT: entry:
2447 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2448 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 2
2449 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv4f32.nxv2i64(<vscale x 4 x float> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2450 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_index_f32(svbool_t pg, svuint64_t base, int64_t idx, svfloat32_t data) {
  SVE_ACLE_FUNC(svst1q_scatter,_u64base,_index,_f32)(pg, base, idx, data);
}
2456 // CHECK-LABEL: @test_svst1q_scatter_u64base_index_f64(
2457 // CHECK-NEXT: entry:
2458 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2459 // CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 3
2460 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2461 // CHECK-NEXT: ret void
2463 // CPP-CHECK-LABEL: @_Z37test_svst1q_scatter_u64base_index_f64u10__SVBool_tu12__SVUint64_tlu13__SVFloat64_t(
2464 // CPP-CHECK-NEXT: entry:
2465 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2466 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[IDX:%.*]], 3
2467 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x double> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], <vscale x 2 x i64> [[BASE:%.*]], i64 [[TMP1]])
2468 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64base_index_f64(svbool_t pg, svuint64_t base, int64_t idx, svfloat64_t data) {
  SVE_ACLE_FUNC(svst1q_scatter,_u64base,_index,_f64)(pg, base, idx, data);
}
2474 // CHECK-LABEL: @test_svst1q_scatter_u64offset_s8(
2475 // CHECK-NEXT: entry:
2476 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2477 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2478 // CHECK-NEXT: ret void
2480 // CPP-CHECK-LABEL: @_Z32test_svst1q_scatter_u64offset_s8u10__SVBool_tPau12__SVUint64_tu10__SVInt8_t(
2481 // CPP-CHECK-NEXT: entry:
2482 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2483 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2484 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64offset_s8(svbool_t pg, int8_t *base, svuint64_t off, svint8_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_,u64,offset,_s8)(pg, base, off, data);
}
2490 // CHECK-LABEL: @test_svst1q_scatter_u64offset_u8(
2491 // CHECK-NEXT: entry:
2492 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2493 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2494 // CHECK-NEXT: ret void
2496 // CPP-CHECK-LABEL: @_Z32test_svst1q_scatter_u64offset_u8u10__SVBool_tPhu12__SVUint64_tu11__SVUint8_t(
2497 // CPP-CHECK-NEXT: entry:
2498 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2499 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv16i8(<vscale x 16 x i8> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2500 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64offset_u8(svbool_t pg, uint8_t *base, svuint64_t off, svuint8_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_,u64,offset,_u8)(pg, base, off, data);
}
2506 // CHECK-LABEL: @test_svst1q_scatter_u64offset_s16(
2507 // CHECK-NEXT: entry:
2508 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2509 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2510 // CHECK-NEXT: ret void
2512 // CPP-CHECK-LABEL: @_Z33test_svst1q_scatter_u64offset_s16u10__SVBool_tPsu12__SVUint64_tu11__SVInt16_t(
2513 // CPP-CHECK-NEXT: entry:
2514 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2515 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2516 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64offset_s16(svbool_t pg, int16_t *base, svuint64_t off, svint16_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_,u64,offset,_s16)(pg, base, off, data);
}
2522 // CHECK-LABEL: @test_svst1q_scatter_u64offset_u16(
2523 // CHECK-NEXT: entry:
2524 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2525 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2526 // CHECK-NEXT: ret void
2528 // CPP-CHECK-LABEL: @_Z33test_svst1q_scatter_u64offset_u16u10__SVBool_tPtu12__SVUint64_tu12__SVUint16_t(
2529 // CPP-CHECK-NEXT: entry:
2530 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2531 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv8i16(<vscale x 8 x i16> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2532 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64offset_u16(svbool_t pg, uint16_t *base, svuint64_t off, svuint16_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_,u64,offset,_u16)(pg, base, off, data);
}
2538 // CHECK-LABEL: @test_svst1q_scatter_u64offset_s32(
2539 // CHECK-NEXT: entry:
2540 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2541 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2542 // CHECK-NEXT: ret void
2544 // CPP-CHECK-LABEL: @_Z33test_svst1q_scatter_u64offset_s32u10__SVBool_tPiu12__SVUint64_tu11__SVInt32_t(
2545 // CPP-CHECK-NEXT: entry:
2546 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2547 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2548 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64offset_s32(svbool_t pg, int32_t *base, svuint64_t off, svint32_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_,u64,offset,_s32)(pg, base, off, data);
}
2554 // CHECK-LABEL: @test_svst1q_scatter_u64offset_u32(
2555 // CHECK-NEXT: entry:
2556 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2557 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2558 // CHECK-NEXT: ret void
2560 // CPP-CHECK-LABEL: @_Z33test_svst1q_scatter_u64offset_u32u10__SVBool_tPju12__SVUint64_tu12__SVUint32_t(
2561 // CPP-CHECK-NEXT: entry:
2562 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2563 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv4i32(<vscale x 4 x i32> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2564 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64offset_u32(svbool_t pg, uint32_t *base, svuint64_t off, svuint32_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_,u64,offset,_u32)(pg, base, off, data);
}
2570 // CHECK-LABEL: @test_svst1q_scatter_u64offset_s64(
2571 // CHECK-NEXT: entry:
2572 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2573 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2574 // CHECK-NEXT: ret void
2576 // CPP-CHECK-LABEL: @_Z33test_svst1q_scatter_u64offset_s64u10__SVBool_tPlu12__SVUint64_tu11__SVInt64_t(
2577 // CPP-CHECK-NEXT: entry:
2578 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2579 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2580 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64offset_s64(svbool_t pg, int64_t *base, svuint64_t off, svint64_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_,u64,offset,_s64)(pg, base, off, data);
}
2586 // CHECK-LABEL: @test_svst1q_scatter_u64offset_u64(
2587 // CHECK-NEXT: entry:
2588 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2589 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2590 // CHECK-NEXT: ret void
2592 // CPP-CHECK-LABEL: @_Z33test_svst1q_scatter_u64offset_u64u10__SVBool_tPmu12__SVUint64_tS1_(
2593 // CPP-CHECK-NEXT: entry:
2594 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2595 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv2i64(<vscale x 2 x i64> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2596 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64offset_u64(svbool_t pg, uint64_t *base, svuint64_t off, svuint64_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_,u64,offset,_u64)(pg, base, off, data);
}
2602 // CHECK-LABEL: @test_svst1q_scatter_u64offset_bf16(
2603 // CHECK-NEXT: entry:
2604 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2605 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv8bf16(<vscale x 8 x bfloat> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2606 // CHECK-NEXT: ret void
2608 // CPP-CHECK-LABEL: @_Z34test_svst1q_scatter_u64offset_bf16u10__SVBool_tPu6__bf16u12__SVUint64_tu14__SVBfloat16_t(
2609 // CPP-CHECK-NEXT: entry:
2610 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2611 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv8bf16(<vscale x 8 x bfloat> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2612 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64offset_bf16(svbool_t pg, bfloat16_t *base, svuint64_t off, svbfloat16_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_,u64,offset,_bf16)(pg, base, off, data);
}
2618 // CHECK-LABEL: @test_svst1q_scatter_u64offset_f16(
2619 // CHECK-NEXT: entry:
2620 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2621 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv8f16(<vscale x 8 x half> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2622 // CHECK-NEXT: ret void
2624 // CPP-CHECK-LABEL: @_Z33test_svst1q_scatter_u64offset_f16u10__SVBool_tPDhu12__SVUint64_tu13__SVFloat16_t(
2625 // CPP-CHECK-NEXT: entry:
2626 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2627 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv8f16(<vscale x 8 x half> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2628 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64offset_f16(svbool_t pg, float16_t *base, svuint64_t off, svfloat16_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_,u64,offset,_f16)(pg, base, off, data);
}
2634 // CHECK-LABEL: @test_svst1q_scatter_u64offset_f32(
2635 // CHECK-NEXT: entry:
2636 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2637 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv4f32(<vscale x 4 x float> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2638 // CHECK-NEXT: ret void
2640 // CPP-CHECK-LABEL: @_Z33test_svst1q_scatter_u64offset_f32u10__SVBool_tPfu12__SVUint64_tu13__SVFloat32_t(
2641 // CPP-CHECK-NEXT: entry:
2642 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2643 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv4f32(<vscale x 4 x float> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2644 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64offset_f32(svbool_t pg, float32_t *base, svuint64_t off, svfloat32_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_,u64,offset,_f32)(pg, base, off, data);
}
2650 // CHECK-LABEL: @test_svst1q_scatter_u64offset_f64(
2651 // CHECK-NEXT: entry:
2652 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2653 // CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv2f64(<vscale x 2 x double> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2654 // CHECK-NEXT: ret void
2656 // CPP-CHECK-LABEL: @_Z33test_svst1q_scatter_u64offset_f64u10__SVBool_tPdu12__SVUint64_tu13__SVFloat64_t(
2657 // CPP-CHECK-NEXT: entry:
2658 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 1 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv1i1(<vscale x 16 x i1> [[PG:%.*]])
2659 // CPP-CHECK-NEXT: tail call void @llvm.aarch64.sve.st1q.scatter.vector.offset.nxv2f64(<vscale x 2 x double> [[DATA:%.*]], <vscale x 1 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFF:%.*]])
2660 // CPP-CHECK-NEXT: ret void
void test_svst1q_scatter_u64offset_f64(svbool_t pg, float64_t *base, svuint64_t off, svfloat64_t data) {
  SVE_ACLE_FUNC(svst1q_scatter_,u64,offset,_f64)(pg, base, off, data);
}