// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -o - -emit-llvm %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -o - -emit-llvm %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s
#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
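
// For example, with SVE_OVERLOADED_FORMS defined, SVE_ACLE_FUNC(svst1b,_s16,,)
// pastes A1##A3 and the first call below resolves to the overloaded form
//   svst1b(pg, base, data)
// while the default branch pastes A1##A2##A3##A4 and the same call resolves to
// the explicitly suffixed form
//   svst1b_s16(pg, base, data)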

// CHECK-LABEL: @test_svst1b_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 8 x i16> [[DATA:%.*]] to <vscale x 8 x i8>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_s16(svbool_t pg, int8_t *base, svint16_t data)
{
  return SVE_ACLE_FUNC(svst1b,_s16,,)(pg, base, data);
}
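
// The checks above are representative of every contiguous form below: the
// svbool_t predicate is narrowed to the element count of the data vector, the
// data is truncated to i8 elements, and the store is emitted as a masked
// store. The scatter forms further down lower to @llvm.aarch64.sve.st1.scatter.*
// intrinsics instead.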

// CHECK-LABEL: @test_svst1b_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_s32(svbool_t pg, int8_t *base, svint32_t data)
{
  return SVE_ACLE_FUNC(svst1b,_s32,,)(pg, base, data);
}

// CHECK-LABEL: @test_svst1b_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_s64(svbool_t pg, int8_t *base, svint64_t data)
{
  return SVE_ACLE_FUNC(svst1b,_s64,,)(pg, base, data);
}

// CHECK-LABEL: @test_svst1b_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 8 x i16> [[DATA:%.*]] to <vscale x 8 x i8>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_u16(svbool_t pg, uint8_t *base, svuint16_t data)
{
  return SVE_ACLE_FUNC(svst1b,_u16,,)(pg, base, data);
}

// CHECK-LABEL: @test_svst1b_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_u32(svbool_t pg, uint8_t *base, svuint32_t data)
{
  return SVE_ACLE_FUNC(svst1b,_u32,,)(pg, base, data);
}

// CHECK-LABEL: @test_svst1b_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_u64(svbool_t pg, uint8_t *base, svuint64_t data)
{
  return SVE_ACLE_FUNC(svst1b,_u64,,)(pg, base, data);
}

// CHECK-LABEL: @test_svst1b_vnum_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr <vscale x 8 x i8>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
// CHECK-NEXT: [[TMP2:%.*]] = trunc <vscale x 8 x i16> [[DATA:%.*]] to <vscale x 8 x i8>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP2]], ptr [[TMP1]], i32 1, <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_vnum_s16(svbool_t pg, int8_t *base, int64_t vnum, svint16_t data)
{
  return SVE_ACLE_FUNC(svst1b_vnum,_s16,,)(pg, base, vnum, data);
}

// CHECK-LABEL: @test_svst1b_vnum_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr <vscale x 4 x i8>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
// CHECK-NEXT: [[TMP2:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP2]], ptr [[TMP1]], i32 1, <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_vnum_s32(svbool_t pg, int8_t *base, int64_t vnum, svint32_t data)
{
  return SVE_ACLE_FUNC(svst1b_vnum,_s32,,)(pg, base, vnum, data);
}

// CHECK-LABEL: @test_svst1b_vnum_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr <vscale x 2 x i8>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
// CHECK-NEXT: [[TMP2:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP2]], ptr [[TMP1]], i32 1, <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_vnum_s64(svbool_t pg, int8_t *base, int64_t vnum, svint64_t data)
{
  return SVE_ACLE_FUNC(svst1b_vnum,_s64,,)(pg, base, vnum, data);
}

// CHECK-LABEL: @test_svst1b_vnum_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr <vscale x 8 x i8>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
// CHECK-NEXT: [[TMP2:%.*]] = trunc <vscale x 8 x i16> [[DATA:%.*]] to <vscale x 8 x i8>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv8i8.p0(<vscale x 8 x i8> [[TMP2]], ptr [[TMP1]], i32 1, <vscale x 8 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_vnum_u16(svbool_t pg, uint8_t *base, int64_t vnum, svuint16_t data)
{
  return SVE_ACLE_FUNC(svst1b_vnum,_u16,,)(pg, base, vnum, data);
}

// CHECK-LABEL: @test_svst1b_vnum_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr <vscale x 4 x i8>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
// CHECK-NEXT: [[TMP2:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i8.p0(<vscale x 4 x i8> [[TMP2]], ptr [[TMP1]], i32 1, <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_vnum_u32(svbool_t pg, uint8_t *base, int64_t vnum, svuint32_t data)
{
  return SVE_ACLE_FUNC(svst1b_vnum,_u32,,)(pg, base, vnum, data);
}

// CHECK-LABEL: @test_svst1b_vnum_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr <vscale x 2 x i8>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
// CHECK-NEXT: [[TMP2:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i8.p0(<vscale x 2 x i8> [[TMP2]], ptr [[TMP1]], i32 1, <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1b_vnum_u64(svbool_t pg, uint8_t *base, int64_t vnum, svuint64_t data)
{
  return SVE_ACLE_FUNC(svst1b_vnum,_u64,,)(pg, base, vnum, data);
}

// CHECK-LABEL: @test_svst1b_scatter_u32base_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i8> [[TMP0]], <vscale x 4 x i1> [[TMP1]], <vscale x 4 x i32> [[BASES:%.*]], i64 0)
// CHECK-NEXT: ret void
//
void test_svst1b_scatter_u32base_s32(svbool_t pg, svuint32_t bases, svint32_t data)
{
  return SVE_ACLE_FUNC(svst1b_scatter,_u32base,,_s32)(pg, bases, data);
}

// CHECK-LABEL: @test_svst1b_scatter_u64base_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i8> [[TMP0]], <vscale x 2 x i1> [[TMP1]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CHECK-NEXT: ret void
//
void test_svst1b_scatter_u64base_s64(svbool_t pg, svuint64_t bases, svint64_t data)
{
  return SVE_ACLE_FUNC(svst1b_scatter,_u64base,,_s64)(pg, bases, data);
}

// CHECK-LABEL: @test_svst1b_scatter_u32base_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i8> [[TMP0]], <vscale x 4 x i1> [[TMP1]], <vscale x 4 x i32> [[BASES:%.*]], i64 0)
// CHECK-NEXT: ret void
//
void test_svst1b_scatter_u32base_u32(svbool_t pg, svuint32_t bases, svuint32_t data)
{
  return SVE_ACLE_FUNC(svst1b_scatter,_u32base,,_u32)(pg, bases, data);
}

// CHECK-LABEL: @test_svst1b_scatter_u64base_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i8> [[TMP0]], <vscale x 2 x i1> [[TMP1]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CHECK-NEXT: ret void
//
void test_svst1b_scatter_u64base_u64(svbool_t pg, svuint64_t bases, svuint64_t data)
{
  return SVE_ACLE_FUNC(svst1b_scatter,_u64base,,_u64)(pg, bases, data);
}

// CHECK-LABEL: @test_svst1b_scatter_s32offset_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4i8(<vscale x 4 x i8> [[TMP0]], <vscale x 4 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1b_scatter_s32offset_s32(svbool_t pg, int8_t *base, svint32_t offsets, svint32_t data)
{
  return SVE_ACLE_FUNC(svst1b_scatter_,s32,offset,_s32)(pg, base, offsets, data);
}

// CHECK-LABEL: @test_svst1b_scatter_s64offset_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.nxv2i8(<vscale x 2 x i8> [[TMP0]], <vscale x 2 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1b_scatter_s64offset_s64(svbool_t pg, int8_t *base, svint64_t offsets, svint64_t data)
{
  return SVE_ACLE_FUNC(svst1b_scatter_,s64,offset,_s64)(pg, base, offsets, data);
}

// CHECK-LABEL: @test_svst1b_scatter_s32offset_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4i8(<vscale x 4 x i8> [[TMP0]], <vscale x 4 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1b_scatter_s32offset_u32(svbool_t pg, uint8_t *base, svint32_t offsets, svuint32_t data)
{
  return SVE_ACLE_FUNC(svst1b_scatter_,s32,offset,_u32)(pg, base, offsets, data);
}

// CHECK-LABEL: @test_svst1b_scatter_s64offset_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.nxv2i8(<vscale x 2 x i8> [[TMP0]], <vscale x 2 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1b_scatter_s64offset_u64(svbool_t pg, uint8_t *base, svint64_t offsets, svuint64_t data)
{
  return SVE_ACLE_FUNC(svst1b_scatter_,s64,offset,_u64)(pg, base, offsets, data);
}

// CHECK-LABEL: @test_svst1b_scatter_u32offset_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i8(<vscale x 4 x i8> [[TMP0]], <vscale x 4 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1b_scatter_u32offset_s32(svbool_t pg, int8_t *base, svuint32_t offsets, svint32_t data)
{
  return SVE_ACLE_FUNC(svst1b_scatter_,u32,offset,_s32)(pg, base, offsets, data);
}

// CHECK-LABEL: @test_svst1b_scatter_u64offset_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.nxv2i8(<vscale x 2 x i8> [[TMP0]], <vscale x 2 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1b_scatter_u64offset_s64(svbool_t pg, int8_t *base, svuint64_t offsets, svint64_t data)
{
  return SVE_ACLE_FUNC(svst1b_scatter_,u64,offset,_s64)(pg, base, offsets, data);
}

// CHECK-LABEL: @test_svst1b_scatter_u32offset_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i8(<vscale x 4 x i8> [[TMP0]], <vscale x 4 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1b_scatter_u32offset_u32(svbool_t pg, uint8_t *base, svuint32_t offsets, svuint32_t data)
{
  return SVE_ACLE_FUNC(svst1b_scatter_,u32,offset,_u32)(pg, base, offsets, data);
}

// CHECK-LABEL: @test_svst1b_scatter_u64offset_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.nxv2i8(<vscale x 2 x i8> [[TMP0]], <vscale x 2 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1b_scatter_u64offset_u64(svbool_t pg, uint8_t *base, svuint64_t offsets, svuint64_t data)
{
  return SVE_ACLE_FUNC(svst1b_scatter_,u64,offset,_u64)(pg, base, offsets, data);
}

// CHECK-LABEL: @test_svst1b_scatter_u32base_offset_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i8> [[TMP0]], <vscale x 4 x i1> [[TMP1]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1b_scatter_u32base_offset_s32(svbool_t pg, svuint32_t bases, int64_t offset, svint32_t data)
{
  return SVE_ACLE_FUNC(svst1b_scatter,_u32base,_offset,_s32)(pg, bases, offset, data);
}

// CHECK-LABEL: @test_svst1b_scatter_u64base_offset_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i8> [[TMP0]], <vscale x 2 x i1> [[TMP1]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1b_scatter_u64base_offset_s64(svbool_t pg, svuint64_t bases, int64_t offset, svint64_t data)
{
  return SVE_ACLE_FUNC(svst1b_scatter,_u64base,_offset,_s64)(pg, bases, offset, data);
}

// CHECK-LABEL: @test_svst1b_scatter_u32base_offset_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i8.nxv4i32(<vscale x 4 x i8> [[TMP0]], <vscale x 4 x i1> [[TMP1]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1b_scatter_u32base_offset_u32(svbool_t pg, svuint32_t bases, int64_t offset, svuint32_t data)
{
  return SVE_ACLE_FUNC(svst1b_scatter,_u32base,_offset,_u32)(pg, bases, offset, data);
}

// CHECK-LABEL: @test_svst1b_scatter_u64base_offset_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i8>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i8.nxv2i64(<vscale x 2 x i8> [[TMP0]], <vscale x 2 x i1> [[TMP1]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1b_scatter_u64base_offset_u64(svbool_t pg, svuint64_t bases, int64_t offset, svuint64_t data)
{
  return SVE_ACLE_FUNC(svst1b_scatter,_u64base,_offset,_u64)(pg, bases, offset, data);
}