// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -o - -emit-llvm %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -o - -emit-llvm %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme -S -disable-O0-optnone -Werror -Wall -o /dev/null %s

#include <arm_sve.h>

#if defined __ARM_FEATURE_SME
#define MODE_ATTR __arm_streaming
#else
#define MODE_ATTR
#endif

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif

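// For example, SVE_ACLE_FUNC(svst1h,_s32,,) expands to the overloaded name
// svst1h when SVE_OVERLOADED_FORMS is defined and to the fully-qualified
// name svst1h_s32 otherwise, so both FileCheck RUN lines exercise the same
// test bodies.
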
// CHECK-LABEL: @test_svst1h_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_s32(svbool_t pg, int16_t *base, svint32_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svst1h,_s32,,)(pg, base, data);
}

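// svst1h truncates each element to 16 bits and stores it under predicate
// control, which is why the checked IR is a trunc feeding a masked store
// rather than a plain store.
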
// CHECK-LABEL: @test_svst1h_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_s64(svbool_t pg, int16_t *base, svint64_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svst1h,_s64,,)(pg, base, data);
}

// CHECK-LABEL: @test_svst1h_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_u32(svbool_t pg, uint16_t *base, svuint32_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svst1h,_u32,,)(pg, base, data);
}

// CHECK-LABEL: @test_svst1h_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP1]], ptr [[BASE:%.*]], i32 1, <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_u64(svbool_t pg, uint16_t *base, svuint64_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svst1h,_u64,,)(pg, base, data);
}

// CHECK-LABEL: @test_svst1h_vnum_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_vnum_s32(svbool_t pg, int16_t *base, int64_t vnum, svint32_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svst1h_vnum,_s32,,)(pg, base, vnum, data);
}

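// vnum counts whole vectors of the in-memory type: with 32-bit elements
// stored as halfwords one vector is vscale x 4 x 2 bytes, which is the
// vscale << 3 factor applied to vnum in the IR above (vscale << 2 for the
// 64-bit element variants below).
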
// CHECK-LABEL: @test_svst1h_vnum_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_vnum_s64(svbool_t pg, int16_t *base, int64_t vnum, svint64_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svst1h_vnum,_s64,,)(pg, base, vnum, data);
}

// CHECK-LABEL: @test_svst1h_vnum_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 3
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv4i16.p0(<vscale x 4 x i16> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 4 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_vnum_u32(svbool_t pg, uint16_t *base, int64_t vnum, svuint32_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svst1h_vnum,_u32,,)(pg, base, vnum, data);
}

// CHECK-LABEL: @test_svst1h_vnum_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call i64 @llvm.vscale.i64()
// CHECK-NEXT: [[TMP2:%.*]] = shl nuw nsw i64 [[TMP1]], 2
// CHECK-NEXT: [[DOTIDX:%.*]] = mul i64 [[VNUM:%.*]], [[TMP2]]
// CHECK-NEXT: [[TMP3:%.*]] = getelementptr i8, ptr [[BASE:%.*]], i64 [[DOTIDX]]
// CHECK-NEXT: [[TMP4:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
// CHECK-NEXT: tail call void @llvm.masked.store.nxv2i16.p0(<vscale x 2 x i16> [[TMP4]], ptr [[TMP3]], i32 1, <vscale x 2 x i1> [[TMP0]])
// CHECK-NEXT: ret void
//
void test_svst1h_vnum_u64(svbool_t pg, uint16_t *base, int64_t vnum, svuint64_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svst1h_vnum,_u64,,)(pg, base, vnum, data);
}

#ifndef __ARM_FEATURE_SME

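// Scatter stores have no streaming-mode form, so the remaining tests are
// compiled only when SME (and with it __arm_streaming) is not in use.
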
// CHECK-LABEL: @test_svst1h_scatter_u32base_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16> [[TMP0]], <vscale x 4 x i1> [[TMP1]], <vscale x 4 x i32> [[BASES:%.*]], i64 0)
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u32base_s32(svbool_t pg, svuint32_t bases, svint32_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter,_u32base,,_s32)(pg, bases, data);
}

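// In the _u32base/_u64base forms each active lane of the bases vector is a
// byte address in its own right; the scalar offset of the underlying
// scalar.offset intrinsic is simply 0.
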
// CHECK-LABEL: @test_svst1h_scatter_u64base_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i16> [[TMP0]], <vscale x 2 x i1> [[TMP1]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u64base_s64(svbool_t pg, svuint64_t bases, svint64_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter,_u64base,,_s64)(pg, bases, data);
}

// CHECK-LABEL: @test_svst1h_scatter_u32base_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16> [[TMP0]], <vscale x 4 x i1> [[TMP1]], <vscale x 4 x i32> [[BASES:%.*]], i64 0)
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u32base_u32(svbool_t pg, svuint32_t bases, svuint32_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter,_u32base,,_u32)(pg, bases, data);
}

// CHECK-LABEL: @test_svst1h_scatter_u64base_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i16> [[TMP0]], <vscale x 2 x i1> [[TMP1]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u64base_u64(svbool_t pg, svuint64_t bases, svuint64_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter,_u64base,,_u64)(pg, bases, data);
}

// CHECK-LABEL: @test_svst1h_scatter_s32offset_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4i16(<vscale x 4 x i16> [[TMP0]], <vscale x 4 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_s32offset_s32(svbool_t pg, int16_t *base, svint32_t offsets, svint32_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter_,s32,offset,_s32)(pg, base, offsets, data);
}

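// The vector _offset forms take unscaled byte offsets; 32-bit offsets are
// sign-extended (sxtw) or zero-extended (uxtw) to 64 bits, as the intrinsic
// names in the checks reflect.
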
// CHECK-LABEL: @test_svst1h_scatter_s64offset_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.nxv2i16(<vscale x 2 x i16> [[TMP0]], <vscale x 2 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_s64offset_s64(svbool_t pg, int16_t *base, svint64_t offsets, svint64_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter_,s64,offset,_s64)(pg, base, offsets, data);
}

// CHECK-LABEL: @test_svst1h_scatter_s32offset_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.sxtw.nxv4i16(<vscale x 4 x i16> [[TMP0]], <vscale x 4 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_s32offset_u32(svbool_t pg, uint16_t *base, svint32_t offsets, svuint32_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter_,s32,offset,_u32)(pg, base, offsets, data);
}

// CHECK-LABEL: @test_svst1h_scatter_s64offset_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.nxv2i16(<vscale x 2 x i16> [[TMP0]], <vscale x 2 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_s64offset_u64(svbool_t pg, uint16_t *base, svint64_t offsets, svuint64_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter_,s64,offset,_u64)(pg, base, offsets, data);
}

// CHECK-LABEL: @test_svst1h_scatter_u32offset_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i16(<vscale x 4 x i16> [[TMP0]], <vscale x 4 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u32offset_s32(svbool_t pg, int16_t *base, svuint32_t offsets, svint32_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter_,u32,offset,_s32)(pg, base, offsets, data);
}

// CHECK-LABEL: @test_svst1h_scatter_u64offset_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.nxv2i16(<vscale x 2 x i16> [[TMP0]], <vscale x 2 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u64offset_s64(svbool_t pg, int16_t *base, svuint64_t offsets, svint64_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter_,u64,offset,_s64)(pg, base, offsets, data);
}

// CHECK-LABEL: @test_svst1h_scatter_u32offset_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.uxtw.nxv4i16(<vscale x 4 x i16> [[TMP0]], <vscale x 4 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u32offset_u32(svbool_t pg, uint16_t *base, svuint32_t offsets, svuint32_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter_,u32,offset,_u32)(pg, base, offsets, data);
}

// CHECK-LABEL: @test_svst1h_scatter_u64offset_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.nxv2i16(<vscale x 2 x i16> [[TMP0]], <vscale x 2 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u64offset_u64(svbool_t pg, uint16_t *base, svuint64_t offsets, svuint64_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter_,u64,offset,_u64)(pg, base, offsets, data);
}

// CHECK-LABEL: @test_svst1h_scatter_u32base_offset_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16> [[TMP0]], <vscale x 4 x i1> [[TMP1]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u32base_offset_s32(svbool_t pg, svuint32_t bases, int64_t offset, svint32_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter,_u32base,_offset,_s32)(pg, bases, offset, data);
}

// CHECK-LABEL: @test_svst1h_scatter_u64base_offset_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i16> [[TMP0]], <vscale x 2 x i1> [[TMP1]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u64base_offset_s64(svbool_t pg, svuint64_t bases, int64_t offset, svint64_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter,_u64base,_offset,_s64)(pg, bases, offset, data);
}

// CHECK-LABEL: @test_svst1h_scatter_u32base_offset_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16> [[TMP0]], <vscale x 4 x i1> [[TMP1]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u32base_offset_u32(svbool_t pg, svuint32_t bases, int64_t offset, svuint32_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter,_u32base,_offset,_u32)(pg, bases, offset, data);
}

// CHECK-LABEL: @test_svst1h_scatter_u64base_offset_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i16> [[TMP0]], <vscale x 2 x i1> [[TMP1]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u64base_offset_u64(svbool_t pg, svuint64_t bases, int64_t offset, svuint64_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter,_u64base,_offset,_u64)(pg, bases, offset, data);
}

// CHECK-LABEL: @test_svst1h_scatter_s32index_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i16(<vscale x 4 x i16> [[TMP0]], <vscale x 4 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[INDICES:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_s32index_s32(svbool_t pg, int16_t *base, svint32_t indices, svint32_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter_,s32,index,_s32)(pg, base, indices, data);
}

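// The vector _index forms are the scaled counterparts of the _offset forms:
// each per-lane index is multiplied by the element size, with the
// sxtw.index/uxtw.index intrinsics also widening 32-bit indices.
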
// CHECK-LABEL: @test_svst1h_scatter_s64index_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.index.nxv2i16(<vscale x 2 x i16> [[TMP0]], <vscale x 2 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_s64index_s64(svbool_t pg, int16_t *base, svint64_t indices, svint64_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter_,s64,index,_s64)(pg, base, indices, data);
}

// CHECK-LABEL: @test_svst1h_scatter_s32index_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.sxtw.index.nxv4i16(<vscale x 4 x i16> [[TMP0]], <vscale x 4 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[INDICES:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_s32index_u32(svbool_t pg, uint16_t *base, svint32_t indices, svuint32_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter_,s32,index,_u32)(pg, base, indices, data);
}

// CHECK-LABEL: @test_svst1h_scatter_s64index_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.index.nxv2i16(<vscale x 2 x i16> [[TMP0]], <vscale x 2 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_s64index_u64(svbool_t pg, uint16_t *base, svint64_t indices, svuint64_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter_,s64,index,_u64)(pg, base, indices, data);
}

// CHECK-LABEL: @test_svst1h_scatter_u32index_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i16(<vscale x 4 x i16> [[TMP0]], <vscale x 4 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[INDICES:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u32index_s32(svbool_t pg, int16_t *base, svuint32_t indices, svint32_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter_,u32,index,_s32)(pg, base, indices, data);
}

// CHECK-LABEL: @test_svst1h_scatter_u64index_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.index.nxv2i16(<vscale x 2 x i16> [[TMP0]], <vscale x 2 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u64index_s64(svbool_t pg, int16_t *base, svuint64_t indices, svint64_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter_,u64,index,_s64)(pg, base, indices, data);
}

// CHECK-LABEL: @test_svst1h_scatter_u32index_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.uxtw.index.nxv4i16(<vscale x 4 x i16> [[TMP0]], <vscale x 4 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[INDICES:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u32index_u32(svbool_t pg, uint16_t *base, svuint32_t indices, svuint32_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter_,u32,index,_u32)(pg, base, indices, data);
}

// CHECK-LABEL: @test_svst1h_scatter_u64index_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.index.nxv2i16(<vscale x 2 x i16> [[TMP0]], <vscale x 2 x i1> [[TMP1]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u64index_u64(svbool_t pg, uint16_t *base, svuint64_t indices, svuint64_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter_,u64,index,_u64)(pg, base, indices, data);
}

// CHECK-LABEL: @test_svst1h_scatter_u32base_index_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[INDEX:%.*]], 1
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16> [[TMP0]], <vscale x 4 x i1> [[TMP1]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[TMP2]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u32base_index_s32(svbool_t pg, svuint32_t bases, int64_t index, svint32_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter,_u32base,_index,_s32)(pg, bases, index, data);
}

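// For the _u32base/_u64base _index forms the scalar index is scaled to a
// byte offset up front (shl by 1 for halfwords) and then passed to the same
// scalar.offset intrinsic used by the _offset forms.
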
// CHECK-LABEL: @test_svst1h_scatter_u64base_index_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[INDEX:%.*]], 1
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i16> [[TMP0]], <vscale x 2 x i1> [[TMP1]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP2]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u64base_index_s64(svbool_t pg, svuint64_t bases, int64_t index, svint64_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter,_u64base,_index,_s64)(pg, bases, index, data);
}

// CHECK-LABEL: @test_svst1h_scatter_u32base_index_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[INDEX:%.*]], 1
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i16> [[TMP0]], <vscale x 4 x i1> [[TMP1]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[TMP2]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u32base_index_u32(svbool_t pg, svuint32_t bases, int64_t index, svuint32_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter,_u32base,_index,_u32)(pg, bases, index, data);
}

// CHECK-LABEL: @test_svst1h_scatter_u64base_index_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = trunc <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x i16>
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = shl i64 [[INDEX:%.*]], 1
// CHECK-NEXT: tail call void @llvm.aarch64.sve.st1.scatter.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i16> [[TMP0]], <vscale x 2 x i1> [[TMP1]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP2]])
// CHECK-NEXT: ret void
//
void test_svst1h_scatter_u64base_index_u64(svbool_t pg, svuint64_t bases, int64_t index, svuint64_t data)
{
  return SVE_ACLE_FUNC(svst1h_scatter,_u64base,_index,_u64)(pg, bases, index, data);
}

#endif