// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64 -target-feature +sve -S -disable-O0-optnone -Werror -o /dev/null %s
#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
#endif
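
// For example, SVE_ACLE_FUNC(svldff1sh_gather, _u32base, _s32, ) pastes to
// svldff1sh_gather_s32 (the overloaded form) when SVE_OVERLOADED_FORMS is
// defined, and to svldff1sh_gather_u32base_s32 (the fully-suffixed form)
// otherwise, so the RUN lines exercise both spellings of each builtin.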
// CHECK-LABEL: @test_svldff1sh_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z18test_svldff1sh_s32u10__SVBool_tPKs(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svldff1sh_s32(svbool_t pg, const int16_t *base)
{
  return svldff1sh_s32(pg, base);
}

// CHECK-LABEL: @test_svldff1sh_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z18test_svldff1sh_s64u10__SVBool_tPKs(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldff1sh_s64(svbool_t pg, const int16_t *base)
{
  return svldff1sh_s64(pg, base);
}

// CHECK-LABEL: @test_svldff1sh_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z18test_svldff1sh_u32u10__SVBool_tPKs(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svuint32_t test_svldff1sh_u32(svbool_t pg, const int16_t *base)
{
  return svldff1sh_u32(pg, base);
}

// CHECK-LABEL: @test_svldff1sh_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z18test_svldff1sh_u64u10__SVBool_tPKs(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldff1sh_u64(svbool_t pg, const int16_t *base)
{
  return svldff1sh_u64(pg, base);
}

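// The _vnum forms below index the base pointer in whole vectors: the
// getelementptr over a <vscale x N x i16> type scales vnum by the runtime
// vector length before the same first-faulting load is issued.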
// CHECK-LABEL: @test_svldff1sh_vnum_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr <vscale x 4 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 4 x i16> [[TMP2]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z23test_svldff1sh_vnum_s32u10__SVBool_tPKsl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = getelementptr <vscale x 4 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[TMP1]])
// CPP-CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 4 x i16> [[TMP2]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]]
//
svint32_t test_svldff1sh_vnum_s32(svbool_t pg, const int16_t *base, int64_t vnum)
{
  return svldff1sh_vnum_s32(pg, base, vnum);
}

// CHECK-LABEL: @test_svldff1sh_vnum_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr <vscale x 2 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 2 x i16> [[TMP2]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z23test_svldff1sh_vnum_s64u10__SVBool_tPKsl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = getelementptr <vscale x 2 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[TMP1]])
// CPP-CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 2 x i16> [[TMP2]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
svint64_t test_svldff1sh_vnum_s64(svbool_t pg, const int16_t *base, int64_t vnum)
{
  return svldff1sh_vnum_s64(pg, base, vnum);
}

// CHECK-LABEL: @test_svldff1sh_vnum_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr <vscale x 4 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 4 x i16> [[TMP2]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z23test_svldff1sh_vnum_u32u10__SVBool_tPKsl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = getelementptr <vscale x 4 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[TMP1]])
// CPP-CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 4 x i16> [[TMP2]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]]
//
svuint32_t test_svldff1sh_vnum_u32(svbool_t pg, const int16_t *base, int64_t vnum)
{
  return svldff1sh_vnum_u32(pg, base, vnum);
}

// CHECK-LABEL: @test_svldff1sh_vnum_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = getelementptr <vscale x 2 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 2 x i16> [[TMP2]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z23test_svldff1sh_vnum_u64u10__SVBool_tPKsl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = getelementptr <vscale x 2 x i16>, ptr [[BASE:%.*]], i64 [[VNUM:%.*]]
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[TMP1]])
// CPP-CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 2 x i16> [[TMP2]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
svuint64_t test_svldff1sh_vnum_u64(svbool_t pg, const int16_t *base, int64_t vnum)
{
  return svldff1sh_vnum_u64(pg, base, vnum);
}

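// The _u32base/_u64base gathers take a vector of base addresses and lower
// to the ldff1.gather.scalar.offset intrinsic with an immediate offset of 0.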
// CHECK-LABEL: @test_svldff1sh_gather_u32base_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z33test_svldff1sh_gather_u32base_s32u10__SVBool_tu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 0)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svldff1sh_gather_u32base_s32(svbool_t pg, svuint32_t bases) {
  return SVE_ACLE_FUNC(svldff1sh_gather, _u32base, _s32, )(pg, bases);
}

// CHECK-LABEL: @test_svldff1sh_gather_u64base_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z33test_svldff1sh_gather_u64base_s64u10__SVBool_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldff1sh_gather_u64base_s64(svbool_t pg, svuint64_t bases) {
  return SVE_ACLE_FUNC(svldff1sh_gather, _u64base, _s64, )(pg, bases);
}

// CHECK-LABEL: @test_svldff1sh_gather_u32base_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z33test_svldff1sh_gather_u32base_u32u10__SVBool_tu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 0)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svuint32_t test_svldff1sh_gather_u32base_u32(svbool_t pg, svuint32_t bases) {
  return SVE_ACLE_FUNC(svldff1sh_gather, _u32base, _u32, )(pg, bases);
}

// CHECK-LABEL: @test_svldff1sh_gather_u64base_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z33test_svldff1sh_gather_u64base_u64u10__SVBool_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldff1sh_gather_u64base_u64(svbool_t pg, svuint64_t bases) {
  return SVE_ACLE_FUNC(svldff1sh_gather, _u64base, _u64, )(pg, bases);
}

// CHECK-LABEL: @test_svldff1sh_gather_s32offset_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z35test_svldff1sh_gather_s32offset_s32u10__SVBool_tPKsu11__SVInt32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svldff1sh_gather_s32offset_s32(svbool_t pg, const int16_t *base, svint32_t offsets) {
  return SVE_ACLE_FUNC(svldff1sh_gather_, s32, offset_s32, )(pg, base, offsets);
}

// CHECK-LABEL: @test_svldff1sh_gather_s64offset_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z35test_svldff1sh_gather_s64offset_s64u10__SVBool_tPKsu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldff1sh_gather_s64offset_s64(svbool_t pg, const int16_t *base, svint64_t offsets) {
  return SVE_ACLE_FUNC(svldff1sh_gather_, s64, offset_s64, )(pg, base, offsets);
}

// CHECK-LABEL: @test_svldff1sh_gather_s32offset_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z35test_svldff1sh_gather_s32offset_u32u10__SVBool_tPKsu11__SVInt32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svuint32_t test_svldff1sh_gather_s32offset_u32(svbool_t pg, const int16_t *base, svint32_t offsets) {
  return SVE_ACLE_FUNC(svldff1sh_gather_, s32, offset_u32, )(pg, base, offsets);
}

// CHECK-LABEL: @test_svldff1sh_gather_s64offset_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z35test_svldff1sh_gather_s64offset_u64u10__SVBool_tPKsu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldff1sh_gather_s64offset_u64(svbool_t pg, const int16_t *base, svint64_t offsets) {
  return SVE_ACLE_FUNC(svldff1sh_gather_, s64, offset_u64, )(pg, base, offsets);
}

// CHECK-LABEL: @test_svldff1sh_gather_u32offset_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z35test_svldff1sh_gather_u32offset_s32u10__SVBool_tPKsu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svldff1sh_gather_u32offset_s32(svbool_t pg, const int16_t *base, svuint32_t offsets) {
  return SVE_ACLE_FUNC(svldff1sh_gather_, u32, offset_s32, )(pg, base, offsets);
}

// CHECK-LABEL: @test_svldff1sh_gather_u64offset_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z35test_svldff1sh_gather_u64offset_s64u10__SVBool_tPKsu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldff1sh_gather_u64offset_s64(svbool_t pg, const int16_t *base, svuint64_t offsets) {
  return SVE_ACLE_FUNC(svldff1sh_gather_, u64, offset_s64, )(pg, base, offsets);
}

// CHECK-LABEL: @test_svldff1sh_gather_u32offset_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z35test_svldff1sh_gather_u32offset_u32u10__SVBool_tPKsu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svuint32_t test_svldff1sh_gather_u32offset_u32(svbool_t pg, const int16_t *base, svuint32_t offsets) {
  return SVE_ACLE_FUNC(svldff1sh_gather_, u32, offset_u32, )(pg, base, offsets);
}

// CHECK-LABEL: @test_svldff1sh_gather_u64offset_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z35test_svldff1sh_gather_u64offset_u64u10__SVBool_tPKsu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldff1sh_gather_u64offset_u64(svbool_t pg, const int16_t *base, svuint64_t offsets) {
  return SVE_ACLE_FUNC(svldff1sh_gather_, u64, offset_u64, )(pg, base, offsets);
}

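// The _offset variants of the vector-base gathers pass the scalar byte
// offset straight through as the i64 operand of ldff1.gather.scalar.offset.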
// CHECK-LABEL: @test_svldff1sh_gather_u32base_offset_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z40test_svldff1sh_gather_u32base_offset_s32u10__SVBool_tu12__SVUint32_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svldff1sh_gather_u32base_offset_s32(svbool_t pg, svuint32_t bases, int64_t offset) {
  return SVE_ACLE_FUNC(svldff1sh_gather, _u32base, _offset_s32, )(pg, bases, offset);
}

// CHECK-LABEL: @test_svldff1sh_gather_u64base_offset_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z40test_svldff1sh_gather_u64base_offset_s64u10__SVBool_tu12__SVUint64_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldff1sh_gather_u64base_offset_s64(svbool_t pg, svuint64_t bases, int64_t offset) {
  return SVE_ACLE_FUNC(svldff1sh_gather, _u64base, _offset_s64, )(pg, bases, offset);
}

// CHECK-LABEL: @test_svldff1sh_gather_u32base_offset_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z40test_svldff1sh_gather_u32base_offset_u32u10__SVBool_tu12__SVUint32_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svuint32_t test_svldff1sh_gather_u32base_offset_u32(svbool_t pg, svuint32_t bases, int64_t offset) {
  return SVE_ACLE_FUNC(svldff1sh_gather, _u32base, _offset_u32, )(pg, bases, offset);
}

// CHECK-LABEL: @test_svldff1sh_gather_u64base_offset_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z40test_svldff1sh_gather_u64base_offset_u64u10__SVBool_tu12__SVUint64_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldff1sh_gather_u64base_offset_u64(svbool_t pg, svuint64_t bases, int64_t offset) {
  return SVE_ACLE_FUNC(svldff1sh_gather, _u64base, _offset_u64, )(pg, bases, offset);
}

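// The vector-index gathers below use the .index intrinsic variants, which
// scale each per-element index by the element size; 32-bit indices are
// first sign-extended (sxtw) or zero-extended (uxtw) to 64 bits.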
// CHECK-LABEL: @test_svldff1sh_gather_s32index_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[INDICES:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z34test_svldff1sh_gather_s32index_s32u10__SVBool_tPKsu11__SVInt32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[INDICES:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svldff1sh_gather_s32index_s32(svbool_t pg, const int16_t *base, svint32_t indices) {
  return SVE_ACLE_FUNC(svldff1sh_gather_, s32, index_s32, )(pg, base, indices);
}

// CHECK-LABEL: @test_svldff1sh_gather_s64index_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.index.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z34test_svldff1sh_gather_s64index_s64u10__SVBool_tPKsu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.index.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldff1sh_gather_s64index_s64(svbool_t pg, const int16_t *base, svint64_t indices) {
  return SVE_ACLE_FUNC(svldff1sh_gather_, s64, index_s64, )(pg, base, indices);
}

// CHECK-LABEL: @test_svldff1sh_gather_s32index_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[INDICES:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z34test_svldff1sh_gather_s32index_u32u10__SVBool_tPKsu11__SVInt32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.sxtw.index.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[INDICES:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svuint32_t test_svldff1sh_gather_s32index_u32(svbool_t pg, const int16_t *base, svint32_t indices) {
  return SVE_ACLE_FUNC(svldff1sh_gather_, s32, index_u32, )(pg, base, indices);
}

// CHECK-LABEL: @test_svldff1sh_gather_s64index_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.index.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z34test_svldff1sh_gather_s64index_u64u10__SVBool_tPKsu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.index.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldff1sh_gather_s64index_u64(svbool_t pg, const int16_t *base, svint64_t indices) {
  return SVE_ACLE_FUNC(svldff1sh_gather_, s64, index_u64, )(pg, base, indices);
}

// CHECK-LABEL: @test_svldff1sh_gather_u32index_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[INDICES:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z34test_svldff1sh_gather_u32index_s32u10__SVBool_tPKsu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[INDICES:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svldff1sh_gather_u32index_s32(svbool_t pg, const int16_t *base, svuint32_t indices) {
  return SVE_ACLE_FUNC(svldff1sh_gather_, u32, index_s32, )(pg, base, indices);
}

// CHECK-LABEL: @test_svldff1sh_gather_u64index_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.index.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z34test_svldff1sh_gather_u64index_s64u10__SVBool_tPKsu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.index.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldff1sh_gather_u64index_s64(svbool_t pg, const int16_t *base, svuint64_t indices) {
  return SVE_ACLE_FUNC(svldff1sh_gather_, u64, index_s64, )(pg, base, indices);
}

// CHECK-LABEL: @test_svldff1sh_gather_u32index_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[INDICES:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z34test_svldff1sh_gather_u32index_u32u10__SVBool_tPKsu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.uxtw.index.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[INDICES:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svuint32_t test_svldff1sh_gather_u32index_u32(svbool_t pg, const int16_t *base, svuint32_t indices) {
  return SVE_ACLE_FUNC(svldff1sh_gather_, u32, index_u32, )(pg, base, indices);
}

// CHECK-LABEL: @test_svldff1sh_gather_u64index_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.index.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z34test_svldff1sh_gather_u64index_u64u10__SVBool_tPKsu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.index.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldff1sh_gather_u64index_u64(svbool_t pg, const int16_t *base, svuint64_t indices) {
  return SVE_ACLE_FUNC(svldff1sh_gather_, u64, index_u64, )(pg, base, indices);
}

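// For the vector-base _index forms the scaling is explicit instead: the
// shl by 1 below converts the scalar index into a byte offset
// (index * sizeof(int16_t)) before the gather intrinsic is called.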
// CHECK-LABEL: @test_svldff1sh_gather_u32base_index_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 1
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 4 x i16> [[TMP2]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z39test_svldff1sh_gather_u32base_index_s32u10__SVBool_tu12__SVUint32_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[TMP1]])
// CPP-CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 4 x i16> [[TMP2]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]]
//
svint32_t test_svldff1sh_gather_u32base_index_s32(svbool_t pg, svuint32_t bases, int64_t index) {
  return SVE_ACLE_FUNC(svldff1sh_gather, _u32base, _index_s32, )(pg, bases, index);
}

// CHECK-LABEL: @test_svldff1sh_gather_u64base_index_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 1
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 2 x i16> [[TMP2]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z39test_svldff1sh_gather_u64base_index_s64u10__SVBool_tu12__SVUint64_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP1]])
// CPP-CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 2 x i16> [[TMP2]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
svint64_t test_svldff1sh_gather_u64base_index_s64(svbool_t pg, svuint64_t bases, int64_t index) {
  return SVE_ACLE_FUNC(svldff1sh_gather, _u64base, _index_s64, )(pg, bases, index);
}

// CHECK-LABEL: @test_svldff1sh_gather_u32base_index_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 1
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 4 x i16> [[TMP2]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z39test_svldff1sh_gather_u32base_index_u32u10__SVBool_tu12__SVUint32_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[TMP1]])
// CPP-CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 4 x i16> [[TMP2]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]]
//
svuint32_t test_svldff1sh_gather_u32base_index_u32(svbool_t pg, svuint32_t bases, int64_t index) {
  return SVE_ACLE_FUNC(svldff1sh_gather, _u32base, _index_u32, )(pg, bases, index);
}

// CHECK-LABEL: @test_svldff1sh_gather_u64base_index_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 1
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 2 x i16> [[TMP2]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z39test_svldff1sh_gather_u64base_index_u64u10__SVBool_tu12__SVUint64_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldff1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP1]])
// CPP-CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 2 x i16> [[TMP2]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
svuint64_t test_svldff1sh_gather_u64base_index_u64(svbool_t pg, svuint64_t bases, int64_t index) {
  return SVE_ACLE_FUNC(svldff1sh_gather, _u64base, _index_u64, )(pg, bases, index);
}