// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2 -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2 -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
// REQUIRES: aarch64-registered-target

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
#endif
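
// For example, SVE_ACLE_FUNC(svldnt1sh_gather, _u32base, _s32, ) expands to
// the overloaded name svldnt1sh_gather_s32 when SVE_OVERLOADED_FORMS is
// defined, and to the fully-suffixed name svldnt1sh_gather_u32base_s32
// otherwise.
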
// CHECK-LABEL: @test_svldnt1sh_gather_u32base_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z33test_svldnt1sh_gather_u32base_s32u10__SVBool_tu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 0)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svldnt1sh_gather_u32base_s32(svbool_t pg, svuint32_t bases) {
  return SVE_ACLE_FUNC(svldnt1sh_gather, _u32base, _s32, )(pg, bases);
}

// CHECK-LABEL: @test_svldnt1sh_gather_u64base_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z33test_svldnt1sh_gather_u64base_s64u10__SVBool_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldnt1sh_gather_u64base_s64(svbool_t pg, svuint64_t bases) {
  return SVE_ACLE_FUNC(svldnt1sh_gather, _u64base, _s64, )(pg, bases);
}

// CHECK-LABEL: @test_svldnt1sh_gather_u32base_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z33test_svldnt1sh_gather_u32base_u32u10__SVBool_tu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 0)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svuint32_t test_svldnt1sh_gather_u32base_u32(svbool_t pg, svuint32_t bases) {
  return SVE_ACLE_FUNC(svldnt1sh_gather, _u32base, _u32, )(pg, bases);
}

// CHECK-LABEL: @test_svldnt1sh_gather_u64base_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z33test_svldnt1sh_gather_u64base_u64u10__SVBool_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldnt1sh_gather_u64base_u64(svbool_t pg, svuint64_t bases) {
  return SVE_ACLE_FUNC(svldnt1sh_gather, _u64base, _u64, )(pg, bases);
}

// CHECK-LABEL: @test_svldnt1sh_gather_s64offset_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z35test_svldnt1sh_gather_s64offset_s64u10__SVBool_tPKsu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldnt1sh_gather_s64offset_s64(svbool_t pg, const int16_t *base, svint64_t offsets) {
  return SVE_ACLE_FUNC(svldnt1sh_gather_, s64, offset_s64, )(pg, base, offsets);
}

// CHECK-LABEL: @test_svldnt1sh_gather_s64offset_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z35test_svldnt1sh_gather_s64offset_u64u10__SVBool_tPKsu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldnt1sh_gather_s64offset_u64(svbool_t pg, const int16_t *base, svint64_t offsets) {
  return SVE_ACLE_FUNC(svldnt1sh_gather_, s64, offset_u64, )(pg, base, offsets);
}

// CHECK-LABEL: @test_svldnt1sh_gather_u32offset_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z35test_svldnt1sh_gather_u32offset_s32u10__SVBool_tPKsu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svldnt1sh_gather_u32offset_s32(svbool_t pg, const int16_t *base, svuint32_t offsets) {
  return SVE_ACLE_FUNC(svldnt1sh_gather_, u32, offset_s32, )(pg, base, offsets);
}

// CHECK-LABEL: @test_svldnt1sh_gather_u64offset_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z35test_svldnt1sh_gather_u64offset_s64u10__SVBool_tPKsu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldnt1sh_gather_u64offset_s64(svbool_t pg, const int16_t *base, svuint64_t offsets) {
  return SVE_ACLE_FUNC(svldnt1sh_gather_, u64, offset_s64, )(pg, base, offsets);
}

// CHECK-LABEL: @test_svldnt1sh_gather_u32offset_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z35test_svldnt1sh_gather_u32offset_u32u10__SVBool_tPKsu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i16(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svuint32_t test_svldnt1sh_gather_u32offset_u32(svbool_t pg, const int16_t *base, svuint32_t offsets) {
  return SVE_ACLE_FUNC(svldnt1sh_gather_, u32, offset_u32, )(pg, base, offsets);
}

// CHECK-LABEL: @test_svldnt1sh_gather_u64offset_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z35test_svldnt1sh_gather_u64offset_u64u10__SVBool_tPKsu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldnt1sh_gather_u64offset_u64(svbool_t pg, const int16_t *base, svuint64_t offsets) {
  return SVE_ACLE_FUNC(svldnt1sh_gather_, u64, offset_u64, )(pg, base, offsets);
}

// CHECK-LABEL: @test_svldnt1sh_gather_u32base_offset_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z40test_svldnt1sh_gather_u32base_offset_s32u10__SVBool_tu12__SVUint32_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svldnt1sh_gather_u32base_offset_s32(svbool_t pg, svuint32_t bases, int64_t offset) {
  return SVE_ACLE_FUNC(svldnt1sh_gather, _u32base, _offset_s32, )(pg, bases, offset);
}

// CHECK-LABEL: @test_svldnt1sh_gather_u64base_offset_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z40test_svldnt1sh_gather_u64base_offset_s64u10__SVBool_tu12__SVUint64_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldnt1sh_gather_u64base_offset_s64(svbool_t pg, svuint64_t bases, int64_t offset) {
  return SVE_ACLE_FUNC(svldnt1sh_gather, _u64base, _offset_s64, )(pg, bases, offset);
}

// CHECK-LABEL: @test_svldnt1sh_gather_u32base_offset_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z40test_svldnt1sh_gather_u32base_offset_u32u10__SVBool_tu12__SVUint32_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 4 x i16> [[TMP1]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svuint32_t test_svldnt1sh_gather_u32base_offset_u32(svbool_t pg, svuint32_t bases, int64_t offset) {
  return SVE_ACLE_FUNC(svldnt1sh_gather, _u32base, _offset_u32, )(pg, bases, offset);
}

// CHECK-LABEL: @test_svldnt1sh_gather_u64base_offset_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z40test_svldnt1sh_gather_u64base_offset_u64u10__SVBool_tu12__SVUint64_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldnt1sh_gather_u64base_offset_u64(svbool_t pg, svuint64_t bases, int64_t offset) {
  return SVE_ACLE_FUNC(svldnt1sh_gather, _u64base, _offset_u64, )(pg, bases, offset);
}

// CHECK-LABEL: @test_svldnt1sh_gather_s64index_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z34test_svldnt1sh_gather_s64index_s64u10__SVBool_tPKsu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldnt1sh_gather_s64index_s64(svbool_t pg, const int16_t *base, svint64_t indices) {
  return SVE_ACLE_FUNC(svldnt1sh_gather_, s64, index_s64, )(pg, base, indices);
}

// CHECK-LABEL: @test_svldnt1sh_gather_s64index_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z34test_svldnt1sh_gather_s64index_u64u10__SVBool_tPKsu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldnt1sh_gather_s64index_u64(svbool_t pg, const int16_t *base, svint64_t indices) {
  return SVE_ACLE_FUNC(svldnt1sh_gather_, s64, index_u64, )(pg, base, indices);
}

// CHECK-LABEL: @test_svldnt1sh_gather_u64index_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z34test_svldnt1sh_gather_u64index_s64u10__SVBool_tPKsu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldnt1sh_gather_u64index_s64(svbool_t pg, const int16_t *base, svuint64_t indices) {
  return SVE_ACLE_FUNC(svldnt1sh_gather_, u64, index_s64, )(pg, base, indices);
}

// CHECK-LABEL: @test_svldnt1sh_gather_u64index_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z34test_svldnt1sh_gather_u64index_u64u10__SVBool_tPKsu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i16(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i16> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldnt1sh_gather_u64index_u64(svbool_t pg, const int16_t *base, svuint64_t indices) {
  return SVE_ACLE_FUNC(svldnt1sh_gather_, u64, index_u64, )(pg, base, indices);
}

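// In the vector-base-plus-index tests below, the element index is converted
// to a byte offset up front: the IR shifts the index left by 1 (multiplying
// by sizeof(int16_t)) before calling the scalar-offset gather intrinsic.
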
// CHECK-LABEL: @test_svldnt1sh_gather_u32base_index_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 1
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 4 x i16> [[TMP2]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z39test_svldnt1sh_gather_u32base_index_s32u10__SVBool_tu12__SVUint32_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[TMP1]])
// CPP-CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 4 x i16> [[TMP2]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]]
//
svint32_t test_svldnt1sh_gather_u32base_index_s32(svbool_t pg, svuint32_t bases, int64_t index) {
  return SVE_ACLE_FUNC(svldnt1sh_gather, _u32base, _index_s32, )(pg, bases, index);
}

// CHECK-LABEL: @test_svldnt1sh_gather_u64base_index_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 1
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 2 x i16> [[TMP2]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z39test_svldnt1sh_gather_u64base_index_s64u10__SVBool_tu12__SVUint64_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP1]])
// CPP-CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 2 x i16> [[TMP2]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
svint64_t test_svldnt1sh_gather_u64base_index_s64(svbool_t pg, svuint64_t bases, int64_t index) {
  return SVE_ACLE_FUNC(svldnt1sh_gather, _u64base, _index_s64, )(pg, bases, index);
}

// CHECK-LABEL: @test_svldnt1sh_gather_u32base_index_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 1
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 4 x i16> [[TMP2]] to <vscale x 4 x i32>
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z39test_svldnt1sh_gather_u32base_index_u32u10__SVBool_tu12__SVUint32_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i16.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[TMP1]])
// CPP-CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 4 x i16> [[TMP2]] to <vscale x 4 x i32>
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP3]]
//
svuint32_t test_svldnt1sh_gather_u32base_index_u32(svbool_t pg, svuint32_t bases, int64_t index) {
  return SVE_ACLE_FUNC(svldnt1sh_gather, _u32base, _index_u32, )(pg, bases, index);
}

// CHECK-LABEL: @test_svldnt1sh_gather_u64base_index_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 1
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 2 x i16> [[TMP2]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z39test_svldnt1sh_gather_u64base_index_u64u10__SVBool_tu12__SVUint64_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 1
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i16> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i16.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP1]])
// CPP-CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 2 x i16> [[TMP2]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
svuint64_t test_svldnt1sh_gather_u64base_index_u64(svbool_t pg, svuint64_t bases, int64_t index) {
  return SVE_ACLE_FUNC(svldnt1sh_gather, _u64base, _index_u64, )(pg, bases, index);
}