// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2 -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2 -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
// REQUIRES: aarch64-registered-target

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
#endif
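
// As an illustration of the macro above (not itself a test), a call such as
//   SVE_ACLE_FUNC(svldnt1sw_gather, _u64base, _s64, )
// expands to the overloaded name svldnt1sw_gather_s64 when
// SVE_OVERLOADED_FORMS is defined, and to the explicit name
// svldnt1sw_gather_u64base_s64 otherwise; both resolve to the same builtin.
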
// CHECK-LABEL: @test_svldnt1sw_gather_u64base_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z33test_svldnt1sw_gather_u64base_s64u10__SVBool_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldnt1sw_gather_u64base_s64(svbool_t pg, svuint64_t bases) {
  return SVE_ACLE_FUNC(svldnt1sw_gather, _u64base, _s64, )(pg, bases);
}

// CHECK-LABEL: @test_svldnt1sw_gather_u64base_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z33test_svldnt1sw_gather_u64base_u64u10__SVBool_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldnt1sw_gather_u64base_u64(svbool_t pg, svuint64_t bases) {
  return SVE_ACLE_FUNC(svldnt1sw_gather, _u64base, _u64, )(pg, bases);
}

// CHECK-LABEL: @test_svldnt1sw_gather_s64offset_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.nxv2i32(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z35test_svldnt1sw_gather_s64offset_s64u10__SVBool_tPKiu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.nxv2i32(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldnt1sw_gather_s64offset_s64(svbool_t pg, const int32_t *base, svint64_t offsets) {
  return SVE_ACLE_FUNC(svldnt1sw_gather_, s64, offset_s64, )(pg, base, offsets);
}

// CHECK-LABEL: @test_svldnt1sw_gather_s64offset_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.nxv2i32(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z35test_svldnt1sw_gather_s64offset_u64u10__SVBool_tPKiu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.nxv2i32(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldnt1sw_gather_s64offset_u64(svbool_t pg, const int32_t *base, svint64_t offsets) {
  return SVE_ACLE_FUNC(svldnt1sw_gather_, s64, offset_u64, )(pg, base, offsets);
}

// CHECK-LABEL: @test_svldnt1sw_gather_u64offset_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.nxv2i32(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z35test_svldnt1sw_gather_u64offset_s64u10__SVBool_tPKiu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.nxv2i32(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldnt1sw_gather_u64offset_s64(svbool_t pg, const int32_t *base, svuint64_t offsets) {
  return SVE_ACLE_FUNC(svldnt1sw_gather_, u64, offset_s64, )(pg, base, offsets);
}

// CHECK-LABEL: @test_svldnt1sw_gather_u64offset_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.nxv2i32(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z35test_svldnt1sw_gather_u64offset_u64u10__SVBool_tPKiu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.nxv2i32(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldnt1sw_gather_u64offset_u64(svbool_t pg, const int32_t *base, svuint64_t offsets) {
  return SVE_ACLE_FUNC(svldnt1sw_gather_, u64, offset_u64, )(pg, base, offsets);
}

// CHECK-LABEL: @test_svldnt1sw_gather_u64base_offset_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z40test_svldnt1sw_gather_u64base_offset_s64u10__SVBool_tu12__SVUint64_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldnt1sw_gather_u64base_offset_s64(svbool_t pg, svuint64_t bases, int64_t offset) {
  return SVE_ACLE_FUNC(svldnt1sw_gather, _u64base, _offset_s64, )(pg, bases, offset);
}

// CHECK-LABEL: @test_svldnt1sw_gather_u64base_offset_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z40test_svldnt1sw_gather_u64base_offset_u64u10__SVBool_tu12__SVUint64_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldnt1sw_gather_u64base_offset_u64(svbool_t pg, svuint64_t bases, int64_t offset) {
  return SVE_ACLE_FUNC(svldnt1sw_gather, _u64base, _offset_u64, )(pg, bases, offset);
}

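// Note: the _offset_ tests above pass raw byte offsets and lower to the
// unscaled @llvm.aarch64.sve.ldnt1.gather intrinsic, while the _index_ tests
// below pass element indices and lower to @llvm.aarch64.sve.ldnt1.gather.index,
// as the checked IR shows.
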
// CHECK-LABEL: @test_svldnt1sw_gather_s64index_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i32(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z34test_svldnt1sw_gather_s64index_s64u10__SVBool_tPKiu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i32(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldnt1sw_gather_s64index_s64(svbool_t pg, const int32_t *base, svint64_t indices) {
  return SVE_ACLE_FUNC(svldnt1sw_gather_, s64, index_s64, )(pg, base, indices);
}

// CHECK-LABEL: @test_svldnt1sw_gather_s64index_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i32(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z34test_svldnt1sw_gather_s64index_u64u10__SVBool_tPKiu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i32(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldnt1sw_gather_s64index_u64(svbool_t pg, const int32_t *base, svint64_t indices) {
  return SVE_ACLE_FUNC(svldnt1sw_gather_, s64, index_u64, )(pg, base, indices);
}

// CHECK-LABEL: @test_svldnt1sw_gather_u64index_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i32(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z34test_svldnt1sw_gather_u64index_s64u10__SVBool_tPKiu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i32(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldnt1sw_gather_u64index_s64(svbool_t pg, const int32_t *base, svuint64_t indices) {
  return SVE_ACLE_FUNC(svldnt1sw_gather_, u64, index_s64, )(pg, base, indices);
}

// CHECK-LABEL: @test_svldnt1sw_gather_u64index_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i32(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z34test_svldnt1sw_gather_u64index_u64u10__SVBool_tPKiu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i32(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CPP-CHECK-NEXT: [[TMP2:%.*]] = sext <vscale x 2 x i32> [[TMP1]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldnt1sw_gather_u64index_u64(svbool_t pg, const int32_t *base, svuint64_t indices) {
  return SVE_ACLE_FUNC(svldnt1sw_gather_, u64, index_u64, )(pg, base, indices);
}

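// For the _u64base_index_ tests below, the checked IR first converts the
// element index to a byte offset (the shl by 2 multiplies it by
// sizeof(int32_t)) and then uses the scalar.offset form of the gather.
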
// CHECK-LABEL: @test_svldnt1sw_gather_u64base_index_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 2
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 2 x i32> [[TMP2]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z39test_svldnt1sw_gather_u64base_index_s64u10__SVBool_tu12__SVUint64_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 2
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP1]])
// CPP-CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 2 x i32> [[TMP2]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
svint64_t test_svldnt1sw_gather_u64base_index_s64(svbool_t pg, svuint64_t bases, int64_t index) {
  return SVE_ACLE_FUNC(svldnt1sw_gather, _u64base, _index_s64, )(pg, bases, index);
}

// CHECK-LABEL: @test_svldnt1sw_gather_u64base_index_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 2
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP1]])
// CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 2 x i32> [[TMP2]] to <vscale x 2 x i64>
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
// CPP-CHECK-LABEL: @_Z39test_svldnt1sw_gather_u64base_index_u64u10__SVBool_tu12__SVUint64_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 2
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i32.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP1]])
// CPP-CHECK-NEXT: [[TMP3:%.*]] = sext <vscale x 2 x i32> [[TMP2]] to <vscale x 2 x i64>
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP3]]
//
svuint64_t test_svldnt1sw_gather_u64base_index_u64(svbool_t pg, svuint64_t bases, int64_t index) {
  return SVE_ACLE_FUNC(svldnt1sw_gather, _u64base, _index_u64, )(pg, bases, index);
}