// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2 -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve2 -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -S -O1 -Werror -Wall -emit-llvm -o - %s | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve2 -S -O1 -Werror -Wall -emit-llvm -o - -x c++ %s | FileCheck %s -check-prefix=CPP-CHECK

// REQUIRES: aarch64-registered-target

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1, A2_UNUSED, A3, A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1, A2, A3, A4) A1##A2##A3##A4
#endif
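
// For example, SVE_ACLE_FUNC(svldnt1_gather, _u32base, _s32, ) expands to the
// overloaded name svldnt1_gather_s32 when SVE_OVERLOADED_FORMS is defined, and
// to the fully-suffixed name svldnt1_gather_u32base_s32 otherwise.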

// CHECK-LABEL: @test_svldnt1_gather_u32base_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z31test_svldnt1_gather_u32base_s32u10__SVBool_tu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 0)
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svldnt1_gather_u32base_s32(svbool_t pg, svuint32_t bases) {
  return SVE_ACLE_FUNC(svldnt1_gather, _u32base, _s32, )(pg, bases);
}
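
// Every test below follows the same lowering pattern seen above: the svbool_t
// predicate is first narrowed with llvm.aarch64.sve.convert.from.svbool, and
// the non-temporal gather becomes an aarch64.sve.ldnt1.gather.* intrinsic.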

// CHECK-LABEL: @test_svldnt1_gather_u64base_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z31test_svldnt1_gather_u64base_s64u10__SVBool_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svldnt1_gather_u64base_s64(svbool_t pg, svuint64_t bases) {
  return SVE_ACLE_FUNC(svldnt1_gather, _u64base, _s64, )(pg, bases);
}

// CHECK-LABEL: @test_svldnt1_gather_u32base_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z31test_svldnt1_gather_u32base_u32u10__SVBool_tu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 0)
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svldnt1_gather_u32base_u32(svbool_t pg, svuint32_t bases) {
  return SVE_ACLE_FUNC(svldnt1_gather, _u32base, _u32, )(pg, bases);
}

// CHECK-LABEL: @test_svldnt1_gather_u64base_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z31test_svldnt1_gather_u64base_u64u10__SVBool_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svldnt1_gather_u64base_u64(svbool_t pg, svuint64_t bases) {
  return SVE_ACLE_FUNC(svldnt1_gather, _u64base, _u64, )(pg, bases);
}

// CHECK-LABEL: @test_svldnt1_gather_u32base_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z31test_svldnt1_gather_u32base_f32u10__SVBool_tu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 0)
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
svfloat32_t test_svldnt1_gather_u32base_f32(svbool_t pg, svuint32_t bases) {
  return SVE_ACLE_FUNC(svldnt1_gather, _u32base, _f32, )(pg, bases);
}

// CHECK-LABEL: @test_svldnt1_gather_u64base_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z31test_svldnt1_gather_u64base_f64u10__SVBool_tu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 0)
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
svfloat64_t test_svldnt1_gather_u64base_f64(svbool_t pg, svuint64_t bases) {
  return SVE_ACLE_FUNC(svldnt1_gather, _u64base, _f64, )(pg, bases);
}
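
// The vector-offset forms below take per-element byte offsets. In the checked
// IR, 64-bit offsets go straight to llvm.aarch64.sve.ldnt1.gather.*, while
// 32-bit offsets use the .uxtw variants, i.e. they are zero-extended to 64 bits.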

// CHECK-LABEL: @test_svldnt1_gather_s64offset_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.nxv2i64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z33test_svldnt1_gather_s64offset_s64u10__SVBool_tPKlu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.nxv2i64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svldnt1_gather_s64offset_s64(svbool_t pg, const int64_t *base, svint64_t offsets) {
  return SVE_ACLE_FUNC(svldnt1_gather_, s64, offset, _s64)(pg, base, offsets);
}

// CHECK-LABEL: @test_svldnt1_gather_s64offset_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.nxv2i64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z33test_svldnt1_gather_s64offset_u64u10__SVBool_tPKmu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.nxv2i64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svldnt1_gather_s64offset_u64(svbool_t pg, const uint64_t *base, svint64_t offsets) {
  return SVE_ACLE_FUNC(svldnt1_gather_, s64, offset, _u64)(pg, base, offsets);
}

// CHECK-LABEL: @test_svldnt1_gather_s64offset_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.nxv2f64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z33test_svldnt1_gather_s64offset_f64u10__SVBool_tPKdu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.nxv2f64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
svfloat64_t test_svldnt1_gather_s64offset_f64(svbool_t pg, const float64_t *base, svint64_t offsets) {
  return SVE_ACLE_FUNC(svldnt1_gather_, s64, offset, _f64)(pg, base, offsets);
}

// CHECK-LABEL: @test_svldnt1_gather_u32offset_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i32(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z33test_svldnt1_gather_u32offset_s32u10__SVBool_tPKiu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i32(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svldnt1_gather_u32offset_s32(svbool_t pg, const int32_t *base, svuint32_t offsets) {
  return SVE_ACLE_FUNC(svldnt1_gather_, u32, offset, _s32)(pg, base, offsets);
}

// CHECK-LABEL: @test_svldnt1_gather_u64offset_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.nxv2i64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z33test_svldnt1_gather_u64offset_s64u10__SVBool_tPKlu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.nxv2i64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svldnt1_gather_u64offset_s64(svbool_t pg, const int64_t *base, svuint64_t offsets) {
  return SVE_ACLE_FUNC(svldnt1_gather_, u64, offset, _s64)(pg, base, offsets);
}

// CHECK-LABEL: @test_svldnt1_gather_u32offset_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i32(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z33test_svldnt1_gather_u32offset_u32u10__SVBool_tPKju12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4i32(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svldnt1_gather_u32offset_u32(svbool_t pg, const uint32_t *base, svuint32_t offsets) {
  return SVE_ACLE_FUNC(svldnt1_gather_, u32, offset, _u32)(pg, base, offsets);
}

// CHECK-LABEL: @test_svldnt1_gather_u64offset_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.nxv2i64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z33test_svldnt1_gather_u64offset_u64u10__SVBool_tPKmu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.nxv2i64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svldnt1_gather_u64offset_u64(svbool_t pg, const uint64_t *base, svuint64_t offsets) {
  return SVE_ACLE_FUNC(svldnt1_gather_, u64, offset, _u64)(pg, base, offsets);
}

// CHECK-LABEL: @test_svldnt1_gather_u32offset_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4f32(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z33test_svldnt1_gather_u32offset_f32u10__SVBool_tPKfu12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.uxtw.nxv4f32(<vscale x 4 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 4 x i32> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
svfloat32_t test_svldnt1_gather_u32offset_f32(svbool_t pg, const float32_t *base, svuint32_t offsets) {
  return SVE_ACLE_FUNC(svldnt1_gather_, u32, offset, _f32)(pg, base, offsets);
}

// CHECK-LABEL: @test_svldnt1_gather_u64offset_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.nxv2f64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z33test_svldnt1_gather_u64offset_f64u10__SVBool_tPKdu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.nxv2f64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[OFFSETS:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
svfloat64_t test_svldnt1_gather_u64offset_f64(svbool_t pg, const float64_t *base, svuint64_t offsets) {
  return SVE_ACLE_FUNC(svldnt1_gather_, u64, offset, _f64)(pg, base, offsets);
}
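
// The base-plus-scalar-offset forms below pass the offset through unscaled,
// as the trailing i64 operand of the scalar.offset intrinsics.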

// CHECK-LABEL: @test_svldnt1_gather_u32base_offset_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z38test_svldnt1_gather_u32base_offset_s32u10__SVBool_tu12__SVUint32_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svldnt1_gather_u32base_offset_s32(svbool_t pg, svuint32_t bases, int64_t offset) {
  return SVE_ACLE_FUNC(svldnt1_gather, _u32base, _offset_s32, )(pg, bases, offset);
}

// CHECK-LABEL: @test_svldnt1_gather_u64base_offset_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z38test_svldnt1_gather_u64base_offset_s64u10__SVBool_tu12__SVUint64_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svldnt1_gather_u64base_offset_s64(svbool_t pg, svuint64_t bases, int64_t offset) {
  return SVE_ACLE_FUNC(svldnt1_gather, _u64base, _offset_s64, )(pg, bases, offset);
}

// CHECK-LABEL: @test_svldnt1_gather_u32base_offset_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z38test_svldnt1_gather_u32base_offset_u32u10__SVBool_tu12__SVUint32_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svldnt1_gather_u32base_offset_u32(svbool_t pg, svuint32_t bases, int64_t offset) {
  return SVE_ACLE_FUNC(svldnt1_gather, _u32base, _offset_u32, )(pg, bases, offset);
}

// CHECK-LABEL: @test_svldnt1_gather_u64base_offset_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z38test_svldnt1_gather_u64base_offset_u64u10__SVBool_tu12__SVUint64_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svldnt1_gather_u64base_offset_u64(svbool_t pg, svuint64_t bases, int64_t offset) {
  return SVE_ACLE_FUNC(svldnt1_gather, _u64base, _offset_u64, )(pg, bases, offset);
}

// CHECK-LABEL: @test_svldnt1_gather_u32base_offset_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z38test_svldnt1_gather_u32base_offset_f32u10__SVBool_tu12__SVUint32_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
svfloat32_t test_svldnt1_gather_u32base_offset_f32(svbool_t pg, svuint32_t bases, int64_t offset) {
  return SVE_ACLE_FUNC(svldnt1_gather, _u32base, _offset_f32, )(pg, bases, offset);
}

// CHECK-LABEL: @test_svldnt1_gather_u64base_offset_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z38test_svldnt1_gather_u64base_offset_f64u10__SVBool_tu12__SVUint64_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[OFFSET:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
svfloat64_t test_svldnt1_gather_u64base_offset_f64(svbool_t pg, svuint64_t bases, int64_t offset) {
  return SVE_ACLE_FUNC(svldnt1_gather, _u64base, _offset_f64, )(pg, bases, offset);
}
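
// The vector-index forms below map to the ldnt1.gather.index intrinsics, which
// take element indices rather than byte offsets; no explicit scaling appears.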

// CHECK-LABEL: @test_svldnt1_gather_s64index_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z32test_svldnt1_gather_s64index_s64u10__SVBool_tPKlu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svldnt1_gather_s64index_s64(svbool_t pg, const int64_t *base, svint64_t indices) {
  return SVE_ACLE_FUNC(svldnt1_gather_, s64, index, _s64)(pg, base, indices);
}

// CHECK-LABEL: @test_svldnt1_gather_s64index_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z32test_svldnt1_gather_s64index_u64u10__SVBool_tPKmu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svldnt1_gather_s64index_u64(svbool_t pg, const uint64_t *base, svint64_t indices) {
  return SVE_ACLE_FUNC(svldnt1_gather_, s64, index, _u64)(pg, base, indices);
}

// CHECK-LABEL: @test_svldnt1_gather_s64index_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.index.nxv2f64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z32test_svldnt1_gather_s64index_f64u10__SVBool_tPKdu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.index.nxv2f64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
svfloat64_t test_svldnt1_gather_s64index_f64(svbool_t pg, const float64_t *base, svint64_t indices) {
  return SVE_ACLE_FUNC(svldnt1_gather_, s64, index, _f64)(pg, base, indices);
}

// CHECK-LABEL: @test_svldnt1_gather_u64index_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z32test_svldnt1_gather_u64index_s64u10__SVBool_tPKlu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svldnt1_gather_u64index_s64(svbool_t pg, const int64_t *base, svuint64_t indices) {
  return SVE_ACLE_FUNC(svldnt1_gather_, u64, index, _s64)(pg, base, indices);
}

// CHECK-LABEL: @test_svldnt1_gather_u64index_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z32test_svldnt1_gather_u64index_u64u10__SVBool_tPKmu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.index.nxv2i64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svldnt1_gather_u64index_u64(svbool_t pg, const uint64_t *base, svuint64_t indices) {
  return SVE_ACLE_FUNC(svldnt1_gather_, u64, index, _u64)(pg, base, indices);
}

// CHECK-LABEL: @test_svldnt1_gather_u64index_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.index.nxv2f64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z32test_svldnt1_gather_u64index_f64u10__SVBool_tPKdu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.index.nxv2f64(<vscale x 2 x i1> [[TMP0]], ptr [[BASE:%.*]], <vscale x 2 x i64> [[INDICES:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
svfloat64_t test_svldnt1_gather_u64index_f64(svbool_t pg, const float64_t *base, svuint64_t indices) {
  return SVE_ACLE_FUNC(svldnt1_gather_, u64, index, _f64)(pg, base, indices);
}
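
// The base-plus-scalar-index forms convert the index to a byte offset up
// front, visible below as a shl by 2 (x4, 32-bit elements) or 3 (x8, 64-bit).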

// CHECK-LABEL: @test_svldnt1_gather_u32base_index_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 2
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[TMP1]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z37test_svldnt1_gather_u32base_index_s32u10__SVBool_tu12__SVUint32_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 2
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[TMP1]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svint32_t test_svldnt1_gather_u32base_index_s32(svbool_t pg, svuint32_t bases, int64_t index) {
  return SVE_ACLE_FUNC(svldnt1_gather, _u32base, _index_s32, )(pg, bases, index);
}

// CHECK-LABEL: @test_svldnt1_gather_u64base_index_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 3
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP1]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z37test_svldnt1_gather_u64base_index_s64u10__SVBool_tu12__SVUint64_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 3
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP1]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svint64_t test_svldnt1_gather_u64base_index_s64(svbool_t pg, svuint64_t bases, int64_t index) {
  return SVE_ACLE_FUNC(svldnt1_gather, _u64base, _index_s64, )(pg, bases, index);
}

// CHECK-LABEL: @test_svldnt1_gather_u32base_index_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 2
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[TMP1]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z37test_svldnt1_gather_u32base_index_u32u10__SVBool_tu12__SVUint32_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 2
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4i32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[TMP1]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP2]]
//
svuint32_t test_svldnt1_gather_u32base_index_u32(svbool_t pg, svuint32_t bases, int64_t index) {
  return SVE_ACLE_FUNC(svldnt1_gather, _u32base, _index_u32, )(pg, bases, index);
}

// CHECK-LABEL: @test_svldnt1_gather_u64base_index_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 3
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP1]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z37test_svldnt1_gather_u64base_index_u64u10__SVBool_tu12__SVUint64_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 3
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2i64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP1]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP2]]
//
svuint64_t test_svldnt1_gather_u64base_index_u64(svbool_t pg, svuint64_t bases, int64_t index) {
  return SVE_ACLE_FUNC(svldnt1_gather, _u64base, _index_u64, )(pg, bases, index);
}

// CHECK-LABEL: @test_svldnt1_gather_u32base_index_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 2
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[TMP1]])
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z37test_svldnt1_gather_u32base_index_f32u10__SVBool_tu12__SVUint32_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 2
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv4f32.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[BASES:%.*]], i64 [[TMP1]])
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP2]]
//
svfloat32_t test_svldnt1_gather_u32base_index_f32(svbool_t pg, svuint32_t bases, int64_t index) {
  return SVE_ACLE_FUNC(svldnt1_gather, _u32base, _index_f32, )(pg, bases, index);
}

// CHECK-LABEL: @test_svldnt1_gather_u64base_index_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 3
// CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP1]])
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP2]]
//
// CPP-CHECK-LABEL: @_Z37test_svldnt1_gather_u64base_index_f64u10__SVBool_tu12__SVUint64_tl(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = shl i64 [[INDEX:%.*]], 3
// CPP-CHECK-NEXT: [[TMP2:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.ldnt1.gather.scalar.offset.nxv2f64.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[BASES:%.*]], i64 [[TMP1]])
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP2]]
//
svfloat64_t test_svldnt1_gather_u64base_index_f64(svbool_t pg, svuint64_t bases, int64_t index) {
  return SVE_ACLE_FUNC(svldnt1_gather, _u64base, _index_f64, )(pg, bases, index);
}