// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
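
// The first four RUN lines FileCheck the generated IR for C and C++, with and
// without SVE_OVERLOADED_FORMS; the last two RUN lines only verify that the
// file compiles to assembly under +sve and +sme.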

#include <arm_sve.h>

#if defined __ARM_FEATURE_SME
#define MODE_ATTR __arm_streaming
#else
#define MODE_ATTR
#endif
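
// When __ARM_FEATURE_SME is defined (the +sme RUN line), MODE_ATTR above
// expands to __arm_streaming, so the test functions below are also compiled
// as streaming functions.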

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
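
// For example, SVE_ACLE_FUNC(svclastb,_s8,,) expands to the overloaded name
// svclastb when SVE_OVERLOADED_FORMS is defined, and to svclastb_s8 otherwise.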

// CHECK-LABEL: @test_svclastb_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.clastb.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[FALLBACK:%.*]], <vscale x 16 x i8> [[DATA:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svclastb_s8u10__SVBool_tu10__SVInt8_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.clastb.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[FALLBACK:%.*]], <vscale x 16 x i8> [[DATA:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svclastb_s8(svbool_t pg, svint8_t fallback, svint8_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_s8,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.clastb.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[FALLBACK:%.*]], <vscale x 8 x i16> [[DATA:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svclastb_s16u10__SVBool_tu11__SVInt16_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.clastb.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[FALLBACK:%.*]], <vscale x 8 x i16> [[DATA:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svint16_t test_svclastb_s16(svbool_t pg, svint16_t fallback, svint16_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_s16,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.clastb.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[FALLBACK:%.*]], <vscale x 4 x i32> [[DATA:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svclastb_s32u10__SVBool_tu11__SVInt32_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.clastb.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[FALLBACK:%.*]], <vscale x 4 x i32> [[DATA:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svint32_t test_svclastb_s32(svbool_t pg, svint32_t fallback, svint32_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_s32,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.clastb.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[FALLBACK:%.*]], <vscale x 2 x i64> [[DATA:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svclastb_s64u10__SVBool_tu11__SVInt64_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.clastb.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[FALLBACK:%.*]], <vscale x 2 x i64> [[DATA:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svint64_t test_svclastb_s64(svbool_t pg, svint64_t fallback, svint64_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_s64,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.clastb.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[FALLBACK:%.*]], <vscale x 16 x i8> [[DATA:%.*]])
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svclastb_u8u10__SVBool_tu11__SVUint8_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.clastb.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i8> [[FALLBACK:%.*]], <vscale x 16 x i8> [[DATA:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svclastb_u8(svbool_t pg, svuint8_t fallback, svuint8_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_u8,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.clastb.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[FALLBACK:%.*]], <vscale x 8 x i16> [[DATA:%.*]])
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svclastb_u16u10__SVBool_tu12__SVUint16_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.clastb.nxv8i16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x i16> [[FALLBACK:%.*]], <vscale x 8 x i16> [[DATA:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP1]]
//
svuint16_t test_svclastb_u16(svbool_t pg, svuint16_t fallback, svuint16_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_u16,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.clastb.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[FALLBACK:%.*]], <vscale x 4 x i32> [[DATA:%.*]])
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svclastb_u32u10__SVBool_tu12__SVUint32_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.clastb.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[FALLBACK:%.*]], <vscale x 4 x i32> [[DATA:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP1]]
//
svuint32_t test_svclastb_u32(svbool_t pg, svuint32_t fallback, svuint32_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_u32,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.clastb.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[FALLBACK:%.*]], <vscale x 2 x i64> [[DATA:%.*]])
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svclastb_u64u10__SVBool_tu12__SVUint64_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.clastb.nxv2i64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x i64> [[FALLBACK:%.*]], <vscale x 2 x i64> [[DATA:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP1]]
//
svuint64_t test_svclastb_u64(svbool_t pg, svuint64_t fallback, svuint64_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_u64,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.clastb.nxv8f16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> [[FALLBACK:%.*]], <vscale x 8 x half> [[DATA:%.*]])
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svclastb_f16u10__SVBool_tu13__SVFloat16_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.clastb.nxv8f16(<vscale x 8 x i1> [[TMP0]], <vscale x 8 x half> [[FALLBACK:%.*]], <vscale x 8 x half> [[DATA:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP1]]
//
svfloat16_t test_svclastb_f16(svbool_t pg, svfloat16_t fallback, svfloat16_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_f16,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.clastb.nxv4f32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[FALLBACK:%.*]], <vscale x 4 x float> [[DATA:%.*]])
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svclastb_f32u10__SVBool_tu13__SVFloat32_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.clastb.nxv4f32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x float> [[FALLBACK:%.*]], <vscale x 4 x float> [[DATA:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP1]]
//
svfloat32_t test_svclastb_f32(svbool_t pg, svfloat32_t fallback, svfloat32_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_f32,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.clastb.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[FALLBACK:%.*]], <vscale x 2 x double> [[DATA:%.*]])
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z17test_svclastb_f64u10__SVBool_tu13__SVFloat64_tS0_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.clastb.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[FALLBACK:%.*]], <vscale x 2 x double> [[DATA:%.*]])
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP1]]
//
svfloat64_t test_svclastb_f64(svbool_t pg, svfloat64_t fallback, svfloat64_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_f64,,)(pg, fallback, data);
}
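
// The _n_ forms below take a scalar fallback and return a scalar. As the
// autogenerated checks show, after the opt pipeline in the RUN lines the
// 16/32/64-bit integer variants are matched via bitcasts to the equivalent
// floating-point types and the corresponding clastb.n FP intrinsics.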

// CHECK-LABEL: @test_svclastb_n_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.aarch64.sve.clastb.n.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], i8 [[FALLBACK:%.*]], <vscale x 16 x i8> [[DATA:%.*]])
// CHECK-NEXT: ret i8 [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z18test_svclastb_n_s8u10__SVBool_tau10__SVInt8_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.aarch64.sve.clastb.n.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], i8 [[FALLBACK:%.*]], <vscale x 16 x i8> [[DATA:%.*]])
// CPP-CHECK-NEXT: ret i8 [[TMP0]]
//
int8_t test_svclastb_n_s8(svbool_t pg, int8_t fallback, svint8_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_n_s8,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_n_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[FALLBACK:%.*]] to half
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 8 x i16> [[DATA:%.*]] to <vscale x 8 x half>
// CHECK-NEXT: [[TMP3:%.*]] = tail call half @llvm.aarch64.sve.clastb.n.nxv8f16(<vscale x 8 x i1> [[TMP0]], half [[TMP1]], <vscale x 8 x half> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = bitcast half [[TMP3]] to i16
// CHECK-NEXT: ret i16 [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svclastb_n_s16u10__SVBool_tsu11__SVInt16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[FALLBACK:%.*]] to half
// CPP-CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 8 x i16> [[DATA:%.*]] to <vscale x 8 x half>
// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call half @llvm.aarch64.sve.clastb.n.nxv8f16(<vscale x 8 x i1> [[TMP0]], half [[TMP1]], <vscale x 8 x half> [[TMP2]])
// CPP-CHECK-NEXT: [[TMP4:%.*]] = bitcast half [[TMP3]] to i16
// CPP-CHECK-NEXT: ret i16 [[TMP4]]
//
int16_t test_svclastb_n_s16(svbool_t pg, int16_t fallback, svint16_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_n_s16,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_n_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[FALLBACK:%.*]] to float
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x float>
// CHECK-NEXT: [[TMP3:%.*]] = tail call float @llvm.aarch64.sve.clastb.n.nxv4f32(<vscale x 4 x i1> [[TMP0]], float [[TMP1]], <vscale x 4 x float> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[TMP3]] to i32
// CHECK-NEXT: ret i32 [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svclastb_n_s32u10__SVBool_tiu11__SVInt32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[FALLBACK:%.*]] to float
// CPP-CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x float>
// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call float @llvm.aarch64.sve.clastb.n.nxv4f32(<vscale x 4 x i1> [[TMP0]], float [[TMP1]], <vscale x 4 x float> [[TMP2]])
// CPP-CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[TMP3]] to i32
// CPP-CHECK-NEXT: ret i32 [[TMP4]]
//
int32_t test_svclastb_n_s32(svbool_t pg, int32_t fallback, svint32_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_n_s32,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_n_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[FALLBACK:%.*]] to double
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x double>
// CHECK-NEXT: [[TMP3:%.*]] = tail call double @llvm.aarch64.sve.clastb.n.nxv2f64(<vscale x 2 x i1> [[TMP0]], double [[TMP1]], <vscale x 2 x double> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = bitcast double [[TMP3]] to i64
// CHECK-NEXT: ret i64 [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svclastb_n_s64u10__SVBool_tlu11__SVInt64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[FALLBACK:%.*]] to double
// CPP-CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x double>
// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call double @llvm.aarch64.sve.clastb.n.nxv2f64(<vscale x 2 x i1> [[TMP0]], double [[TMP1]], <vscale x 2 x double> [[TMP2]])
// CPP-CHECK-NEXT: [[TMP4:%.*]] = bitcast double [[TMP3]] to i64
// CPP-CHECK-NEXT: ret i64 [[TMP4]]
//
int64_t test_svclastb_n_s64(svbool_t pg, int64_t fallback, svint64_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_n_s64,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_n_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.aarch64.sve.clastb.n.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], i8 [[FALLBACK:%.*]], <vscale x 16 x i8> [[DATA:%.*]])
// CHECK-NEXT: ret i8 [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z18test_svclastb_n_u8u10__SVBool_thu11__SVUint8_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i8 @llvm.aarch64.sve.clastb.n.nxv16i8(<vscale x 16 x i1> [[PG:%.*]], i8 [[FALLBACK:%.*]], <vscale x 16 x i8> [[DATA:%.*]])
// CPP-CHECK-NEXT: ret i8 [[TMP0]]
//
uint8_t test_svclastb_n_u8(svbool_t pg, uint8_t fallback, svuint8_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_n_u8,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_n_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[FALLBACK:%.*]] to half
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 8 x i16> [[DATA:%.*]] to <vscale x 8 x half>
// CHECK-NEXT: [[TMP3:%.*]] = tail call half @llvm.aarch64.sve.clastb.n.nxv8f16(<vscale x 8 x i1> [[TMP0]], half [[TMP1]], <vscale x 8 x half> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = bitcast half [[TMP3]] to i16
// CHECK-NEXT: ret i16 [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svclastb_n_u16u10__SVBool_ttu12__SVUint16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast i16 [[FALLBACK:%.*]] to half
// CPP-CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 8 x i16> [[DATA:%.*]] to <vscale x 8 x half>
// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call half @llvm.aarch64.sve.clastb.n.nxv8f16(<vscale x 8 x i1> [[TMP0]], half [[TMP1]], <vscale x 8 x half> [[TMP2]])
// CPP-CHECK-NEXT: [[TMP4:%.*]] = bitcast half [[TMP3]] to i16
// CPP-CHECK-NEXT: ret i16 [[TMP4]]
//
uint16_t test_svclastb_n_u16(svbool_t pg, uint16_t fallback, svuint16_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_n_u16,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_n_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[FALLBACK:%.*]] to float
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x float>
// CHECK-NEXT: [[TMP3:%.*]] = tail call float @llvm.aarch64.sve.clastb.n.nxv4f32(<vscale x 4 x i1> [[TMP0]], float [[TMP1]], <vscale x 4 x float> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[TMP3]] to i32
// CHECK-NEXT: ret i32 [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svclastb_n_u32u10__SVBool_tju12__SVUint32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast i32 [[FALLBACK:%.*]] to float
// CPP-CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 4 x i32> [[DATA:%.*]] to <vscale x 4 x float>
// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call float @llvm.aarch64.sve.clastb.n.nxv4f32(<vscale x 4 x i1> [[TMP0]], float [[TMP1]], <vscale x 4 x float> [[TMP2]])
// CPP-CHECK-NEXT: [[TMP4:%.*]] = bitcast float [[TMP3]] to i32
// CPP-CHECK-NEXT: ret i32 [[TMP4]]
//
uint32_t test_svclastb_n_u32(svbool_t pg, uint32_t fallback, svuint32_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_n_u32,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_n_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[FALLBACK:%.*]] to double
// CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x double>
// CHECK-NEXT: [[TMP3:%.*]] = tail call double @llvm.aarch64.sve.clastb.n.nxv2f64(<vscale x 2 x i1> [[TMP0]], double [[TMP1]], <vscale x 2 x double> [[TMP2]])
// CHECK-NEXT: [[TMP4:%.*]] = bitcast double [[TMP3]] to i64
// CHECK-NEXT: ret i64 [[TMP4]]
//
// CPP-CHECK-LABEL: @_Z19test_svclastb_n_u64u10__SVBool_tmu12__SVUint64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = bitcast i64 [[FALLBACK:%.*]] to double
// CPP-CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 2 x i64> [[DATA:%.*]] to <vscale x 2 x double>
// CPP-CHECK-NEXT: [[TMP3:%.*]] = tail call double @llvm.aarch64.sve.clastb.n.nxv2f64(<vscale x 2 x i1> [[TMP0]], double [[TMP1]], <vscale x 2 x double> [[TMP2]])
// CPP-CHECK-NEXT: [[TMP4:%.*]] = bitcast double [[TMP3]] to i64
// CPP-CHECK-NEXT: ret i64 [[TMP4]]
//
uint64_t test_svclastb_n_u64(svbool_t pg, uint64_t fallback, svuint64_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_n_u64,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_n_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call half @llvm.aarch64.sve.clastb.n.nxv8f16(<vscale x 8 x i1> [[TMP0]], half [[FALLBACK:%.*]], <vscale x 8 x half> [[DATA:%.*]])
// CHECK-NEXT: ret half [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z19test_svclastb_n_f16u10__SVBool_tDhu13__SVFloat16_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv8i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call half @llvm.aarch64.sve.clastb.n.nxv8f16(<vscale x 8 x i1> [[TMP0]], half [[FALLBACK:%.*]], <vscale x 8 x half> [[DATA:%.*]])
// CPP-CHECK-NEXT: ret half [[TMP1]]
//
float16_t test_svclastb_n_f16(svbool_t pg, float16_t fallback, svfloat16_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_n_f16,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_n_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call float @llvm.aarch64.sve.clastb.n.nxv4f32(<vscale x 4 x i1> [[TMP0]], float [[FALLBACK:%.*]], <vscale x 4 x float> [[DATA:%.*]])
// CHECK-NEXT: ret float [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z19test_svclastb_n_f32u10__SVBool_tfu13__SVFloat32_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call float @llvm.aarch64.sve.clastb.n.nxv4f32(<vscale x 4 x i1> [[TMP0]], float [[FALLBACK:%.*]], <vscale x 4 x float> [[DATA:%.*]])
// CPP-CHECK-NEXT: ret float [[TMP1]]
//
float32_t test_svclastb_n_f32(svbool_t pg, float32_t fallback, svfloat32_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_n_f32,,)(pg, fallback, data);
}

// CHECK-LABEL: @test_svclastb_n_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CHECK-NEXT: [[TMP1:%.*]] = tail call double @llvm.aarch64.sve.clastb.n.nxv2f64(<vscale x 2 x i1> [[TMP0]], double [[FALLBACK:%.*]], <vscale x 2 x double> [[DATA:%.*]])
// CHECK-NEXT: ret double [[TMP1]]
//
// CPP-CHECK-LABEL: @_Z19test_svclastb_n_f64u10__SVBool_tdu13__SVFloat64_t(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
// CPP-CHECK-NEXT: [[TMP1:%.*]] = tail call double @llvm.aarch64.sve.clastb.n.nxv2f64(<vscale x 2 x i1> [[TMP0]], double [[FALLBACK:%.*]], <vscale x 2 x double> [[DATA:%.*]])
// CPP-CHECK-NEXT: ret double [[TMP1]]
//
float64_t test_svclastb_n_f64(svbool_t pg, float64_t fallback, svfloat64_t data) MODE_ATTR
{
  return SVE_ACLE_FUNC(svclastb,_n_f64,,)(pg, fallback, data);
}