// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -o /dev/null %s

#include <arm_sve.h>

#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
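
// For example, SVE_ACLE_FUNC(svext,_s8,,) expands to the overloaded name
// 'svext' when SVE_OVERLOADED_FORMS is defined and to 'svext_s8' otherwise.
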
// CHECK-LABEL: @test_svext_s8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.ext.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 0)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z13test_svext_s8u10__SVInt8_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.ext.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 0)
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svext_s8(svint8_t op1, svint8_t op2)
{
  return SVE_ACLE_FUNC(svext,_s8,,)(op1, op2, 0);
}

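// The *_1 variants and the unsigned/float tests below use what is, per the SVE
// ACLE, the largest index svext accepts for each element width: 255 for 8-bit,
// 127 for 16-bit, 63 for 32-bit and 31 for 64-bit elements.
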
// CHECK-LABEL: @test_svext_s8_1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.ext.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 255)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z15test_svext_s8_1u10__SVInt8_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.ext.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 255)
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svint8_t test_svext_s8_1(svint8_t op1, svint8_t op2)
{
  return SVE_ACLE_FUNC(svext,_s8,,)(op1, op2, 255);
}

// CHECK-LABEL: @test_svext_s16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.ext.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 0)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svext_s16u11__SVInt16_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.ext.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 0)
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
svint16_t test_svext_s16(svint16_t op1, svint16_t op2)
{
  return SVE_ACLE_FUNC(svext,_s16,,)(op1, op2, 0);
}

// CHECK-LABEL: @test_svext_s16_1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.ext.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 127)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svext_s16_1u11__SVInt16_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.ext.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 127)
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
svint16_t test_svext_s16_1(svint16_t op1, svint16_t op2)
{
  return SVE_ACLE_FUNC(svext,_s16,,)(op1, op2, 127);
}

// CHECK-LABEL: @test_svext_s32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 0)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svext_s32u11__SVInt32_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 0)
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
svint32_t test_svext_s32(svint32_t op1, svint32_t op2)
{
  return SVE_ACLE_FUNC(svext,_s32,,)(op1, op2, 0);
}

// CHECK-LABEL: @test_svext_s32_1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 63)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svext_s32_1u11__SVInt32_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 63)
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
svint32_t test_svext_s32_1(svint32_t op1, svint32_t op2)
{
  return SVE_ACLE_FUNC(svext,_s32,,)(op1, op2, 63);
}

// CHECK-LABEL: @test_svext_s64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ext.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 0)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svext_s64u11__SVInt64_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ext.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 0)
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
svint64_t test_svext_s64(svint64_t op1, svint64_t op2)
{
  return SVE_ACLE_FUNC(svext,_s64,,)(op1, op2, 0);
}

// CHECK-LABEL: @test_svext_s64_1(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ext.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 31)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z16test_svext_s64_1u11__SVInt64_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ext.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 31)
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
svint64_t test_svext_s64_1(svint64_t op1, svint64_t op2)
{
  return SVE_ACLE_FUNC(svext,_s64,,)(op1, op2, 31);
}

// CHECK-LABEL: @test_svext_u8(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.ext.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 255)
// CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z13test_svext_u8u11__SVUint8_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.ext.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i32 255)
// CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
//
svuint8_t test_svext_u8(svuint8_t op1, svuint8_t op2)
{
  return SVE_ACLE_FUNC(svext,_u8,,)(op1, op2, 255);
}

// CHECK-LABEL: @test_svext_u16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.ext.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 127)
// CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svext_u16u12__SVUint16_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.ext.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i32 127)
// CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
svuint16_t test_svext_u16(svuint16_t op1, svuint16_t op2)
{
  return SVE_ACLE_FUNC(svext,_u16,,)(op1, op2, 127);
}

// CHECK-LABEL: @test_svext_u32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 63)
// CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svext_u32u12__SVUint32_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i32 63)
// CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
svuint32_t test_svext_u32(svuint32_t op1, svuint32_t op2)
{
  return SVE_ACLE_FUNC(svext,_u32,,)(op1, op2, 63);
}

// CHECK-LABEL: @test_svext_u64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ext.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 31)
// CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svext_u64u12__SVUint64_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.ext.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i32 31)
// CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
svuint64_t test_svext_u64(svuint64_t op1, svuint64_t op2)
{
  return SVE_ACLE_FUNC(svext,_u64,,)(op1, op2, 31);
}

// CHECK-LABEL: @test_svext_f16(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.ext.nxv8f16(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i32 127)
// CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svext_f16u13__SVFloat16_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x half> @llvm.aarch64.sve.ext.nxv8f16(<vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], i32 127)
// CPP-CHECK-NEXT: ret <vscale x 8 x half> [[TMP0]]
//
svfloat16_t test_svext_f16(svfloat16_t op1, svfloat16_t op2)
{
  return SVE_ACLE_FUNC(svext,_f16,,)(op1, op2, 127);
}

// CHECK-LABEL: @test_svext_f32(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.ext.nxv4f32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 63)
// CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svext_f32u13__SVFloat32_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x float> @llvm.aarch64.sve.ext.nxv4f32(<vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], i32 63)
// CPP-CHECK-NEXT: ret <vscale x 4 x float> [[TMP0]]
//
svfloat32_t test_svext_f32(svfloat32_t op1, svfloat32_t op2)
{
  return SVE_ACLE_FUNC(svext,_f32,,)(op1, op2, 63);
}

// CHECK-LABEL: @test_svext_f64(
// CHECK-NEXT: entry:
// CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.ext.nxv2f64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 31)
// CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
// CPP-CHECK-LABEL: @_Z14test_svext_f64u13__SVFloat64_tS_(
// CPP-CHECK-NEXT: entry:
// CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x double> @llvm.aarch64.sve.ext.nxv2f64(<vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], i32 31)
// CPP-CHECK-NEXT: ret <vscale x 2 x double> [[TMP0]]
//
svfloat64_t test_svext_f64(svfloat64_t op1, svfloat64_t op2)
{
  return SVE_ACLE_FUNC(svext,_f64,,)(op1, op2, 31);
}