// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -DSVE_OVERLOADED_FORMS -triple aarch64 -target-feature +sve -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sve -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
// RUN: %clang_cc1 -fclang-abi-compat=latest -triple aarch64 -target-feature +sme -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
12 #if defined __ARM_FEATURE_SME
13 #define MODE_ATTR __arm_streaming
18 #ifdef SVE_OVERLOADED_FORMS
19 // A simple used,unused... macro, long enough to represent any SVE builtin.
20 #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
22 #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
25 // CHECK-LABEL: @test_svqadd_s8(
27 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
28 // CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
30 // CPP-CHECK-LABEL: @_Z14test_svqadd_s8u10__SVInt8_tS_(
31 // CPP-CHECK-NEXT: entry:
32 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
33 // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
35 svint8_t
test_svqadd_s8(svint8_t op1
, svint8_t op2
) MODE_ATTR
37 return SVE_ACLE_FUNC(svqadd
,_s8
,,)(op1
, op2
);
40 // CHECK-LABEL: @test_svqadd_s16(
42 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
43 // CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
45 // CPP-CHECK-LABEL: @_Z15test_svqadd_s16u11__SVInt16_tS_(
46 // CPP-CHECK-NEXT: entry:
47 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
48 // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
50 svint16_t
test_svqadd_s16(svint16_t op1
, svint16_t op2
) MODE_ATTR
52 return SVE_ACLE_FUNC(svqadd
,_s16
,,)(op1
, op2
);
55 // CHECK-LABEL: @test_svqadd_s32(
57 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
58 // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
60 // CPP-CHECK-LABEL: @_Z15test_svqadd_s32u11__SVInt32_tS_(
61 // CPP-CHECK-NEXT: entry:
62 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
63 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
65 svint32_t
test_svqadd_s32(svint32_t op1
, svint32_t op2
) MODE_ATTR
67 return SVE_ACLE_FUNC(svqadd
,_s32
,,)(op1
, op2
);
70 // CHECK-LABEL: @test_svqadd_s64(
72 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
73 // CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
75 // CPP-CHECK-LABEL: @_Z15test_svqadd_s64u11__SVInt64_tS_(
76 // CPP-CHECK-NEXT: entry:
77 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
78 // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
80 svint64_t
test_svqadd_s64(svint64_t op1
, svint64_t op2
) MODE_ATTR
82 return SVE_ACLE_FUNC(svqadd
,_s64
,,)(op1
, op2
);
85 // CHECK-LABEL: @test_svqadd_u8(
87 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
88 // CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
90 // CPP-CHECK-LABEL: @_Z14test_svqadd_u8u11__SVUint8_tS_(
91 // CPP-CHECK-NEXT: entry:
92 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]])
93 // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
95 svuint8_t
test_svqadd_u8(svuint8_t op1
, svuint8_t op2
) MODE_ATTR
97 return SVE_ACLE_FUNC(svqadd
,_u8
,,)(op1
, op2
);
100 // CHECK-LABEL: @test_svqadd_u16(
101 // CHECK-NEXT: entry:
102 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
103 // CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
105 // CPP-CHECK-LABEL: @_Z15test_svqadd_u16u12__SVUint16_tS_(
106 // CPP-CHECK-NEXT: entry:
107 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]])
108 // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
110 svuint16_t
test_svqadd_u16(svuint16_t op1
, svuint16_t op2
) MODE_ATTR
112 return SVE_ACLE_FUNC(svqadd
,_u16
,,)(op1
, op2
);
115 // CHECK-LABEL: @test_svqadd_u32(
116 // CHECK-NEXT: entry:
117 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
118 // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
120 // CPP-CHECK-LABEL: @_Z15test_svqadd_u32u12__SVUint32_tS_(
121 // CPP-CHECK-NEXT: entry:
122 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
123 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
125 svuint32_t
test_svqadd_u32(svuint32_t op1
, svuint32_t op2
) MODE_ATTR
127 return SVE_ACLE_FUNC(svqadd
,_u32
,,)(op1
, op2
);
130 // CHECK-LABEL: @test_svqadd_u64(
131 // CHECK-NEXT: entry:
132 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
133 // CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
135 // CPP-CHECK-LABEL: @_Z15test_svqadd_u64u12__SVUint64_tS_(
136 // CPP-CHECK-NEXT: entry:
137 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]])
138 // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
140 svuint64_t
test_svqadd_u64(svuint64_t op1
, svuint64_t op2
) MODE_ATTR
142 return SVE_ACLE_FUNC(svqadd
,_u64
,,)(op1
, op2
);
145 // CHECK-LABEL: @test_svqadd_n_s8(
146 // CHECK-NEXT: entry:
147 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
148 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
149 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
150 // CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
152 // CPP-CHECK-LABEL: @_Z16test_svqadd_n_s8u10__SVInt8_ta(
153 // CPP-CHECK-NEXT: entry:
154 // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
155 // CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
156 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.sqadd.x.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
157 // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
159 svint8_t
test_svqadd_n_s8(svint8_t op1
, int8_t op2
) MODE_ATTR
161 return SVE_ACLE_FUNC(svqadd
,_n_s8
,,)(op1
, op2
);
164 // CHECK-LABEL: @test_svqadd_n_s16(
165 // CHECK-NEXT: entry:
166 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
167 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
168 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
169 // CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
171 // CPP-CHECK-LABEL: @_Z17test_svqadd_n_s16u11__SVInt16_ts(
172 // CPP-CHECK-NEXT: entry:
173 // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
174 // CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
175 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.sqadd.x.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
176 // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
178 svint16_t
test_svqadd_n_s16(svint16_t op1
, int16_t op2
) MODE_ATTR
180 return SVE_ACLE_FUNC(svqadd
,_n_s16
,,)(op1
, op2
);
183 // CHECK-LABEL: @test_svqadd_n_s32(
184 // CHECK-NEXT: entry:
185 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
186 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
187 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
188 // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
190 // CPP-CHECK-LABEL: @_Z17test_svqadd_n_s32u11__SVInt32_ti(
191 // CPP-CHECK-NEXT: entry:
192 // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
193 // CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
194 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sqadd.x.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
195 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
197 svint32_t
test_svqadd_n_s32(svint32_t op1
, int32_t op2
) MODE_ATTR
199 return SVE_ACLE_FUNC(svqadd
,_n_s32
,,)(op1
, op2
);
202 // CHECK-LABEL: @test_svqadd_n_s64(
203 // CHECK-NEXT: entry:
204 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
205 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
206 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
207 // CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
209 // CPP-CHECK-LABEL: @_Z17test_svqadd_n_s64u11__SVInt64_tl(
210 // CPP-CHECK-NEXT: entry:
211 // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
212 // CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
213 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.sqadd.x.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
214 // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
216 svint64_t
test_svqadd_n_s64(svint64_t op1
, int64_t op2
) MODE_ATTR
218 return SVE_ACLE_FUNC(svqadd
,_n_s64
,,)(op1
, op2
);
221 // CHECK-LABEL: @test_svqadd_n_u8(
222 // CHECK-NEXT: entry:
223 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
224 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
225 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
226 // CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
228 // CPP-CHECK-LABEL: @_Z16test_svqadd_n_u8u11__SVUint8_th(
229 // CPP-CHECK-NEXT: entry:
230 // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[OP2:%.*]], i64 0
231 // CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[DOTSPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
232 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 16 x i8> @llvm.aarch64.sve.uqadd.x.nxv16i8(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[DOTSPLAT]])
233 // CPP-CHECK-NEXT: ret <vscale x 16 x i8> [[TMP0]]
235 svuint8_t
test_svqadd_n_u8(svuint8_t op1
, uint8_t op2
) MODE_ATTR
237 return SVE_ACLE_FUNC(svqadd
,_n_u8
,,)(op1
, op2
);
240 // CHECK-LABEL: @test_svqadd_n_u16(
241 // CHECK-NEXT: entry:
242 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
243 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
244 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
245 // CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
247 // CPP-CHECK-LABEL: @_Z17test_svqadd_n_u16u12__SVUint16_tt(
248 // CPP-CHECK-NEXT: entry:
249 // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[OP2:%.*]], i64 0
250 // CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[DOTSPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
251 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 8 x i16> @llvm.aarch64.sve.uqadd.x.nxv8i16(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[DOTSPLAT]])
252 // CPP-CHECK-NEXT: ret <vscale x 8 x i16> [[TMP0]]
254 svuint16_t
test_svqadd_n_u16(svuint16_t op1
, uint16_t op2
) MODE_ATTR
256 return SVE_ACLE_FUNC(svqadd
,_n_u16
,,)(op1
, op2
);
259 // CHECK-LABEL: @test_svqadd_n_u32(
260 // CHECK-NEXT: entry:
261 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
262 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
263 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
264 // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
266 // CPP-CHECK-LABEL: @_Z17test_svqadd_n_u32u12__SVUint32_tj(
267 // CPP-CHECK-NEXT: entry:
268 // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[OP2:%.*]], i64 0
269 // CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[DOTSPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
270 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uqadd.x.nxv4i32(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[DOTSPLAT]])
271 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
273 svuint32_t
test_svqadd_n_u32(svuint32_t op1
, uint32_t op2
) MODE_ATTR
275 return SVE_ACLE_FUNC(svqadd
,_n_u32
,,)(op1
, op2
);
278 // CHECK-LABEL: @test_svqadd_n_u64(
279 // CHECK-NEXT: entry:
280 // CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
281 // CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
282 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
283 // CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
285 // CPP-CHECK-LABEL: @_Z17test_svqadd_n_u64u12__SVUint64_tm(
286 // CPP-CHECK-NEXT: entry:
287 // CPP-CHECK-NEXT: [[DOTSPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[OP2:%.*]], i64 0
288 // CPP-CHECK-NEXT: [[DOTSPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[DOTSPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
289 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 2 x i64> @llvm.aarch64.sve.uqadd.x.nxv2i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[DOTSPLAT]])
290 // CPP-CHECK-NEXT: ret <vscale x 2 x i64> [[TMP0]]
292 svuint64_t
test_svqadd_n_u64(svuint64_t op1
, uint64_t op2
) MODE_ATTR
294 return SVE_ACLE_FUNC(svqadd
,_n_u64
,,)(op1
, op2
);