clang/test/CodeGen/aarch64-sve-vector-arith-ops.c
1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve \
3 // RUN: -disable-O0-optnone \
4 // RUN: -emit-llvm -o - %s | opt -S -passes=sroa | FileCheck %s
6 // REQUIRES: aarch64-registered-target
8 #include <arm_sve.h>
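// This file tests the standard C arithmetic operators (+, -, *, /, %, and the
// unary + and - operators) applied to SVE sizeless vector types, and checks
// the unpredicated LLVM IR Clang emits for each element type: add/fadd,
// sub/fsub, mul/fmul, sdiv/udiv/fdiv, and srem/urem on <vscale x N x ...>
// operands.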
10 // ADDITION
12 // CHECK-LABEL: @add_i8(
13 // CHECK-NEXT: entry:
14 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
15 // CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
17 svint8_t add_i8(svint8_t a, svint8_t b) {
18 return a + b;
19 }
21 // CHECK-LABEL: @add_i16(
22 // CHECK-NEXT: entry:
23 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
24 // CHECK-NEXT: ret <vscale x 8 x i16> [[ADD]]
26 svint16_t add_i16(svint16_t a, svint16_t b) {
27 return a + b;
28 }
30 // CHECK-LABEL: @add_i32(
31 // CHECK-NEXT: entry:
32 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
33 // CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
35 svint32_t add_i32(svint32_t a, svint32_t b) {
36 return a + b;
37 }
39 // CHECK-LABEL: @add_i64(
40 // CHECK-NEXT: entry:
41 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
42 // CHECK-NEXT: ret <vscale x 2 x i64> [[ADD]]
44 svint64_t add_i64(svint64_t a, svint64_t b) {
45 return a + b;
46 }
48 // CHECK-LABEL: @add_u8(
49 // CHECK-NEXT: entry:
50 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
51 // CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
53 svuint8_t add_u8(svuint8_t a, svuint8_t b) {
54 return a + b;
55 }
57 // CHECK-LABEL: @add_u16(
58 // CHECK-NEXT: entry:
59 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
60 // CHECK-NEXT: ret <vscale x 8 x i16> [[ADD]]
62 svuint16_t add_u16(svuint16_t a, svuint16_t b) {
63 return a + b;
64 }
66 // CHECK-LABEL: @add_u32(
67 // CHECK-NEXT: entry:
68 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
69 // CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
71 svuint32_t add_u32(svuint32_t a, svuint32_t b) {
72 return a + b;
73 }
75 // CHECK-LABEL: @add_u64(
76 // CHECK-NEXT: entry:
77 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
78 // CHECK-NEXT: ret <vscale x 2 x i64> [[ADD]]
80 svuint64_t add_u64(svuint64_t a, svuint64_t b) {
81 return a + b;
82 }
84 // CHECK-LABEL: @add_f16(
85 // CHECK-NEXT: entry:
86 // CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
87 // CHECK-NEXT: ret <vscale x 8 x half> [[ADD]]
89 svfloat16_t add_f16(svfloat16_t a, svfloat16_t b) {
90 return a + b;
91 }
93 // CHECK-LABEL: @add_f32(
94 // CHECK-NEXT: entry:
95 // CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
96 // CHECK-NEXT: ret <vscale x 4 x float> [[ADD]]
98 svfloat32_t add_f32(svfloat32_t a, svfloat32_t b) {
99 return a + b;
100 }
102 // CHECK-LABEL: @add_f64(
103 // CHECK-NEXT: entry:
104 // CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
105 // CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
107 svfloat64_t add_f64(svfloat64_t a, svfloat64_t b) {
108 return a + b;
109 }
111 // CHECK-LABEL: @add_inplace_i8(
112 // CHECK-NEXT: entry:
113 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
114 // CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
116 svint8_t add_inplace_i8(svint8_t a, svint8_t b) {
117 return a += b;
118 }
120 // CHECK-LABEL: @add_inplace_i16(
121 // CHECK-NEXT: entry:
122 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
123 // CHECK-NEXT: ret <vscale x 8 x i16> [[ADD]]
125 svint16_t add_inplace_i16(svint16_t a, svint16_t b) {
126 return a += b;
127 }
129 // CHECK-LABEL: @add_inplace_i32(
130 // CHECK-NEXT: entry:
131 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
132 // CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
134 svint32_t add_inplace_i32(svint32_t a, svint32_t b) {
135 return a += b;
136 }
138 // CHECK-LABEL: @add_inplace_i64(
139 // CHECK-NEXT: entry:
140 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
141 // CHECK-NEXT: ret <vscale x 2 x i64> [[ADD]]
143 svint64_t add_inplace_i64(svint64_t a, svint64_t b) {
144 return a += b;
145 }
147 // CHECK-LABEL: @add_inplace_u8(
148 // CHECK-NEXT: entry:
149 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
150 // CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
152 svuint8_t add_inplace_u8(svuint8_t a, svuint8_t b) {
153 return a += b;
154 }
156 // CHECK-LABEL: @add_inplace_u16(
157 // CHECK-NEXT: entry:
158 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
159 // CHECK-NEXT: ret <vscale x 8 x i16> [[ADD]]
161 svuint16_t add_inplace_u16(svuint16_t a, svuint16_t b) {
162 return a += b;
163 }
165 // CHECK-LABEL: @add_inplace_u32(
166 // CHECK-NEXT: entry:
167 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
168 // CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
170 svuint32_t add_inplace_u32(svuint32_t a, svuint32_t b) {
171 return a += b;
172 }
174 // CHECK-LABEL: @add_inplace_u64(
175 // CHECK-NEXT: entry:
176 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
177 // CHECK-NEXT: ret <vscale x 2 x i64> [[ADD]]
179 svuint64_t add_inplace_u64(svuint64_t a, svuint64_t b) {
180 return a += b;
181 }
183 // CHECK-LABEL: @add_inplace_f16(
184 // CHECK-NEXT: entry:
185 // CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
186 // CHECK-NEXT: ret <vscale x 8 x half> [[ADD]]
188 svfloat16_t add_inplace_f16(svfloat16_t a, svfloat16_t b) {
189 return a += b;
190 }
192 // CHECK-LABEL: @add_inplace_f32(
193 // CHECK-NEXT: entry:
194 // CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
195 // CHECK-NEXT: ret <vscale x 4 x float> [[ADD]]
197 svfloat32_t add_inplace_f32(svfloat32_t a, svfloat32_t b) {
198 return a += b;
199 }
201 // CHECK-LABEL: @add_inplace_f64(
202 // CHECK-NEXT: entry:
203 // CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
204 // CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
206 svfloat64_t add_inplace_f64(svfloat64_t a, svfloat64_t b) {
207 return a += b;
208 }
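// The *_scalar_* tests that follow operate on a vector and a scalar. The
// scalar operand is broadcast to a vector first, which appears in the IR as
// an insertelement into lane 0 followed by a shufflevector splat with a
// zeroinitializer mask.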
210 // CHECK-LABEL: @add_scalar_i8(
211 // CHECK-NEXT: entry:
212 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
213 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
214 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
215 // CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
217 svint8_t add_scalar_i8(svint8_t a, int8_t b) {
218 return a + b;
219 }
221 // CHECK-LABEL: @add_scalar_i16(
222 // CHECK-NEXT: entry:
223 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
224 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
225 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
226 // CHECK-NEXT: ret <vscale x 8 x i16> [[ADD]]
228 svint16_t add_scalar_i16(svint16_t a, int16_t b) {
229 return a + b;
230 }
232 // CHECK-LABEL: @add_scalar_i32(
233 // CHECK-NEXT: entry:
234 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
235 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
236 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
237 // CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
239 svint32_t add_scalar_i32(svint32_t a, int32_t b) {
240 return a + b;
241 }
243 // CHECK-LABEL: @add_scalar_i64(
244 // CHECK-NEXT: entry:
245 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
246 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
247 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
248 // CHECK-NEXT: ret <vscale x 2 x i64> [[ADD]]
250 svint64_t add_scalar_i64(svint64_t a, int64_t b) {
251 return a + b;
252 }
254 // CHECK-LABEL: @add_scalar_u8(
255 // CHECK-NEXT: entry:
256 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
257 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
258 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
259 // CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
261 svuint8_t add_scalar_u8(svuint8_t a, uint8_t b) {
262 return a + b;
263 }
265 // CHECK-LABEL: @add_scalar_u16(
266 // CHECK-NEXT: entry:
267 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
268 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
269 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
270 // CHECK-NEXT: ret <vscale x 8 x i16> [[ADD]]
272 svuint16_t add_scalar_u16(svuint16_t a, uint16_t b) {
273 return a + b;
274 }
276 // CHECK-LABEL: @add_scalar_u32(
277 // CHECK-NEXT: entry:
278 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
279 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
280 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
281 // CHECK-NEXT: ret <vscale x 4 x i32> [[ADD]]
283 svuint32_t add_scalar_u32(svuint32_t a, uint32_t b) {
284 return a + b;
285 }
287 // CHECK-LABEL: @add_scalar_u64(
288 // CHECK-NEXT: entry:
289 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
290 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
291 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
292 // CHECK-NEXT: ret <vscale x 2 x i64> [[ADD]]
294 svuint64_t add_scalar_u64(svuint64_t a, uint64_t b) {
295 return a + b;
296 }
298 // CHECK-LABEL: @add_scalar_f16(
299 // CHECK-NEXT: entry:
300 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x half> poison, half [[B:%.*]], i64 0
301 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x half> [[SPLAT_SPLATINSERT]], <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
302 // CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 8 x half> [[A:%.*]], [[SPLAT_SPLAT]]
303 // CHECK-NEXT: ret <vscale x 8 x half> [[ADD]]
305 svfloat16_t add_scalar_f16(svfloat16_t a, __fp16 b) {
306 return a + b;
307 }
309 // CHECK-LABEL: @add_scalar_f32(
310 // CHECK-NEXT: entry:
311 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[B:%.*]], i64 0
312 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[SPLAT_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
313 // CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 4 x float> [[A:%.*]], [[SPLAT_SPLAT]]
314 // CHECK-NEXT: ret <vscale x 4 x float> [[ADD]]
316 svfloat32_t add_scalar_f32(svfloat32_t a, float b) {
317 return a + b;
318 }
320 // CHECK-LABEL: @add_scalar_f64(
321 // CHECK-NEXT: entry:
322 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x double> poison, double [[B:%.*]], i64 0
323 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x double> [[SPLAT_SPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
324 // CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], [[SPLAT_SPLAT]]
325 // CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
327 svfloat64_t add_scalar_f64(svfloat64_t a, double b) {
328 return a + b;
329 }
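// The *_lit tests that follow use literal operands of various integer and
// floating-point types; after conversion to the element type, the zero
// literal becomes a zeroinitializer vector operand in the IR.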
331 // CHECK-LABEL: @add_i8_i_lit(
332 // CHECK-NEXT: entry:
333 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], zeroinitializer
334 // CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
336 svint8_t add_i8_i_lit(svint8_t a) {
337 return a + 0;
338 }
340 // CHECK-LABEL: @add_i8_il_lit(
341 // CHECK-NEXT: entry:
342 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], zeroinitializer
343 // CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
345 svint8_t add_i8_il_lit(svint8_t a) {
346 return a + 0l;
347 }
349 // CHECK-LABEL: @add_i8_ill_lit(
350 // CHECK-NEXT: entry:
351 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], zeroinitializer
352 // CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
354 svint8_t add_i8_ill_lit(svint8_t a) {
355 return a + 0ll;
356 }
358 // CHECK-LABEL: @add_i8_u_lit(
359 // CHECK-NEXT: entry:
360 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], zeroinitializer
361 // CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
363 svint8_t add_i8_u_lit(svint8_t a) {
364 return a + 0u;
365 }
367 // CHECK-LABEL: @add_i8_ul_lit(
368 // CHECK-NEXT: entry:
369 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], zeroinitializer
370 // CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
372 svint8_t add_i8_ul_lit(svint8_t a) {
373 return a + 0ul;
374 }
376 // CHECK-LABEL: @add_i8_ull_lit(
377 // CHECK-NEXT: entry:
378 // CHECK-NEXT: [[ADD:%.*]] = add <vscale x 16 x i8> [[A:%.*]], zeroinitializer
379 // CHECK-NEXT: ret <vscale x 16 x i8> [[ADD]]
381 svint8_t add_i8_ull_lit(svint8_t a) {
382 return a + 0ull;
383 }
385 // CHECK-LABEL: @add_f64_i_lit(
386 // CHECK-NEXT: entry:
387 // CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], zeroinitializer
388 // CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
390 svfloat64_t add_f64_i_lit(svfloat64_t a) {
391 return a + 0;
392 }
394 // CHECK-LABEL: @add_f64_il_lit(
395 // CHECK-NEXT: entry:
396 // CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], zeroinitializer
397 // CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
399 svfloat64_t add_f64_il_lit(svfloat64_t a) {
400 return a + 0l;
401 }
403 // CHECK-LABEL: @add_f64_ill_lit(
404 // CHECK-NEXT: entry:
405 // CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], zeroinitializer
406 // CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
408 svfloat64_t add_f64_ill_lit(svfloat64_t a) {
409 return a + 0ll;
410 }
412 // CHECK-LABEL: @add_f64_u_lit(
413 // CHECK-NEXT: entry:
414 // CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], zeroinitializer
415 // CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
417 svfloat64_t add_f64_u_lit(svfloat64_t a) {
418 return a + 0u;
419 }
421 // CHECK-LABEL: @add_f64_ul_lit(
422 // CHECK-NEXT: entry:
423 // CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], zeroinitializer
424 // CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
426 svfloat64_t add_f64_ul_lit(svfloat64_t a) {
427 return a + 0ul;
428 }
430 // CHECK-LABEL: @add_f64_ull_lit(
431 // CHECK-NEXT: entry:
432 // CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], zeroinitializer
433 // CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
435 svfloat64_t add_f64_ull_lit(svfloat64_t a) {
436 return a + 0ull;
437 }
439 // CHECK-LABEL: @add_f64_f_lit(
440 // CHECK-NEXT: entry:
441 // CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], zeroinitializer
442 // CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
444 svfloat64_t add_f64_f_lit(svfloat64_t a) {
445 return a + 0.f;
446 }
448 // CHECK-LABEL: @add_f64_d_lit(
449 // CHECK-NEXT: entry:
450 // CHECK-NEXT: [[ADD:%.*]] = fadd <vscale x 2 x double> [[A:%.*]], zeroinitializer
451 // CHECK-NEXT: ret <vscale x 2 x double> [[ADD]]
453 svfloat64_t add_f64_d_lit(svfloat64_t a) {
454 return a + 0.;
455 }
457 // SUBTRACTION
459 // CHECK-LABEL: @sub_i8(
460 // CHECK-NEXT: entry:
461 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
462 // CHECK-NEXT: ret <vscale x 16 x i8> [[SUB]]
464 svint8_t sub_i8(svint8_t a, svint8_t b) {
465 return a - b;
466 }
468 // CHECK-LABEL: @sub_i16(
469 // CHECK-NEXT: entry:
470 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
471 // CHECK-NEXT: ret <vscale x 8 x i16> [[SUB]]
473 svint16_t sub_i16(svint16_t a, svint16_t b) {
474 return a - b;
475 }
477 // CHECK-LABEL: @sub_i32(
478 // CHECK-NEXT: entry:
479 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
480 // CHECK-NEXT: ret <vscale x 4 x i32> [[SUB]]
482 svint32_t sub_i32(svint32_t a, svint32_t b) {
483 return a - b;
484 }
486 // CHECK-LABEL: @sub_i64(
487 // CHECK-NEXT: entry:
488 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
489 // CHECK-NEXT: ret <vscale x 2 x i64> [[SUB]]
491 svint64_t sub_i64(svint64_t a, svint64_t b) {
492 return a - b;
493 }
495 // CHECK-LABEL: @sub_u8(
496 // CHECK-NEXT: entry:
497 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
498 // CHECK-NEXT: ret <vscale x 16 x i8> [[SUB]]
500 svuint8_t sub_u8(svuint8_t a, svuint8_t b) {
501 return a - b;
502 }
504 // CHECK-LABEL: @sub_u16(
505 // CHECK-NEXT: entry:
506 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
507 // CHECK-NEXT: ret <vscale x 8 x i16> [[SUB]]
509 svuint16_t sub_u16(svuint16_t a, svuint16_t b) {
510 return a - b;
511 }
513 // CHECK-LABEL: @sub_u32(
514 // CHECK-NEXT: entry:
515 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
516 // CHECK-NEXT: ret <vscale x 4 x i32> [[SUB]]
518 svuint32_t sub_u32(svuint32_t a, svuint32_t b) {
519 return a - b;
520 }
522 // CHECK-LABEL: @sub_u64(
523 // CHECK-NEXT: entry:
524 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
525 // CHECK-NEXT: ret <vscale x 2 x i64> [[SUB]]
527 svuint64_t sub_u64(svuint64_t a, svuint64_t b) {
528 return a - b;
529 }
531 // CHECK-LABEL: @sub_f16(
532 // CHECK-NEXT: entry:
533 // CHECK-NEXT: [[SUB:%.*]] = fsub <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
534 // CHECK-NEXT: ret <vscale x 8 x half> [[SUB]]
536 svfloat16_t sub_f16(svfloat16_t a, svfloat16_t b) {
537 return a - b;
538 }
540 // CHECK-LABEL: @sub_f32(
541 // CHECK-NEXT: entry:
542 // CHECK-NEXT: [[SUB:%.*]] = fsub <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
543 // CHECK-NEXT: ret <vscale x 4 x float> [[SUB]]
545 svfloat32_t sub_f32(svfloat32_t a, svfloat32_t b) {
546 return a - b;
547 }
549 // CHECK-LABEL: @sub_f64(
550 // CHECK-NEXT: entry:
551 // CHECK-NEXT: [[SUB:%.*]] = fsub <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
552 // CHECK-NEXT: ret <vscale x 2 x double> [[SUB]]
554 svfloat64_t sub_f64(svfloat64_t a, svfloat64_t b) {
555 return a - b;
556 }
558 // CHECK-LABEL: @sub_inplace_i8(
559 // CHECK-NEXT: entry:
560 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
561 // CHECK-NEXT: ret <vscale x 16 x i8> [[SUB]]
563 svint8_t sub_inplace_i8(svint8_t a, svint8_t b) {
564 return a - b;
565 }
567 // CHECK-LABEL: @sub_inplace_i16(
568 // CHECK-NEXT: entry:
569 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
570 // CHECK-NEXT: ret <vscale x 8 x i16> [[SUB]]
572 svint16_t sub_inplace_i16(svint16_t a, svint16_t b) {
573 return a - b;
574 }
576 // CHECK-LABEL: @sub_inplace_i32(
577 // CHECK-NEXT: entry:
578 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
579 // CHECK-NEXT: ret <vscale x 4 x i32> [[SUB]]
581 svint32_t sub_inplace_i32(svint32_t a, svint32_t b) {
582 return a - b;
583 }
585 // CHECK-LABEL: @sub_inplace_i64(
586 // CHECK-NEXT: entry:
587 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
588 // CHECK-NEXT: ret <vscale x 2 x i64> [[SUB]]
590 svint64_t sub_inplace_i64(svint64_t a, svint64_t b) {
591 return a - b;
592 }
594 // CHECK-LABEL: @sub_inplace_u8(
595 // CHECK-NEXT: entry:
596 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
597 // CHECK-NEXT: ret <vscale x 16 x i8> [[SUB]]
599 svuint8_t sub_inplace_u8(svuint8_t a, svuint8_t b) {
600 return a - b;
601 }
603 // CHECK-LABEL: @sub_inplace_u16(
604 // CHECK-NEXT: entry:
605 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
606 // CHECK-NEXT: ret <vscale x 8 x i16> [[SUB]]
608 svuint16_t sub_inplace_u16(svuint16_t a, svuint16_t b) {
609 return a - b;
610 }
612 // CHECK-LABEL: @sub_inplace_u32(
613 // CHECK-NEXT: entry:
614 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
615 // CHECK-NEXT: ret <vscale x 4 x i32> [[SUB]]
617 svuint32_t sub_inplace_u32(svuint32_t a, svuint32_t b) {
618 return a - b;
619 }
621 // CHECK-LABEL: @sub_inplace_u64(
622 // CHECK-NEXT: entry:
623 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
624 // CHECK-NEXT: ret <vscale x 2 x i64> [[SUB]]
626 svuint64_t sub_inplace_u64(svuint64_t a, svuint64_t b) {
627 return a - b;
628 }
630 // CHECK-LABEL: @sub_inplace_f16(
631 // CHECK-NEXT: entry:
632 // CHECK-NEXT: [[SUB:%.*]] = fsub <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
633 // CHECK-NEXT: ret <vscale x 8 x half> [[SUB]]
635 svfloat16_t sub_inplace_f16(svfloat16_t a, svfloat16_t b) {
636 return a - b;
637 }
639 // CHECK-LABEL: @sub_inplace_f32(
640 // CHECK-NEXT: entry:
641 // CHECK-NEXT: [[SUB:%.*]] = fsub <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
642 // CHECK-NEXT: ret <vscale x 4 x float> [[SUB]]
644 svfloat32_t sub_inplace_f32(svfloat32_t a, svfloat32_t b) {
645 return a - b;
646 }
648 // CHECK-LABEL: @sub_inplace_f64(
649 // CHECK-NEXT: entry:
650 // CHECK-NEXT: [[SUB:%.*]] = fsub <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
651 // CHECK-NEXT: ret <vscale x 2 x double> [[SUB]]
653 svfloat64_t sub_inplace_f64(svfloat64_t a, svfloat64_t b) {
654 return a - b;
655 }
657 // CHECK-LABEL: @sub_scalar_i8(
658 // CHECK-NEXT: entry:
659 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
660 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
661 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
662 // CHECK-NEXT: ret <vscale x 16 x i8> [[SUB]]
664 svint8_t sub_scalar_i8(svint8_t a, int8_t b) {
665 return a - b;
666 }
668 // CHECK-LABEL: @sub_scalar_i16(
669 // CHECK-NEXT: entry:
670 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
671 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
672 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
673 // CHECK-NEXT: ret <vscale x 8 x i16> [[SUB]]
675 svint16_t sub_scalar_i16(svint16_t a, int16_t b) {
676 return a - b;
677 }
679 // CHECK-LABEL: @sub_scalar_i32(
680 // CHECK-NEXT: entry:
681 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
682 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
683 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
684 // CHECK-NEXT: ret <vscale x 4 x i32> [[SUB]]
686 svint32_t sub_scalar_i32(svint32_t a, int32_t b) {
687 return a - b;
688 }
690 // CHECK-LABEL: @sub_scalar_i64(
691 // CHECK-NEXT: entry:
692 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
693 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
694 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
695 // CHECK-NEXT: ret <vscale x 2 x i64> [[SUB]]
697 svint64_t sub_scalar_i64(svint64_t a, int64_t b) {
698 return a - b;
699 }
701 // CHECK-LABEL: @sub_scalar_u8(
702 // CHECK-NEXT: entry:
703 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
704 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
705 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
706 // CHECK-NEXT: ret <vscale x 16 x i8> [[SUB]]
708 svuint8_t sub_scalar_u8(svuint8_t a, uint8_t b) {
709 return a - b;
710 }
712 // CHECK-LABEL: @sub_scalar_u16(
713 // CHECK-NEXT: entry:
714 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
715 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
716 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
717 // CHECK-NEXT: ret <vscale x 8 x i16> [[SUB]]
719 svuint16_t sub_scalar_u16(svuint16_t a, uint16_t b) {
720 return a - b;
721 }
723 // CHECK-LABEL: @sub_scalar_u32(
724 // CHECK-NEXT: entry:
725 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
726 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
727 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
728 // CHECK-NEXT: ret <vscale x 4 x i32> [[SUB]]
730 svuint32_t sub_scalar_u32(svuint32_t a, uint32_t b) {
731 return a - b;
732 }
734 // CHECK-LABEL: @sub_scalar_u64(
735 // CHECK-NEXT: entry:
736 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
737 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
738 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
739 // CHECK-NEXT: ret <vscale x 2 x i64> [[SUB]]
741 svuint64_t sub_scalar_u64(svuint64_t a, uint64_t b) {
742 return a - b;
743 }
745 // CHECK-LABEL: @sub_scalar_f16(
746 // CHECK-NEXT: entry:
747 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x half> poison, half [[B:%.*]], i64 0
748 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x half> [[SPLAT_SPLATINSERT]], <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
749 // CHECK-NEXT: [[SUB:%.*]] = fsub <vscale x 8 x half> [[A:%.*]], [[SPLAT_SPLAT]]
750 // CHECK-NEXT: ret <vscale x 8 x half> [[SUB]]
752 svfloat16_t sub_scalar_f16(svfloat16_t a, __fp16 b) {
753 return a - b;
754 }
756 // CHECK-LABEL: @sub_scalar_f32(
757 // CHECK-NEXT: entry:
758 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[B:%.*]], i64 0
759 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[SPLAT_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
760 // CHECK-NEXT: [[SUB:%.*]] = fsub <vscale x 4 x float> [[A:%.*]], [[SPLAT_SPLAT]]
761 // CHECK-NEXT: ret <vscale x 4 x float> [[SUB]]
763 svfloat32_t sub_scalar_f32(svfloat32_t a, float b) {
764 return a - b;
765 }
767 // CHECK-LABEL: @sub_scalar_f64(
768 // CHECK-NEXT: entry:
769 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x double> poison, double [[B:%.*]], i64 0
770 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x double> [[SPLAT_SPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
771 // CHECK-NEXT: [[SUB:%.*]] = fsub <vscale x 2 x double> [[A:%.*]], [[SPLAT_SPLAT]]
772 // CHECK-NEXT: ret <vscale x 2 x double> [[SUB]]
774 svfloat64_t sub_scalar_f64(svfloat64_t a, double b) {
775 return a - b;
776 }
778 // MULTIPLICATION
780 // CHECK-LABEL: @mul_i8(
781 // CHECK-NEXT: entry:
782 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
783 // CHECK-NEXT: ret <vscale x 16 x i8> [[MUL]]
785 svint8_t mul_i8(svint8_t a, svint8_t b) {
786 return a * b;
787 }
789 // CHECK-LABEL: @mul_i16(
790 // CHECK-NEXT: entry:
791 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
792 // CHECK-NEXT: ret <vscale x 8 x i16> [[MUL]]
794 svint16_t mul_i16(svint16_t a, svint16_t b) {
795 return a * b;
796 }
798 // CHECK-LABEL: @mul_i32(
799 // CHECK-NEXT: entry:
800 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
801 // CHECK-NEXT: ret <vscale x 4 x i32> [[MUL]]
803 svint32_t mul_i32(svint32_t a, svint32_t b) {
804 return a * b;
805 }
807 // CHECK-LABEL: @mul_i64(
808 // CHECK-NEXT: entry:
809 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
810 // CHECK-NEXT: ret <vscale x 2 x i64> [[MUL]]
812 svint64_t mul_i64(svint64_t a, svint64_t b) {
813 return a * b;
814 }
816 // CHECK-LABEL: @mul_u8(
817 // CHECK-NEXT: entry:
818 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
819 // CHECK-NEXT: ret <vscale x 16 x i8> [[MUL]]
821 svuint8_t mul_u8(svuint8_t a, svuint8_t b) {
822 return a * b;
823 }
825 // CHECK-LABEL: @mul_u16(
826 // CHECK-NEXT: entry:
827 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
828 // CHECK-NEXT: ret <vscale x 8 x i16> [[MUL]]
830 svuint16_t mul_u16(svuint16_t a, svuint16_t b) {
831 return a * b;
832 }
834 // CHECK-LABEL: @mul_u32(
835 // CHECK-NEXT: entry:
836 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
837 // CHECK-NEXT: ret <vscale x 4 x i32> [[MUL]]
839 svuint32_t mul_u32(svuint32_t a, svuint32_t b) {
840 return a * b;
841 }
843 // CHECK-LABEL: @mul_u64(
844 // CHECK-NEXT: entry:
845 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
846 // CHECK-NEXT: ret <vscale x 2 x i64> [[MUL]]
848 svuint64_t mul_u64(svuint64_t a, svuint64_t b) {
849 return a * b;
850 }
852 // CHECK-LABEL: @mul_f16(
853 // CHECK-NEXT: entry:
854 // CHECK-NEXT: [[MUL:%.*]] = fmul <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
855 // CHECK-NEXT: ret <vscale x 8 x half> [[MUL]]
857 svfloat16_t mul_f16(svfloat16_t a, svfloat16_t b) {
858 return a * b;
859 }
861 // CHECK-LABEL: @mul_f32(
862 // CHECK-NEXT: entry:
863 // CHECK-NEXT: [[MUL:%.*]] = fmul <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
864 // CHECK-NEXT: ret <vscale x 4 x float> [[MUL]]
866 svfloat32_t mul_f32(svfloat32_t a, svfloat32_t b) {
867 return a * b;
868 }
870 // CHECK-LABEL: @mul_f64(
871 // CHECK-NEXT: entry:
872 // CHECK-NEXT: [[MUL:%.*]] = fmul <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
873 // CHECK-NEXT: ret <vscale x 2 x double> [[MUL]]
875 svfloat64_t mul_f64(svfloat64_t a, svfloat64_t b) {
876 return a * b;
877 }
879 // CHECK-LABEL: @mul_inplace_i8(
880 // CHECK-NEXT: entry:
881 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
882 // CHECK-NEXT: ret <vscale x 16 x i8> [[MUL]]
884 svint8_t mul_inplace_i8(svint8_t a, svint8_t b) {
885 return a * b;
886 }
888 // CHECK-LABEL: @mul_inplace_i16(
889 // CHECK-NEXT: entry:
890 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
891 // CHECK-NEXT: ret <vscale x 8 x i16> [[MUL]]
893 svint16_t mul_inplace_i16(svint16_t a, svint16_t b) {
894 return a * b;
895 }
897 // CHECK-LABEL: @mul_inplace_i32(
898 // CHECK-NEXT: entry:
899 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
900 // CHECK-NEXT: ret <vscale x 4 x i32> [[MUL]]
902 svint32_t mul_inplace_i32(svint32_t a, svint32_t b) {
903 return a * b;
904 }
906 // CHECK-LABEL: @mul_inplace_i64(
907 // CHECK-NEXT: entry:
908 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
909 // CHECK-NEXT: ret <vscale x 2 x i64> [[MUL]]
911 svint64_t mul_inplace_i64(svint64_t a, svint64_t b) {
912 return a * b;
913 }
915 // CHECK-LABEL: @mul_inplace_u8(
916 // CHECK-NEXT: entry:
917 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
918 // CHECK-NEXT: ret <vscale x 16 x i8> [[MUL]]
920 svuint8_t mul_inplace_u8(svuint8_t a, svuint8_t b) {
921 return a * b;
922 }
924 // CHECK-LABEL: @mul_inplace_u16(
925 // CHECK-NEXT: entry:
926 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
927 // CHECK-NEXT: ret <vscale x 8 x i16> [[MUL]]
929 svuint16_t mul_inplace_u16(svuint16_t a, svuint16_t b) {
930 return a * b;
931 }
933 // CHECK-LABEL: @mul_inplace_u32(
934 // CHECK-NEXT: entry:
935 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
936 // CHECK-NEXT: ret <vscale x 4 x i32> [[MUL]]
938 svuint32_t mul_inplace_u32(svuint32_t a, svuint32_t b) {
939 return a * b;
940 }
942 // CHECK-LABEL: @mul_inplace_u64(
943 // CHECK-NEXT: entry:
944 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
945 // CHECK-NEXT: ret <vscale x 2 x i64> [[MUL]]
947 svuint64_t mul_inplace_u64(svuint64_t a, svuint64_t b) {
948 return a * b;
949 }
951 // CHECK-LABEL: @mul_inplace_f16(
952 // CHECK-NEXT: entry:
953 // CHECK-NEXT: [[MUL:%.*]] = fmul <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
954 // CHECK-NEXT: ret <vscale x 8 x half> [[MUL]]
956 svfloat16_t mul_inplace_f16(svfloat16_t a, svfloat16_t b) {
957 return a * b;
958 }
960 // CHECK-LABEL: @mul_inplace_f32(
961 // CHECK-NEXT: entry:
962 // CHECK-NEXT: [[MUL:%.*]] = fmul <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
963 // CHECK-NEXT: ret <vscale x 4 x float> [[MUL]]
965 svfloat32_t mul_inplace_f32(svfloat32_t a, svfloat32_t b) {
966 return a * b;
967 }
969 // CHECK-LABEL: @mul_inplace_f64(
970 // CHECK-NEXT: entry:
971 // CHECK-NEXT: [[MUL:%.*]] = fmul <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
972 // CHECK-NEXT: ret <vscale x 2 x double> [[MUL]]
974 svfloat64_t mul_inplace_f64(svfloat64_t a, svfloat64_t b) {
975 return a * b;
976 }
978 // CHECK-LABEL: @mul_scalar_i8(
979 // CHECK-NEXT: entry:
980 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
981 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
982 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
983 // CHECK-NEXT: ret <vscale x 16 x i8> [[MUL]]
985 svint8_t mul_scalar_i8(svint8_t a, int8_t b) {
986 return a * b;
987 }
989 // CHECK-LABEL: @mul_scalar_i16(
990 // CHECK-NEXT: entry:
991 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
992 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
993 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
994 // CHECK-NEXT: ret <vscale x 8 x i16> [[MUL]]
996 svint16_t mul_scalar_i16(svint16_t a, int16_t b) {
997 return a * b;
998 }
1000 // CHECK-LABEL: @mul_scalar_i32(
1001 // CHECK-NEXT: entry:
1002 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
1003 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
1004 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
1005 // CHECK-NEXT: ret <vscale x 4 x i32> [[MUL]]
1007 svint32_t mul_scalar_i32(svint32_t a, int32_t b) {
1008 return a * b;
1009 }
1011 // CHECK-LABEL: @mul_scalar_i64(
1012 // CHECK-NEXT: entry:
1013 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
1014 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1015 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
1016 // CHECK-NEXT: ret <vscale x 2 x i64> [[MUL]]
1018 svint64_t mul_scalar_i64(svint64_t a, int64_t b) {
1019 return a * b;
1020 }
1022 // CHECK-LABEL: @mul_scalar_u8(
1023 // CHECK-NEXT: entry:
1024 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
1025 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
1026 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
1027 // CHECK-NEXT: ret <vscale x 16 x i8> [[MUL]]
1029 svuint8_t mul_scalar_u8(svuint8_t a, uint8_t b) {
1030 return a * b;
1031 }
1033 // CHECK-LABEL: @mul_scalar_u16(
1034 // CHECK-NEXT: entry:
1035 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
1036 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1037 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
1038 // CHECK-NEXT: ret <vscale x 8 x i16> [[MUL]]
1040 svuint16_t mul_scalar_u16(svuint16_t a, uint16_t b) {
1041 return a * b;
1042 }
1044 // CHECK-LABEL: @mul_scalar_u32(
1045 // CHECK-NEXT: entry:
1046 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
1047 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
1048 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
1049 // CHECK-NEXT: ret <vscale x 4 x i32> [[MUL]]
1051 svuint32_t mul_scalar_u32(svuint32_t a, uint32_t b) {
1052 return a * b;
1053 }
1055 // CHECK-LABEL: @mul_scalar_u64(
1056 // CHECK-NEXT: entry:
1057 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
1058 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1059 // CHECK-NEXT: [[MUL:%.*]] = mul <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
1060 // CHECK-NEXT: ret <vscale x 2 x i64> [[MUL]]
1062 svuint64_t mul_scalar_u64(svuint64_t a, uint64_t b) {
1063 return a * b;
1064 }
1066 // CHECK-LABEL: @mul_scalar_f16(
1067 // CHECK-NEXT: entry:
1068 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x half> poison, half [[B:%.*]], i64 0
1069 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x half> [[SPLAT_SPLATINSERT]], <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
1070 // CHECK-NEXT: [[MUL:%.*]] = fmul <vscale x 8 x half> [[A:%.*]], [[SPLAT_SPLAT]]
1071 // CHECK-NEXT: ret <vscale x 8 x half> [[MUL]]
1073 svfloat16_t mul_scalar_f16(svfloat16_t a, __fp16 b) {
1074 return a * b;
1075 }
1077 // CHECK-LABEL: @mul_scalar_f32(
1078 // CHECK-NEXT: entry:
1079 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[B:%.*]], i64 0
1080 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[SPLAT_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
1081 // CHECK-NEXT: [[MUL:%.*]] = fmul <vscale x 4 x float> [[A:%.*]], [[SPLAT_SPLAT]]
1082 // CHECK-NEXT: ret <vscale x 4 x float> [[MUL]]
1084 svfloat32_t mul_scalar_f32(svfloat32_t a, float b) {
1085 return a * b;
1086 }
1088 // CHECK-LABEL: @mul_scalar_f64(
1089 // CHECK-NEXT: entry:
1090 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x double> poison, double [[B:%.*]], i64 0
1091 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x double> [[SPLAT_SPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
1092 // CHECK-NEXT: [[MUL:%.*]] = fmul <vscale x 2 x double> [[A:%.*]], [[SPLAT_SPLAT]]
1093 // CHECK-NEXT: ret <vscale x 2 x double> [[MUL]]
1095 svfloat64_t mul_scalar_f64(svfloat64_t a, double b) {
1096 return a * b;
1097 }
1099 // DIVISION
1101 // CHECK-LABEL: @div_i8(
1102 // CHECK-NEXT: entry:
1103 // CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
1104 // CHECK-NEXT: ret <vscale x 16 x i8> [[DIV]]
1106 svint8_t div_i8(svint8_t a, svint8_t b) {
1107 return a / b;
1108 }
1110 // CHECK-LABEL: @div_i16(
1111 // CHECK-NEXT: entry:
1112 // CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
1113 // CHECK-NEXT: ret <vscale x 8 x i16> [[DIV]]
1115 svint16_t div_i16(svint16_t a, svint16_t b) {
1116 return a / b;
1117 }
1119 // CHECK-LABEL: @div_i32(
1120 // CHECK-NEXT: entry:
1121 // CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
1122 // CHECK-NEXT: ret <vscale x 4 x i32> [[DIV]]
1124 svint32_t div_i32(svint32_t a, svint32_t b) {
1125 return a / b;
1126 }
1128 // CHECK-LABEL: @div_i64(
1129 // CHECK-NEXT: entry:
1130 // CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
1131 // CHECK-NEXT: ret <vscale x 2 x i64> [[DIV]]
1133 svint64_t div_i64(svint64_t a, svint64_t b) {
1134 return a / b;
1135 }
1137 // CHECK-LABEL: @div_u8(
1138 // CHECK-NEXT: entry:
1139 // CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
1140 // CHECK-NEXT: ret <vscale x 16 x i8> [[DIV]]
1142 svuint8_t div_u8(svuint8_t a, svuint8_t b) {
1143 return a / b;
1144 }
1146 // CHECK-LABEL: @div_u16(
1147 // CHECK-NEXT: entry:
1148 // CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
1149 // CHECK-NEXT: ret <vscale x 8 x i16> [[DIV]]
1151 svuint16_t div_u16(svuint16_t a, svuint16_t b) {
1152 return a / b;
1153 }
1155 // CHECK-LABEL: @div_u32(
1156 // CHECK-NEXT: entry:
1157 // CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
1158 // CHECK-NEXT: ret <vscale x 4 x i32> [[DIV]]
1160 svuint32_t div_u32(svuint32_t a, svuint32_t b) {
1161 return a / b;
1162 }
1164 // CHECK-LABEL: @div_u64(
1165 // CHECK-NEXT: entry:
1166 // CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
1167 // CHECK-NEXT: ret <vscale x 2 x i64> [[DIV]]
1169 svuint64_t div_u64(svuint64_t a, svuint64_t b) {
1170 return a / b;
1171 }
1173 // CHECK-LABEL: @div_f16(
1174 // CHECK-NEXT: entry:
1175 // CHECK-NEXT: [[DIV:%.*]] = fdiv <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
1176 // CHECK-NEXT: ret <vscale x 8 x half> [[DIV]]
1178 svfloat16_t div_f16(svfloat16_t a, svfloat16_t b) {
1179 return a / b;
1180 }
1182 // CHECK-LABEL: @div_f32(
1183 // CHECK-NEXT: entry:
1184 // CHECK-NEXT: [[DIV:%.*]] = fdiv <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
1185 // CHECK-NEXT: ret <vscale x 4 x float> [[DIV]]
1187 svfloat32_t div_f32(svfloat32_t a, svfloat32_t b) {
1188 return a / b;
1189 }
1191 // CHECK-LABEL: @div_f64(
1192 // CHECK-NEXT: entry:
1193 // CHECK-NEXT: [[DIV:%.*]] = fdiv <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
1194 // CHECK-NEXT: ret <vscale x 2 x double> [[DIV]]
1196 svfloat64_t div_f64(svfloat64_t a, svfloat64_t b) {
1197 return a / b;
1198 }
1200 // CHECK-LABEL: @div_inplace_i8(
1201 // CHECK-NEXT: entry:
1202 // CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
1203 // CHECK-NEXT: ret <vscale x 16 x i8> [[DIV]]
1205 svint8_t div_inplace_i8(svint8_t a, svint8_t b) {
1206 return a / b;
1207 }
1209 // CHECK-LABEL: @div_inplace_i16(
1210 // CHECK-NEXT: entry:
1211 // CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
1212 // CHECK-NEXT: ret <vscale x 8 x i16> [[DIV]]
1214 svint16_t div_inplace_i16(svint16_t a, svint16_t b) {
1215 return a / b;
1216 }
1218 // CHECK-LABEL: @div_inplace_i32(
1219 // CHECK-NEXT: entry:
1220 // CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
1221 // CHECK-NEXT: ret <vscale x 4 x i32> [[DIV]]
1223 svint32_t div_inplace_i32(svint32_t a, svint32_t b) {
1224 return a / b;
1225 }
1227 // CHECK-LABEL: @div_inplace_i64(
1228 // CHECK-NEXT: entry:
1229 // CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
1230 // CHECK-NEXT: ret <vscale x 2 x i64> [[DIV]]
1232 svint64_t div_inplace_i64(svint64_t a, svint64_t b) {
1233 return a / b;
1234 }
1236 // CHECK-LABEL: @div_inplace_u8(
1237 // CHECK-NEXT: entry:
1238 // CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
1239 // CHECK-NEXT: ret <vscale x 16 x i8> [[DIV]]
1241 svuint8_t div_inplace_u8(svuint8_t a, svuint8_t b) {
1242 return a / b;
1243 }
1245 // CHECK-LABEL: @div_inplace_u16(
1246 // CHECK-NEXT: entry:
1247 // CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
1248 // CHECK-NEXT: ret <vscale x 8 x i16> [[DIV]]
1250 svuint16_t div_inplace_u16(svuint16_t a, svuint16_t b) {
1251 return a / b;
1252 }
1254 // CHECK-LABEL: @div_inplace_u32(
1255 // CHECK-NEXT: entry:
1256 // CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
1257 // CHECK-NEXT: ret <vscale x 4 x i32> [[DIV]]
1259 svuint32_t div_inplace_u32(svuint32_t a, svuint32_t b) {
1260 return a / b;
1261 }
1263 // CHECK-LABEL: @div_inplace_u64(
1264 // CHECK-NEXT: entry:
1265 // CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
1266 // CHECK-NEXT: ret <vscale x 2 x i64> [[DIV]]
1268 svuint64_t div_inplace_u64(svuint64_t a, svuint64_t b) {
1269 return a / b;
1270 }
1272 // CHECK-LABEL: @div_inplace_f16(
1273 // CHECK-NEXT: entry:
1274 // CHECK-NEXT: [[DIV:%.*]] = fdiv <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
1275 // CHECK-NEXT: ret <vscale x 8 x half> [[DIV]]
1277 svfloat16_t div_inplace_f16(svfloat16_t a, svfloat16_t b) {
1278 return a / b;
1279 }
1281 // CHECK-LABEL: @div_inplace_f32(
1282 // CHECK-NEXT: entry:
1283 // CHECK-NEXT: [[DIV:%.*]] = fdiv <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
1284 // CHECK-NEXT: ret <vscale x 4 x float> [[DIV]]
1286 svfloat32_t div_inplace_f32(svfloat32_t a, svfloat32_t b) {
1287 return a / b;
1288 }
1290 // CHECK-LABEL: @div_inplace_f64(
1291 // CHECK-NEXT: entry:
1292 // CHECK-NEXT: [[DIV:%.*]] = fdiv <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
1293 // CHECK-NEXT: ret <vscale x 2 x double> [[DIV]]
1295 svfloat64_t div_inplace_f64(svfloat64_t a, svfloat64_t b) {
1296 return a / b;
1297 }
1299 // CHECK-LABEL: @div_scalar_i8(
1300 // CHECK-NEXT: entry:
1301 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
1302 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
1303 // CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
1304 // CHECK-NEXT: ret <vscale x 16 x i8> [[DIV]]
1306 svint8_t div_scalar_i8(svint8_t a, int8_t b) {
1307 return a / b;
1308 }
1310 // CHECK-LABEL: @div_scalar_i16(
1311 // CHECK-NEXT: entry:
1312 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
1313 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1314 // CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
1315 // CHECK-NEXT: ret <vscale x 8 x i16> [[DIV]]
1317 svint16_t div_scalar_i16(svint16_t a, int16_t b) {
1318 return a / b;
1319 }
1321 // CHECK-LABEL: @div_scalar_i32(
1322 // CHECK-NEXT: entry:
1323 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
1324 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
1325 // CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
1326 // CHECK-NEXT: ret <vscale x 4 x i32> [[DIV]]
1328 svint32_t div_scalar_i32(svint32_t a, int32_t b) {
1329 return a / b;
1330 }
1332 // CHECK-LABEL: @div_scalar_i64(
1333 // CHECK-NEXT: entry:
1334 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
1335 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1336 // CHECK-NEXT: [[DIV:%.*]] = sdiv <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
1337 // CHECK-NEXT: ret <vscale x 2 x i64> [[DIV]]
1339 svint64_t div_scalar_i64(svint64_t a, int64_t b) {
1340 return a / b;
1341 }
1343 // CHECK-LABEL: @div_scalar_u8(
1344 // CHECK-NEXT: entry:
1345 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
1346 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
1347 // CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
1348 // CHECK-NEXT: ret <vscale x 16 x i8> [[DIV]]
1350 svuint8_t div_scalar_u8(svuint8_t a, uint8_t b) {
1351 return a / b;
1352 }
1354 // CHECK-LABEL: @div_scalar_u16(
1355 // CHECK-NEXT: entry:
1356 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
1357 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1358 // CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
1359 // CHECK-NEXT: ret <vscale x 8 x i16> [[DIV]]
1361 svuint16_t div_scalar_u16(svuint16_t a, uint16_t b) {
1362 return a / b;
1363 }
1365 // CHECK-LABEL: @div_scalar_u32(
1366 // CHECK-NEXT: entry:
1367 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
1368 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
1369 // CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
1370 // CHECK-NEXT: ret <vscale x 4 x i32> [[DIV]]
1372 svuint32_t div_scalar_u32(svuint32_t a, uint32_t b) {
1373 return a / b;
1374 }
1376 // CHECK-LABEL: @div_scalar_u64(
1377 // CHECK-NEXT: entry:
1378 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
1379 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1380 // CHECK-NEXT: [[DIV:%.*]] = udiv <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
1381 // CHECK-NEXT: ret <vscale x 2 x i64> [[DIV]]
1383 svuint64_t div_scalar_u64(svuint64_t a, uint64_t b) {
1384 return a / b;
1385 }
1387 // CHECK-LABEL: @div_scalar_f16(
1388 // CHECK-NEXT: entry:
1389 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x half> poison, half [[B:%.*]], i64 0
1390 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x half> [[SPLAT_SPLATINSERT]], <vscale x 8 x half> poison, <vscale x 8 x i32> zeroinitializer
1391 // CHECK-NEXT: [[DIV:%.*]] = fdiv <vscale x 8 x half> [[A:%.*]], [[SPLAT_SPLAT]]
1392 // CHECK-NEXT: ret <vscale x 8 x half> [[DIV]]
1394 svfloat16_t div_scalar_f16(svfloat16_t a, __fp16 b) {
1395 return a / b;
1396 }
1398 // CHECK-LABEL: @div_scalar_f32(
1399 // CHECK-NEXT: entry:
1400 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x float> poison, float [[B:%.*]], i64 0
1401 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x float> [[SPLAT_SPLATINSERT]], <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer
1402 // CHECK-NEXT: [[DIV:%.*]] = fdiv <vscale x 4 x float> [[A:%.*]], [[SPLAT_SPLAT]]
1403 // CHECK-NEXT: ret <vscale x 4 x float> [[DIV]]
1405 svfloat32_t div_scalar_f32(svfloat32_t a, float b) {
1406 return a / b;
1407 }
1409 // CHECK-LABEL: @div_scalar_f64(
1410 // CHECK-NEXT: entry:
1411 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x double> poison, double [[B:%.*]], i64 0
1412 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x double> [[SPLAT_SPLATINSERT]], <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer
1413 // CHECK-NEXT: [[DIV:%.*]] = fdiv <vscale x 2 x double> [[A:%.*]], [[SPLAT_SPLAT]]
1414 // CHECK-NEXT: ret <vscale x 2 x double> [[DIV]]
1416 svfloat64_t div_scalar_f64(svfloat64_t a, double b) {
1417 return a / b;
1418 }
1420 // REMAINDER
1422 // CHECK-LABEL: @rem_i8(
1423 // CHECK-NEXT: entry:
1424 // CHECK-NEXT: [[REM:%.*]] = srem <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
1425 // CHECK-NEXT: ret <vscale x 16 x i8> [[REM]]
1427 svint8_t rem_i8(svint8_t a, svint8_t b) {
1428 return a % b;
1429 }
1431 // CHECK-LABEL: @rem_i16(
1432 // CHECK-NEXT: entry:
1433 // CHECK-NEXT: [[REM:%.*]] = srem <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
1434 // CHECK-NEXT: ret <vscale x 8 x i16> [[REM]]
1436 svint16_t rem_i16(svint16_t a, svint16_t b) {
1437 return a % b;
1438 }
1440 // CHECK-LABEL: @rem_i32(
1441 // CHECK-NEXT: entry:
1442 // CHECK-NEXT: [[REM:%.*]] = srem <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
1443 // CHECK-NEXT: ret <vscale x 4 x i32> [[REM]]
1445 svint32_t rem_i32(svint32_t a, svint32_t b) {
1446 return a % b;
1447 }
1449 // CHECK-LABEL: @rem_i64(
1450 // CHECK-NEXT: entry:
1451 // CHECK-NEXT: [[REM:%.*]] = srem <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
1452 // CHECK-NEXT: ret <vscale x 2 x i64> [[REM]]
1454 svint64_t rem_i64(svint64_t a, svint64_t b) {
1455 return a % b;
1456 }
1458 // CHECK-LABEL: @rem_u8(
1459 // CHECK-NEXT: entry:
1460 // CHECK-NEXT: [[REM:%.*]] = urem <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
1461 // CHECK-NEXT: ret <vscale x 16 x i8> [[REM]]
1463 svuint8_t rem_u8(svuint8_t a, svuint8_t b) {
1464 return a % b;
1465 }
1467 // CHECK-LABEL: @rem_u16(
1468 // CHECK-NEXT: entry:
1469 // CHECK-NEXT: [[REM:%.*]] = urem <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
1470 // CHECK-NEXT: ret <vscale x 8 x i16> [[REM]]
1472 svuint16_t rem_u16(svuint16_t a, svuint16_t b) {
1473 return a % b;
1474 }
1476 // CHECK-LABEL: @rem_u32(
1477 // CHECK-NEXT: entry:
1478 // CHECK-NEXT: [[REM:%.*]] = urem <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
1479 // CHECK-NEXT: ret <vscale x 4 x i32> [[REM]]
1481 svuint32_t rem_u32(svuint32_t a, svuint32_t b) {
1482 return a % b;
1483 }
1485 // CHECK-LABEL: @rem_u64(
1486 // CHECK-NEXT: entry:
1487 // CHECK-NEXT: [[REM:%.*]] = urem <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
1488 // CHECK-NEXT: ret <vscale x 2 x i64> [[REM]]
1490 svuint64_t rem_u64(svuint64_t a, svuint64_t b) {
1491 return a % b;
1492 }
1494 // CHECK-LABEL: @rem_inplace_i8(
1495 // CHECK-NEXT: entry:
1496 // CHECK-NEXT: [[REM:%.*]] = srem <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
1497 // CHECK-NEXT: ret <vscale x 16 x i8> [[REM]]
1499 svint8_t rem_inplace_i8(svint8_t a, svint8_t b) {
1500 return a % b;
1501 }
1503 // CHECK-LABEL: @rem_inplace_i16(
1504 // CHECK-NEXT: entry:
1505 // CHECK-NEXT: [[REM:%.*]] = srem <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
1506 // CHECK-NEXT: ret <vscale x 8 x i16> [[REM]]
1508 svint16_t rem_inplace_i16(svint16_t a, svint16_t b) {
1509 return a % b;
1510 }
1512 // CHECK-LABEL: @rem_inplace_i32(
1513 // CHECK-NEXT: entry:
1514 // CHECK-NEXT: [[REM:%.*]] = srem <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
1515 // CHECK-NEXT: ret <vscale x 4 x i32> [[REM]]
1517 svint32_t rem_inplace_i32(svint32_t a, svint32_t b) {
1518 return a % b;
1519 }
1521 // CHECK-LABEL: @rem_inplace_i64(
1522 // CHECK-NEXT: entry:
1523 // CHECK-NEXT: [[REM:%.*]] = srem <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
1524 // CHECK-NEXT: ret <vscale x 2 x i64> [[REM]]
1526 svint64_t rem_inplace_i64(svint64_t a, svint64_t b) {
1527 return a % b;
1528 }
1530 // CHECK-LABEL: @rem_inplace_u8(
1531 // CHECK-NEXT: entry:
1532 // CHECK-NEXT: [[REM:%.*]] = urem <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
1533 // CHECK-NEXT: ret <vscale x 16 x i8> [[REM]]
1535 svuint8_t rem_inplace_u8(svuint8_t a, svuint8_t b) {
1536 return a % b;
1537 }
1539 // CHECK-LABEL: @rem_inplace_u16(
1540 // CHECK-NEXT: entry:
1541 // CHECK-NEXT: [[REM:%.*]] = urem <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
1542 // CHECK-NEXT: ret <vscale x 8 x i16> [[REM]]
1544 svuint16_t rem_inplace_u16(svuint16_t a, svuint16_t b) {
1545 return a % b;
1546 }
1548 // CHECK-LABEL: @rem_inplace_u32(
1549 // CHECK-NEXT: entry:
1550 // CHECK-NEXT: [[REM:%.*]] = urem <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
1551 // CHECK-NEXT: ret <vscale x 4 x i32> [[REM]]
1553 svuint32_t rem_inplace_u32(svuint32_t a, svuint32_t b) {
1554 return a % b;
1555 }
1557 // CHECK-LABEL: @rem_inplace_u64(
1558 // CHECK-NEXT: entry:
1559 // CHECK-NEXT: [[REM:%.*]] = urem <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
1560 // CHECK-NEXT: ret <vscale x 2 x i64> [[REM]]
1562 svuint64_t rem_inplace_u64(svuint64_t a, svuint64_t b) {
1563 return a % b;
1564 }
1566 // CHECK-LABEL: @rem_scalar_i8(
1567 // CHECK-NEXT: entry:
1568 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
1569 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
1570 // CHECK-NEXT: [[REM:%.*]] = srem <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
1571 // CHECK-NEXT: ret <vscale x 16 x i8> [[REM]]
1573 svint8_t rem_scalar_i8(svint8_t a, int8_t b) {
1574 return a % b;
1575 }
1577 // CHECK-LABEL: @rem_scalar_i16(
1578 // CHECK-NEXT: entry:
1579 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
1580 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1581 // CHECK-NEXT: [[REM:%.*]] = srem <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
1582 // CHECK-NEXT: ret <vscale x 8 x i16> [[REM]]
1584 svint16_t rem_scalar_i16(svint16_t a, int16_t b) {
1585 return a % b;
1586 }
1588 // CHECK-LABEL: @rem_scalar_i32(
1589 // CHECK-NEXT: entry:
1590 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
1591 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
1592 // CHECK-NEXT: [[REM:%.*]] = srem <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
1593 // CHECK-NEXT: ret <vscale x 4 x i32> [[REM]]
1595 svint32_t rem_scalar_i32(svint32_t a, int32_t b) {
1596 return a % b;
1597 }
1599 // CHECK-LABEL: @rem_scalar_i64(
1600 // CHECK-NEXT: entry:
1601 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
1602 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1603 // CHECK-NEXT: [[REM:%.*]] = srem <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
1604 // CHECK-NEXT: ret <vscale x 2 x i64> [[REM]]
1606 svint64_t rem_scalar_i64(svint64_t a, int64_t b) {
1607 return a % b;
1608 }
1610 // CHECK-LABEL: @rem_scalar_u8(
1611 // CHECK-NEXT: entry:
1612 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 16 x i8> poison, i8 [[B:%.*]], i64 0
1613 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 16 x i8> [[SPLAT_SPLATINSERT]], <vscale x 16 x i8> poison, <vscale x 16 x i32> zeroinitializer
1614 // CHECK-NEXT: [[REM:%.*]] = urem <vscale x 16 x i8> [[A:%.*]], [[SPLAT_SPLAT]]
1615 // CHECK-NEXT: ret <vscale x 16 x i8> [[REM]]
1617 svuint8_t rem_scalar_u8(svuint8_t a, uint8_t b) {
1618 return a % b;
1619 }
1621 // CHECK-LABEL: @rem_scalar_u16(
1622 // CHECK-NEXT: entry:
1623 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 8 x i16> poison, i16 [[B:%.*]], i64 0
1624 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 8 x i16> [[SPLAT_SPLATINSERT]], <vscale x 8 x i16> poison, <vscale x 8 x i32> zeroinitializer
1625 // CHECK-NEXT: [[REM:%.*]] = urem <vscale x 8 x i16> [[A:%.*]], [[SPLAT_SPLAT]]
1626 // CHECK-NEXT: ret <vscale x 8 x i16> [[REM]]
1628 svuint16_t rem_scalar_u16(svuint16_t a, uint16_t b) {
1629 return a % b;
1630 }
1632 // CHECK-LABEL: @rem_scalar_u32(
1633 // CHECK-NEXT: entry:
1634 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 4 x i32> poison, i32 [[B:%.*]], i64 0
1635 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 4 x i32> [[SPLAT_SPLATINSERT]], <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer
1636 // CHECK-NEXT: [[REM:%.*]] = urem <vscale x 4 x i32> [[A:%.*]], [[SPLAT_SPLAT]]
1637 // CHECK-NEXT: ret <vscale x 4 x i32> [[REM]]
1639 svuint32_t rem_scalar_u32(svuint32_t a, uint32_t b) {
1640 return a % b;
1641 }
1643 // CHECK-LABEL: @rem_scalar_u64(
1644 // CHECK-NEXT: entry:
1645 // CHECK-NEXT: [[SPLAT_SPLATINSERT:%.*]] = insertelement <vscale x 2 x i64> poison, i64 [[B:%.*]], i64 0
1646 // CHECK-NEXT: [[SPLAT_SPLAT:%.*]] = shufflevector <vscale x 2 x i64> [[SPLAT_SPLATINSERT]], <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer
1647 // CHECK-NEXT: [[REM:%.*]] = urem <vscale x 2 x i64> [[A:%.*]], [[SPLAT_SPLAT]]
1648 // CHECK-NEXT: ret <vscale x 2 x i64> [[REM]]
1650 svuint64_t rem_scalar_u64(svuint64_t a, uint64_t b) {
1651 return a % b;
1652 }
1654 // UNARY PROMOTION
1656 // CHECK-LABEL: @prom_i8(
1657 // CHECK-NEXT: entry:
1658 // CHECK-NEXT: ret <vscale x 16 x i8> [[A:%.*]]
1660 svint8_t prom_i8(svint8_t a) {
1661 return +a;
1662 }
1664 // CHECK-LABEL: @prom_i16(
1665 // CHECK-NEXT: entry:
1666 // CHECK-NEXT: ret <vscale x 8 x i16> [[A:%.*]]
1668 svint16_t prom_i16(svint16_t a) {
1669 return +a;
1670 }
1672 // CHECK-LABEL: @prom_i32(
1673 // CHECK-NEXT: entry:
1674 // CHECK-NEXT: ret <vscale x 4 x i32> [[A:%.*]]
1676 svint32_t prom_i32(svint32_t a) {
1677 return +a;
1678 }
1680 // CHECK-LABEL: @prom_i64(
1681 // CHECK-NEXT: entry:
1682 // CHECK-NEXT: ret <vscale x 2 x i64> [[A:%.*]]
1684 svint64_t prom_i64(svint64_t a) {
1685 return +a;
1686 }
1688 // CHECK-LABEL: @prom_u8(
1689 // CHECK-NEXT: entry:
1690 // CHECK-NEXT: ret <vscale x 16 x i8> [[A:%.*]]
1692 svuint8_t prom_u8(svuint8_t a) {
1693 return +a;
1694 }
1696 // CHECK-LABEL: @prom_u16(
1697 // CHECK-NEXT: entry:
1698 // CHECK-NEXT: ret <vscale x 8 x i16> [[A:%.*]]
1700 svuint16_t prom_u16(svuint16_t a) {
1701 return +a;
1702 }
1704 // CHECK-LABEL: @prom_u32(
1705 // CHECK-NEXT: entry:
1706 // CHECK-NEXT: ret <vscale x 4 x i32> [[A:%.*]]
1708 svuint32_t prom_u32(svuint32_t a) {
1709 return +a;
1710 }
1712 // CHECK-LABEL: @prom_u64(
1713 // CHECK-NEXT: entry:
1714 // CHECK-NEXT: ret <vscale x 2 x i64> [[A:%.*]]
1716 svuint64_t prom_u64(svuint64_t a) {
1717 return +a;
1718 }
1720 // UNARY NEGATION
1722 // CHECK-LABEL: @neg_i8(
1723 // CHECK-NEXT: entry:
1724 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[A:%.*]]
1725 // CHECK-NEXT: ret <vscale x 16 x i8> [[SUB]]
1727 svint8_t neg_i8(svint8_t a) {
1728 return -a;
1729 }
1731 // CHECK-LABEL: @neg_i16(
1732 // CHECK-NEXT: entry:
1733 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 8 x i16> zeroinitializer, [[A:%.*]]
1734 // CHECK-NEXT: ret <vscale x 8 x i16> [[SUB]]
1736 svint16_t neg_i16(svint16_t a) {
1737 return -a;
1738 }
1740 // CHECK-LABEL: @neg_i32(
1741 // CHECK-NEXT: entry:
1742 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 4 x i32> zeroinitializer, [[A:%.*]]
1743 // CHECK-NEXT: ret <vscale x 4 x i32> [[SUB]]
1745 svint32_t neg_i32(svint32_t a) {
1746 return -a;
1747 }
1749 // CHECK-LABEL: @neg_i64(
1750 // CHECK-NEXT: entry:
1751 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 2 x i64> zeroinitializer, [[A:%.*]]
1752 // CHECK-NEXT: ret <vscale x 2 x i64> [[SUB]]
1754 svint64_t neg_i64(svint64_t a) {
1755 return -a;
1756 }
1758 // CHECK-LABEL: @neg_u8(
1759 // CHECK-NEXT: entry:
1760 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 16 x i8> zeroinitializer, [[A:%.*]]
1761 // CHECK-NEXT: ret <vscale x 16 x i8> [[SUB]]
1763 svuint8_t neg_u8(svuint8_t a) {
1764 return -a;
1765 }
1767 // CHECK-LABEL: @neg_u16(
1768 // CHECK-NEXT: entry:
1769 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 8 x i16> zeroinitializer, [[A:%.*]]
1770 // CHECK-NEXT: ret <vscale x 8 x i16> [[SUB]]
1772 svuint16_t neg_u16(svuint16_t a) {
1773 return -a;
1774 }
1776 // CHECK-LABEL: @neg_u32(
1777 // CHECK-NEXT: entry:
1778 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 4 x i32> zeroinitializer, [[A:%.*]]
1779 // CHECK-NEXT: ret <vscale x 4 x i32> [[SUB]]
1781 svuint32_t neg_u32(svuint32_t a) {
1782 return -a;
1783 }
1785 // CHECK-LABEL: @neg_u64(
1786 // CHECK-NEXT: entry:
1787 // CHECK-NEXT: [[SUB:%.*]] = sub <vscale x 2 x i64> zeroinitializer, [[A:%.*]]
1788 // CHECK-NEXT: ret <vscale x 2 x i64> [[SUB]]
1790 svuint64_t neg_u64(svuint64_t a) {
1791 return -a;
1792 }