// Source: llvm-project.git / clang/test/CodeGen/aarch64-sve-intrinsics/acle_sve_len.c
// From commit "Run DCE after a LoopFlatten test to reduce spurious output [nfc]"
// blob d942a3991faedb8b3ec4179094beb0dd09b7471e
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: aarch64-registered-target
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s
// RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,instcombine,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
8 #include <arm_sve.h>
10 #ifdef SVE_OVERLOADED_FORMS
11 // A simple used,unused... macro, long enough to represent any SVE builtin.
12 #define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
13 #else
14 #define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
15 #endif
17 // CHECK-LABEL: @test_svlen_s8(
18 // CHECK-NEXT: entry:
19 // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
20 // CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
21 // CHECK-NEXT: ret i64 [[TMP1]]
23 // CPP-CHECK-LABEL: @_Z13test_svlen_s8u10__SVInt8_t(
24 // CPP-CHECK-NEXT: entry:
25 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
26 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
27 // CPP-CHECK-NEXT: ret i64 [[TMP1]]
29 uint64_t test_svlen_s8(svint8_t op)
31 return SVE_ACLE_FUNC(svlen,_s8,,)(op);
34 // CHECK-LABEL: @test_svlen_s16(
35 // CHECK-NEXT: entry:
36 // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
37 // CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
38 // CHECK-NEXT: ret i64 [[TMP1]]
40 // CPP-CHECK-LABEL: @_Z14test_svlen_s16u11__SVInt16_t(
41 // CPP-CHECK-NEXT: entry:
42 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
43 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
44 // CPP-CHECK-NEXT: ret i64 [[TMP1]]
46 uint64_t test_svlen_s16(svint16_t op)
48 return SVE_ACLE_FUNC(svlen,_s16,,)(op);
51 // CHECK-LABEL: @test_svlen_s32(
52 // CHECK-NEXT: entry:
53 // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
54 // CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
55 // CHECK-NEXT: ret i64 [[TMP1]]
57 // CPP-CHECK-LABEL: @_Z14test_svlen_s32u11__SVInt32_t(
58 // CPP-CHECK-NEXT: entry:
59 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
60 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
61 // CPP-CHECK-NEXT: ret i64 [[TMP1]]
63 uint64_t test_svlen_s32(svint32_t op)
65 return SVE_ACLE_FUNC(svlen,_s32,,)(op);
68 // CHECK-LABEL: @test_svlen_s64(
69 // CHECK-NEXT: entry:
70 // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
71 // CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 1
72 // CHECK-NEXT: ret i64 [[TMP1]]
74 // CPP-CHECK-LABEL: @_Z14test_svlen_s64u11__SVInt64_t(
75 // CPP-CHECK-NEXT: entry:
76 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
77 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 1
78 // CPP-CHECK-NEXT: ret i64 [[TMP1]]
80 uint64_t test_svlen_s64(svint64_t op)
82 return SVE_ACLE_FUNC(svlen,_s64,,)(op);
85 // CHECK-LABEL: @test_svlen_u8(
86 // CHECK-NEXT: entry:
87 // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
88 // CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
89 // CHECK-NEXT: ret i64 [[TMP1]]
91 // CPP-CHECK-LABEL: @_Z13test_svlen_u8u11__SVUint8_t(
92 // CPP-CHECK-NEXT: entry:
93 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
94 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 4
95 // CPP-CHECK-NEXT: ret i64 [[TMP1]]
97 uint64_t test_svlen_u8(svuint8_t op)
99 return SVE_ACLE_FUNC(svlen,_u8,,)(op);
102 // CHECK-LABEL: @test_svlen_u16(
103 // CHECK-NEXT: entry:
104 // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
105 // CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
106 // CHECK-NEXT: ret i64 [[TMP1]]
108 // CPP-CHECK-LABEL: @_Z14test_svlen_u16u12__SVUint16_t(
109 // CPP-CHECK-NEXT: entry:
110 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
111 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
112 // CPP-CHECK-NEXT: ret i64 [[TMP1]]
114 uint64_t test_svlen_u16(svuint16_t op)
116 return SVE_ACLE_FUNC(svlen,_u16,,)(op);
119 // CHECK-LABEL: @test_svlen_u32(
120 // CHECK-NEXT: entry:
121 // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
122 // CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
123 // CHECK-NEXT: ret i64 [[TMP1]]
125 // CPP-CHECK-LABEL: @_Z14test_svlen_u32u12__SVUint32_t(
126 // CPP-CHECK-NEXT: entry:
127 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
128 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
129 // CPP-CHECK-NEXT: ret i64 [[TMP1]]
131 uint64_t test_svlen_u32(svuint32_t op)
133 return SVE_ACLE_FUNC(svlen,_u32,,)(op);
136 // CHECK-LABEL: @test_svlen_u64(
137 // CHECK-NEXT: entry:
138 // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
139 // CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 1
140 // CHECK-NEXT: ret i64 [[TMP1]]
142 // CPP-CHECK-LABEL: @_Z14test_svlen_u64u12__SVUint64_t(
143 // CPP-CHECK-NEXT: entry:
144 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
145 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 1
146 // CPP-CHECK-NEXT: ret i64 [[TMP1]]
148 uint64_t test_svlen_u64(svuint64_t op)
150 return SVE_ACLE_FUNC(svlen,_u64,,)(op);
153 // CHECK-LABEL: @test_svlen_f16(
154 // CHECK-NEXT: entry:
155 // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
156 // CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
157 // CHECK-NEXT: ret i64 [[TMP1]]
159 // CPP-CHECK-LABEL: @_Z14test_svlen_f16u13__SVFloat16_t(
160 // CPP-CHECK-NEXT: entry:
161 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
162 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 3
163 // CPP-CHECK-NEXT: ret i64 [[TMP1]]
165 uint64_t test_svlen_f16(svfloat16_t op)
167 return SVE_ACLE_FUNC(svlen,_f16,,)(op);
170 // CHECK-LABEL: @test_svlen_f32(
171 // CHECK-NEXT: entry:
172 // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
173 // CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
174 // CHECK-NEXT: ret i64 [[TMP1]]
176 // CPP-CHECK-LABEL: @_Z14test_svlen_f32u13__SVFloat32_t(
177 // CPP-CHECK-NEXT: entry:
178 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
179 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 2
180 // CPP-CHECK-NEXT: ret i64 [[TMP1]]
182 uint64_t test_svlen_f32(svfloat32_t op)
184 return SVE_ACLE_FUNC(svlen,_f32,,)(op);
187 // CHECK-LABEL: @test_svlen_f64(
188 // CHECK-NEXT: entry:
189 // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
190 // CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 1
191 // CHECK-NEXT: ret i64 [[TMP1]]
193 // CPP-CHECK-LABEL: @_Z14test_svlen_f64u13__SVFloat64_t(
194 // CPP-CHECK-NEXT: entry:
195 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.vscale.i64()
196 // CPP-CHECK-NEXT: [[TMP1:%.*]] = shl nuw nsw i64 [[TMP0]], 1
197 // CPP-CHECK-NEXT: ret i64 [[TMP1]]
199 uint64_t test_svlen_f64(svfloat64_t op)
201 return SVE_ACLE_FUNC(svlen,_f64,,)(op);