1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // REQUIRES: aarch64-registered-target
3 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
4 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
5 // RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s
6 // RUN: %clang_cc1 -DSVE_OVERLOADED_FORMS -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -emit-llvm -o - -x c++ %s | opt -S -passes=mem2reg,tailcallelim | FileCheck %s -check-prefix=CPP-CHECK
7 // RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve -S -disable-O0-optnone -Werror -Wall -o /dev/null %s
// NOTE(review): this TU uses svint32_t/svuint32_t and the svqincw* ACLE
// intrinsics, so an `#include <arm_sve.h>` is expected earlier in the
// file — confirm it was not lost from this chunk.
#ifdef SVE_OVERLOADED_FORMS
// A simple used,unused... macro, long enough to represent any SVE builtin.
// Overloaded forms drop the type suffix: only A1 and A3 are pasted.
#define SVE_ACLE_FUNC(A1,A2_UNUSED,A3,A4_UNUSED) A1##A3
#else
// Explicit forms paste all four fragments into the full builtin name.
#define SVE_ACLE_FUNC(A1,A2,A3,A4) A1##A2##A3##A4
#endif
17 // CHECK-LABEL: @test_svqincw_n_s32(
19 // CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.aarch64.sve.sqincw.n32(i32 [[OP:%.*]], i32 31, i32 1)
20 // CHECK-NEXT: ret i32 [[TMP0]]
22 // CPP-CHECK-LABEL: @_Z18test_svqincw_n_s32i(
23 // CPP-CHECK-NEXT: entry:
24 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.aarch64.sve.sqincw.n32(i32 [[OP:%.*]], i32 31, i32 1)
25 // CPP-CHECK-NEXT: ret i32 [[TMP0]]
27 int32_t test_svqincw_n_s32(int32_t op
)
29 return SVE_ACLE_FUNC(svqincw
,_n_s32
,,)(op
, 1);
32 // CHECK-LABEL: @test_svqincw_n_s32_1(
34 // CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.aarch64.sve.sqincw.n32(i32 [[OP:%.*]], i32 31, i32 16)
35 // CHECK-NEXT: ret i32 [[TMP0]]
37 // CPP-CHECK-LABEL: @_Z20test_svqincw_n_s32_1i(
38 // CPP-CHECK-NEXT: entry:
39 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.aarch64.sve.sqincw.n32(i32 [[OP:%.*]], i32 31, i32 16)
40 // CPP-CHECK-NEXT: ret i32 [[TMP0]]
42 int32_t test_svqincw_n_s32_1(int32_t op
)
44 return SVE_ACLE_FUNC(svqincw
,_n_s32
,,)(op
, 16);
47 // CHECK-LABEL: @test_svqincw_n_s64(
49 // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sve.sqincw.n64(i64 [[OP:%.*]], i32 31, i32 1)
50 // CHECK-NEXT: ret i64 [[TMP0]]
52 // CPP-CHECK-LABEL: @_Z18test_svqincw_n_s64l(
53 // CPP-CHECK-NEXT: entry:
54 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sve.sqincw.n64(i64 [[OP:%.*]], i32 31, i32 1)
55 // CPP-CHECK-NEXT: ret i64 [[TMP0]]
57 int64_t test_svqincw_n_s64(int64_t op
)
59 return SVE_ACLE_FUNC(svqincw
,_n_s64
,,)(op
, 1);
62 // CHECK-LABEL: @test_svqincw_n_u32(
64 // CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.aarch64.sve.uqincw.n32(i32 [[OP:%.*]], i32 31, i32 16)
65 // CHECK-NEXT: ret i32 [[TMP0]]
67 // CPP-CHECK-LABEL: @_Z18test_svqincw_n_u32j(
68 // CPP-CHECK-NEXT: entry:
69 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.aarch64.sve.uqincw.n32(i32 [[OP:%.*]], i32 31, i32 16)
70 // CPP-CHECK-NEXT: ret i32 [[TMP0]]
72 uint32_t test_svqincw_n_u32(uint32_t op
)
74 return SVE_ACLE_FUNC(svqincw
,_n_u32
,,)(op
, 16);
77 // CHECK-LABEL: @test_svqincw_n_u64(
79 // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sve.uqincw.n64(i64 [[OP:%.*]], i32 31, i32 1)
80 // CHECK-NEXT: ret i64 [[TMP0]]
82 // CPP-CHECK-LABEL: @_Z18test_svqincw_n_u64m(
83 // CPP-CHECK-NEXT: entry:
84 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sve.uqincw.n64(i64 [[OP:%.*]], i32 31, i32 1)
85 // CPP-CHECK-NEXT: ret i64 [[TMP0]]
87 uint64_t test_svqincw_n_u64(uint64_t op
)
89 return SVE_ACLE_FUNC(svqincw
,_n_u64
,,)(op
, 1);
92 // CHECK-LABEL: @test_svqincw_pat_n_s32(
94 // CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.aarch64.sve.sqincw.n32(i32 [[OP:%.*]], i32 4, i32 16)
95 // CHECK-NEXT: ret i32 [[TMP0]]
97 // CPP-CHECK-LABEL: @_Z22test_svqincw_pat_n_s32i(
98 // CPP-CHECK-NEXT: entry:
99 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.aarch64.sve.sqincw.n32(i32 [[OP:%.*]], i32 4, i32 16)
100 // CPP-CHECK-NEXT: ret i32 [[TMP0]]
102 int32_t test_svqincw_pat_n_s32(int32_t op
)
104 return SVE_ACLE_FUNC(svqincw_pat
,_n_s32
,,)(op
, SV_VL4
, 16);
107 // CHECK-LABEL: @test_svqincw_pat_n_s64(
108 // CHECK-NEXT: entry:
109 // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sve.sqincw.n64(i64 [[OP:%.*]], i32 5, i32 1)
110 // CHECK-NEXT: ret i64 [[TMP0]]
112 // CPP-CHECK-LABEL: @_Z22test_svqincw_pat_n_s64l(
113 // CPP-CHECK-NEXT: entry:
114 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sve.sqincw.n64(i64 [[OP:%.*]], i32 5, i32 1)
115 // CPP-CHECK-NEXT: ret i64 [[TMP0]]
117 int64_t test_svqincw_pat_n_s64(int64_t op
)
119 return SVE_ACLE_FUNC(svqincw_pat
,_n_s64
,,)(op
, SV_VL5
, 1);
122 // CHECK-LABEL: @test_svqincw_pat_n_u32(
123 // CHECK-NEXT: entry:
124 // CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.aarch64.sve.uqincw.n32(i32 [[OP:%.*]], i32 6, i32 16)
125 // CHECK-NEXT: ret i32 [[TMP0]]
127 // CPP-CHECK-LABEL: @_Z22test_svqincw_pat_n_u32j(
128 // CPP-CHECK-NEXT: entry:
129 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i32 @llvm.aarch64.sve.uqincw.n32(i32 [[OP:%.*]], i32 6, i32 16)
130 // CPP-CHECK-NEXT: ret i32 [[TMP0]]
132 uint32_t test_svqincw_pat_n_u32(uint32_t op
)
134 return SVE_ACLE_FUNC(svqincw_pat
,_n_u32
,,)(op
, SV_VL6
, 16);
137 // CHECK-LABEL: @test_svqincw_pat_n_u64(
138 // CHECK-NEXT: entry:
139 // CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sve.uqincw.n64(i64 [[OP:%.*]], i32 7, i32 1)
140 // CHECK-NEXT: ret i64 [[TMP0]]
142 // CPP-CHECK-LABEL: @_Z22test_svqincw_pat_n_u64m(
143 // CPP-CHECK-NEXT: entry:
144 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call i64 @llvm.aarch64.sve.uqincw.n64(i64 [[OP:%.*]], i32 7, i32 1)
145 // CPP-CHECK-NEXT: ret i64 [[TMP0]]
147 uint64_t test_svqincw_pat_n_u64(uint64_t op
)
149 return SVE_ACLE_FUNC(svqincw_pat
,_n_u64
,,)(op
, SV_VL7
, 1);
152 // CHECK-LABEL: @test_svqincw_s32(
153 // CHECK-NEXT: entry:
154 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sqincw.nxv4i32(<vscale x 4 x i32> [[OP:%.*]], i32 31, i32 16)
155 // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
157 // CPP-CHECK-LABEL: @_Z16test_svqincw_s32u11__SVInt32_t(
158 // CPP-CHECK-NEXT: entry:
159 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sqincw.nxv4i32(<vscale x 4 x i32> [[OP:%.*]], i32 31, i32 16)
160 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
162 svint32_t
test_svqincw_s32(svint32_t op
)
164 return SVE_ACLE_FUNC(svqincw
,_s32
,,)(op
, 16);
167 // CHECK-LABEL: @test_svqincw_u32(
168 // CHECK-NEXT: entry:
169 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uqincw.nxv4i32(<vscale x 4 x i32> [[OP:%.*]], i32 31, i32 1)
170 // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
172 // CPP-CHECK-LABEL: @_Z16test_svqincw_u32u12__SVUint32_t(
173 // CPP-CHECK-NEXT: entry:
174 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uqincw.nxv4i32(<vscale x 4 x i32> [[OP:%.*]], i32 31, i32 1)
175 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
177 svuint32_t
test_svqincw_u32(svuint32_t op
)
179 return SVE_ACLE_FUNC(svqincw
,_u32
,,)(op
, 1);
182 // CHECK-LABEL: @test_svqincw_pat_s32(
183 // CHECK-NEXT: entry:
184 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sqincw.nxv4i32(<vscale x 4 x i32> [[OP:%.*]], i32 8, i32 16)
185 // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
187 // CPP-CHECK-LABEL: @_Z20test_svqincw_pat_s32u11__SVInt32_t(
188 // CPP-CHECK-NEXT: entry:
189 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.sqincw.nxv4i32(<vscale x 4 x i32> [[OP:%.*]], i32 8, i32 16)
190 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
192 svint32_t
test_svqincw_pat_s32(svint32_t op
)
194 return SVE_ACLE_FUNC(svqincw_pat
,_s32
,,)(op
, SV_VL8
, 16);
197 // CHECK-LABEL: @test_svqincw_pat_u32(
198 // CHECK-NEXT: entry:
199 // CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uqincw.nxv4i32(<vscale x 4 x i32> [[OP:%.*]], i32 9, i32 1)
200 // CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
202 // CPP-CHECK-LABEL: @_Z20test_svqincw_pat_u32u12__SVUint32_t(
203 // CPP-CHECK-NEXT: entry:
204 // CPP-CHECK-NEXT: [[TMP0:%.*]] = tail call <vscale x 4 x i32> @llvm.aarch64.sve.uqincw.nxv4i32(<vscale x 4 x i32> [[OP:%.*]], i32 9, i32 1)
205 // CPP-CHECK-NEXT: ret <vscale x 4 x i32> [[TMP0]]
207 svuint32_t
test_svqincw_pat_u32(svuint32_t op
)
209 return SVE_ACLE_FUNC(svqincw_pat
,_u32
,,)(op
, SV_VL16
, 1);