// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple aarch64-none-linux-gnu -target-feature +sve \
// RUN: -disable-O0-optnone \
// RUN: -emit-llvm -o - %s | opt -S -passes=sroa | FileCheck %s
// REQUIRES: aarch64-registered-target

#include <arm_sve.h>
10 // CHECK-LABEL: @_Z9cond_boolu10__SVBool_tu10__SVBool_t(
12 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 16 x i1> [[A:%.*]], [[B:%.*]]
13 // CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 16 x i1> [[CMP]], zeroinitializer
14 // CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 16 x i1> [[VECTOR_COND]], <vscale x 16 x i1> [[A]], <vscale x 16 x i1> [[B]]
15 // CHECK-NEXT: ret <vscale x 16 x i1> [[VECTOR_SELECT]]
17 svbool_t
cond_bool(svbool_t a
, svbool_t b
) {
21 // CHECK-LABEL: @_Z7cond_i8u10__SVInt8_tu10__SVInt8_t(
23 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
24 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
25 // CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 16 x i8> [[CONV]], zeroinitializer
26 // CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 16 x i1> [[VECTOR_COND]], <vscale x 16 x i8> [[A]], <vscale x 16 x i8> [[B]]
27 // CHECK-NEXT: ret <vscale x 16 x i8> [[VECTOR_SELECT]]
29 svint8_t
cond_i8(svint8_t a
, svint8_t b
) {
33 // CHECK-LABEL: @_Z7cond_u8u11__SVUint8_tu11__SVUint8_t(
35 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 16 x i8> [[A:%.*]], [[B:%.*]]
36 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 16 x i1> [[CMP]] to <vscale x 16 x i8>
37 // CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 16 x i8> [[CONV]], zeroinitializer
38 // CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 16 x i1> [[VECTOR_COND]], <vscale x 16 x i8> [[A]], <vscale x 16 x i8> [[B]]
39 // CHECK-NEXT: ret <vscale x 16 x i8> [[VECTOR_SELECT]]
41 svuint8_t
cond_u8(svuint8_t a
, svuint8_t b
) {
45 // CHECK-LABEL: @_Z8cond_i16u11__SVInt16_tu11__SVInt16_t(
47 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
48 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
49 // CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 8 x i16> [[CONV]], zeroinitializer
50 // CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 8 x i1> [[VECTOR_COND]], <vscale x 8 x i16> [[A]], <vscale x 8 x i16> [[B]]
51 // CHECK-NEXT: ret <vscale x 8 x i16> [[VECTOR_SELECT]]
53 svint16_t
cond_i16(svint16_t a
, svint16_t b
) {
57 // CHECK-LABEL: @_Z8cond_u16u12__SVUint16_tu12__SVUint16_t(
59 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 8 x i16> [[A:%.*]], [[B:%.*]]
60 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
61 // CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 8 x i16> [[CONV]], zeroinitializer
62 // CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 8 x i1> [[VECTOR_COND]], <vscale x 8 x i16> [[A]], <vscale x 8 x i16> [[B]]
63 // CHECK-NEXT: ret <vscale x 8 x i16> [[VECTOR_SELECT]]
65 svuint16_t
cond_u16(svuint16_t a
, svuint16_t b
) {
69 // CHECK-LABEL: @_Z8cond_i32u11__SVInt32_tu11__SVInt32_t(
71 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
72 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
73 // CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 4 x i32> [[CONV]], zeroinitializer
74 // CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 4 x i1> [[VECTOR_COND]], <vscale x 4 x i32> [[A]], <vscale x 4 x i32> [[B]]
75 // CHECK-NEXT: ret <vscale x 4 x i32> [[VECTOR_SELECT]]
77 svint32_t
cond_i32(svint32_t a
, svint32_t b
) {
81 // CHECK-LABEL: @_Z8cond_u32u12__SVUint32_tu12__SVUint32_t(
83 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 4 x i32> [[A:%.*]], [[B:%.*]]
84 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
85 // CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 4 x i32> [[CONV]], zeroinitializer
86 // CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 4 x i1> [[VECTOR_COND]], <vscale x 4 x i32> [[A]], <vscale x 4 x i32> [[B]]
87 // CHECK-NEXT: ret <vscale x 4 x i32> [[VECTOR_SELECT]]
89 svuint32_t
cond_u32(svuint32_t a
, svuint32_t b
) {
93 // CHECK-LABEL: @_Z8cond_i64u11__SVInt64_tu11__SVInt64_t(
95 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
96 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
97 // CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 2 x i64> [[CONV]], zeroinitializer
98 // CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 2 x i1> [[VECTOR_COND]], <vscale x 2 x i64> [[A]], <vscale x 2 x i64> [[B]]
99 // CHECK-NEXT: ret <vscale x 2 x i64> [[VECTOR_SELECT]]
101 svint64_t
cond_i64(svint64_t a
, svint64_t b
) {
102 return a
< b
? a
: b
;
105 // CHECK-LABEL: @_Z8cond_u64u12__SVUint64_tu12__SVUint64_t(
106 // CHECK-NEXT: entry:
107 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 2 x i64> [[A:%.*]], [[B:%.*]]
108 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
109 // CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 2 x i64> [[CONV]], zeroinitializer
110 // CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 2 x i1> [[VECTOR_COND]], <vscale x 2 x i64> [[A]], <vscale x 2 x i64> [[B]]
111 // CHECK-NEXT: ret <vscale x 2 x i64> [[VECTOR_SELECT]]
113 svuint64_t
cond_u64(svuint64_t a
, svuint64_t b
) {
114 return a
< b
? a
: b
;
117 // CHECK-LABEL: @_Z8cond_f16u13__SVFloat16_tu13__SVFloat16_t(
118 // CHECK-NEXT: entry:
119 // CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 8 x half> [[A:%.*]], [[B:%.*]]
120 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 8 x i1> [[CMP]] to <vscale x 8 x i16>
121 // CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 8 x i16> [[CONV]], zeroinitializer
122 // CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 8 x i1> [[VECTOR_COND]], <vscale x 8 x half> [[A]], <vscale x 8 x half> [[B]]
123 // CHECK-NEXT: ret <vscale x 8 x half> [[VECTOR_SELECT]]
125 svfloat16_t
cond_f16(svfloat16_t a
, svfloat16_t b
) {
126 return a
< b
? a
: b
;
129 // CHECK-LABEL: @_Z8cond_f32u13__SVFloat32_tu13__SVFloat32_t(
130 // CHECK-NEXT: entry:
131 // CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 4 x float> [[A:%.*]], [[B:%.*]]
132 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
133 // CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 4 x i32> [[CONV]], zeroinitializer
134 // CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 4 x i1> [[VECTOR_COND]], <vscale x 4 x float> [[A]], <vscale x 4 x float> [[B]]
135 // CHECK-NEXT: ret <vscale x 4 x float> [[VECTOR_SELECT]]
137 svfloat32_t
cond_f32(svfloat32_t a
, svfloat32_t b
) {
138 return a
< b
? a
: b
;
141 // CHECK-LABEL: @_Z8cond_f64u13__SVFloat64_tu13__SVFloat64_t(
142 // CHECK-NEXT: entry:
143 // CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 2 x double> [[A:%.*]], [[B:%.*]]
144 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
145 // CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 2 x i64> [[CONV]], zeroinitializer
146 // CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 2 x i1> [[VECTOR_COND]], <vscale x 2 x double> [[A]], <vscale x 2 x double> [[B]]
147 // CHECK-NEXT: ret <vscale x 2 x double> [[VECTOR_SELECT]]
149 svfloat64_t
cond_f64(svfloat64_t a
, svfloat64_t b
) {
150 return a
< b
? a
: b
;
153 // CHECK-LABEL: @_Z14cond_i32_splatu11__SVInt32_t(
154 // CHECK-NEXT: entry:
155 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 4 x i32> [[A:%.*]], zeroinitializer
156 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
157 // CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 4 x i32> [[CONV]], zeroinitializer
158 // CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 4 x i1> [[VECTOR_COND]], <vscale x 4 x i32> [[A]], <vscale x 4 x i32> zeroinitializer
159 // CHECK-NEXT: ret <vscale x 4 x i32> [[VECTOR_SELECT]]
161 svint32_t
cond_i32_splat(svint32_t a
) {
162 return a
< 0 ? a
: 0;
165 // CHECK-LABEL: @_Z14cond_u32_splatu12__SVUint32_t(
166 // CHECK-NEXT: entry:
167 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 4 x i32> [[A:%.*]], shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
168 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
169 // CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 4 x i32> [[CONV]], zeroinitializer
170 // CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 4 x i1> [[VECTOR_COND]], <vscale x 4 x i32> [[A]], <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
171 // CHECK-NEXT: ret <vscale x 4 x i32> [[VECTOR_SELECT]]
173 svuint32_t
cond_u32_splat(svuint32_t a
) {
174 return a
< 1u ? a
: 1u;
177 // CHECK-LABEL: @_Z14cond_i64_splatu11__SVInt64_t(
178 // CHECK-NEXT: entry:
179 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 2 x i64> [[A:%.*]], zeroinitializer
180 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
181 // CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 2 x i64> [[CONV]], zeroinitializer
182 // CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 2 x i1> [[VECTOR_COND]], <vscale x 2 x i64> [[A]], <vscale x 2 x i64> zeroinitializer
183 // CHECK-NEXT: ret <vscale x 2 x i64> [[VECTOR_SELECT]]
185 svint64_t
cond_i64_splat(svint64_t a
) {
186 return a
< 0l ? a
: 0l;
189 // CHECK-LABEL: @_Z14cond_u64_splatu12__SVUint64_t(
190 // CHECK-NEXT: entry:
191 // CHECK-NEXT: [[CMP:%.*]] = icmp ult <vscale x 2 x i64> [[A:%.*]], shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
192 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
193 // CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 2 x i64> [[CONV]], zeroinitializer
194 // CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 2 x i1> [[VECTOR_COND]], <vscale x 2 x i64> [[A]], <vscale x 2 x i64> shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> poison, i64 1, i64 0), <vscale x 2 x i64> poison, <vscale x 2 x i32> zeroinitializer)
195 // CHECK-NEXT: ret <vscale x 2 x i64> [[VECTOR_SELECT]]
197 svuint64_t
cond_u64_splat(svuint64_t a
) {
198 return a
< 1ul ? a
: 1ul;
201 // CHECK-LABEL: @_Z14cond_f32_splatu13__SVFloat32_t(
202 // CHECK-NEXT: entry:
203 // CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 4 x float> [[A:%.*]], zeroinitializer
204 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 4 x i1> [[CMP]] to <vscale x 4 x i32>
205 // CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 4 x i32> [[CONV]], zeroinitializer
206 // CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 4 x i1> [[VECTOR_COND]], <vscale x 4 x float> [[A]], <vscale x 4 x float> zeroinitializer
207 // CHECK-NEXT: ret <vscale x 4 x float> [[VECTOR_SELECT]]
209 svfloat32_t
cond_f32_splat(svfloat32_t a
) {
210 return a
< 0.f
? a
: 0.f
;
213 // CHECK-LABEL: @_Z14cond_f64_splatu13__SVFloat64_t(
214 // CHECK-NEXT: entry:
215 // CHECK-NEXT: [[CMP:%.*]] = fcmp olt <vscale x 2 x double> [[A:%.*]], zeroinitializer
216 // CHECK-NEXT: [[CONV:%.*]] = zext <vscale x 2 x i1> [[CMP]] to <vscale x 2 x i64>
217 // CHECK-NEXT: [[VECTOR_COND:%.*]] = icmp ne <vscale x 2 x i64> [[CONV]], zeroinitializer
218 // CHECK-NEXT: [[VECTOR_SELECT:%.*]] = select <vscale x 2 x i1> [[VECTOR_COND]], <vscale x 2 x double> [[A]], <vscale x 2 x double> zeroinitializer
219 // CHECK-NEXT: ret <vscale x 2 x double> [[VECTOR_SELECT]]
221 svfloat64_t
cond_f64_splat(svfloat64_t a
) {
222 return a
< 0. ? a
: 0.;