// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s
// RUN: %clang_cc1 -triple thumbv8.1m.main-none-none-eabi -target-feature +mve.fp -mfloat-abi hard -O0 -disable-O0-optnone -DPOLYMORPHIC -emit-llvm -o - %s | opt -S -passes=mem2reg | FileCheck %s

// REQUIRES: aarch64-registered-target || arm-registered-target

#include <arm_mve.h>
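// vbicq(a, b) is the lane-wise bit-clear operation a & ~b; the CHECK lines
// below verify that the unpredicated forms lower to a plain xor with
// splat(-1) followed by an and, with no target-specific intrinsic needed.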
// CHECK-LABEL: @test_vbicq_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = xor <16 x i8> [[B:%.*]], splat (i8 -1)
// CHECK-NEXT:    [[TMP1:%.*]] = and <16 x i8> [[A:%.*]], [[TMP0]]
// CHECK-NEXT:    ret <16 x i8> [[TMP1]]
//
uint8x16_t test_vbicq_u8(uint8x16_t a, uint8x16_t b)
{
#ifdef POLYMORPHIC
    return vbicq(a, b);
#else /* POLYMORPHIC */
    return vbicq_u8(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vbicq_s16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = xor <8 x i16> [[B:%.*]], splat (i16 -1)
// CHECK-NEXT:    [[TMP1:%.*]] = and <8 x i16> [[A:%.*]], [[TMP0]]
// CHECK-NEXT:    ret <8 x i16> [[TMP1]]
//
int16x8_t test_vbicq_s16(int16x8_t a, int16x8_t b)
{
#ifdef POLYMORPHIC
    return vbicq(a, b);
#else /* POLYMORPHIC */
    return vbicq_s16(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vbicq_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = xor <4 x i32> [[B:%.*]], splat (i32 -1)
// CHECK-NEXT:    [[TMP1:%.*]] = and <4 x i32> [[A:%.*]], [[TMP0]]
// CHECK-NEXT:    ret <4 x i32> [[TMP1]]
//
uint32x4_t test_vbicq_u32(uint32x4_t a, uint32x4_t b)
{
#ifdef POLYMORPHIC
    return vbicq(a, b);
#else /* POLYMORPHIC */
    return vbicq_u32(a, b);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vbicq_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <4 x i32>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x float> [[B:%.*]] to <4 x i32>
// CHECK-NEXT:    [[TMP2:%.*]] = xor <4 x i32> [[TMP1]], splat (i32 -1)
// CHECK-NEXT:    [[TMP3:%.*]] = and <4 x i32> [[TMP0]], [[TMP2]]
// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <4 x i32> [[TMP3]] to <4 x float>
// CHECK-NEXT:    ret <4 x float> [[TMP4]]
//
float32x4_t test_vbicq_f32(float32x4_t a, float32x4_t b)
{
#ifdef POLYMORPHIC
    return vbicq(a, b);
#else /* POLYMORPHIC */
    return vbicq_f32(a, b);
#endif /* POLYMORPHIC */
}

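// The predicated _m forms lower to @llvm.arm.mve.bic.predicated: the i16
// predicate mask is widened and converted to a vector of i1 via
// @llvm.arm.mve.pred.i2v, and masked-off lanes are taken from 'inactive'.
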
// CHECK-LABEL: @test_vbicq_m_s8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.bic.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]], <16 x i8> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
//
int8x16_t test_vbicq_m_s8(int8x16_t inactive, int8x16_t a, int8x16_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vbicq_m(inactive, a, b, p);
#else /* POLYMORPHIC */
    return vbicq_m_s8(inactive, a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vbicq_m_u16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.bic.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]], <8 x i16> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
uint16x8_t test_vbicq_m_u16(uint16x8_t inactive, uint16x8_t a, uint16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vbicq_m(inactive, a, b, p);
#else /* POLYMORPHIC */
    return vbicq_m_u16(inactive, a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vbicq_m_s32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.bic.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]], <4 x i32> [[INACTIVE:%.*]])
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
int32x4_t test_vbicq_m_s32(int32x4_t inactive, int32x4_t a, int32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vbicq_m(inactive, a, b, p);
#else /* POLYMORPHIC */
    return vbicq_m_s32(inactive, a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vbicq_m_f16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x half> [[A:%.*]] to <8 x i16>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x half> [[B:%.*]] to <8 x i16>
// CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP3:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP2]])
// CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x half> [[INACTIVE:%.*]] to <8 x i16>
// CHECK-NEXT:    [[TMP5:%.*]] = call <8 x i16> @llvm.arm.mve.bic.predicated.v8i16.v8i1(<8 x i16> [[TMP0]], <8 x i16> [[TMP1]], <8 x i1> [[TMP3]], <8 x i16> [[TMP4]])
// CHECK-NEXT:    [[TMP6:%.*]] = bitcast <8 x i16> [[TMP5]] to <8 x half>
// CHECK-NEXT:    ret <8 x half> [[TMP6]]
//
float16x8_t test_vbicq_m_f16(float16x8_t inactive, float16x8_t a, float16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vbicq_m(inactive, a, b, p);
#else /* POLYMORPHIC */
    return vbicq_m_f16(inactive, a, b, p);
#endif /* POLYMORPHIC */
}

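// The _x forms use the same predicated intrinsic but pass undef as the
// inactive operand, so lanes that are masked off are left undefined.
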
// CHECK-LABEL: @test_vbicq_x_u8(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <16 x i8> @llvm.arm.mve.bic.predicated.v16i8.v16i1(<16 x i8> [[A:%.*]], <16 x i8> [[B:%.*]], <16 x i1> [[TMP1]], <16 x i8> undef)
// CHECK-NEXT:    ret <16 x i8> [[TMP2]]
//
uint8x16_t test_vbicq_x_u8(uint8x16_t a, uint8x16_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vbicq_x(a, b, p);
#else /* POLYMORPHIC */
    return vbicq_x_u8(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vbicq_x_s16(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <8 x i1> @llvm.arm.mve.pred.i2v.v8i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <8 x i16> @llvm.arm.mve.bic.predicated.v8i16.v8i1(<8 x i16> [[A:%.*]], <8 x i16> [[B:%.*]], <8 x i1> [[TMP1]], <8 x i16> undef)
// CHECK-NEXT:    ret <8 x i16> [[TMP2]]
//
int16x8_t test_vbicq_x_s16(int16x8_t a, int16x8_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vbicq_x(a, b, p);
#else /* POLYMORPHIC */
    return vbicq_x_s16(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vbicq_x_u32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP1:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP0]])
// CHECK-NEXT:    [[TMP2:%.*]] = call <4 x i32> @llvm.arm.mve.bic.predicated.v4i32.v4i1(<4 x i32> [[A:%.*]], <4 x i32> [[B:%.*]], <4 x i1> [[TMP1]], <4 x i32> undef)
// CHECK-NEXT:    ret <4 x i32> [[TMP2]]
//
uint32x4_t test_vbicq_x_u32(uint32x4_t a, uint32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vbicq_x(a, b, p);
#else /* POLYMORPHIC */
    return vbicq_x_u32(a, b, p);
#endif /* POLYMORPHIC */
}

// CHECK-LABEL: @test_vbicq_x_f32(
// CHECK-NEXT:  entry:
// CHECK-NEXT:    [[TMP0:%.*]] = bitcast <4 x float> [[A:%.*]] to <4 x i32>
// CHECK-NEXT:    [[TMP1:%.*]] = bitcast <4 x float> [[B:%.*]] to <4 x i32>
// CHECK-NEXT:    [[TMP2:%.*]] = zext i16 [[P:%.*]] to i32
// CHECK-NEXT:    [[TMP3:%.*]] = call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 [[TMP2]])
// CHECK-NEXT:    [[TMP4:%.*]] = call <4 x i32> @llvm.arm.mve.bic.predicated.v4i32.v4i1(<4 x i32> [[TMP0]], <4 x i32> [[TMP1]], <4 x i1> [[TMP3]], <4 x i32> undef)
// CHECK-NEXT:    [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <4 x float>
// CHECK-NEXT:    ret <4 x float> [[TMP5]]
//
float32x4_t test_vbicq_x_f32(float32x4_t a, float32x4_t b, mve_pred16_t p)
{
#ifdef POLYMORPHIC
    return vbicq_x(a, b, p);
#else /* POLYMORPHIC */
    return vbicq_x_f32(a, b, p);
#endif /* POLYMORPHIC */
}