Revert "Reland '[flang] Allow to pass an async id to allocate the descriptor (#118713...
[llvm-project.git] / clang / test / CodeGen / AArch64 / neon-intrinsics-constrained.c
blob15ae7eea820e801521939acca2ad9bc4ec9e3832
// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
// RUN: -disable-O0-optnone \
// RUN: -flax-vector-conversions=none -emit-llvm -o - %s | opt -S -passes=mem2reg \
// RUN: | FileCheck --check-prefixes=COMMON,COMMONIR,UNCONSTRAINED %s
// RUN: %clang_cc1 -triple arm64-none-linux-gnu -target-feature +neon \
// RUN: -disable-O0-optnone \
// RUN: -ffp-exception-behavior=strict \
// RUN: -flax-vector-conversions=none -emit-llvm -o - %s | opt -S -passes=mem2reg \
// RUN: | FileCheck --check-prefixes=COMMON,COMMONIR,CONSTRAINED %s

// REQUIRES: aarch64-registered-target

// Test AArch64 NEON intrinsics and types under constrained floating-point
// semantics.
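// The UNCONSTRAINED prefix matches the default build, where the intrinsics
// lower to ordinary IR instructions (fadd, fmul, fcmp, ...). The CONSTRAINED
// prefix matches the -ffp-exception-behavior=strict build, where the same
// operations lower to @llvm.experimental.constrained.* intrinsics carrying
// rounding-mode and exception-behavior metadata. COMMONIR matches IR that is
// identical in both modes.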

#include <arm_neon.h>

// COMMON-LABEL: test_vadd_f32
// UNCONSTRAINED: [[ADD_I:%.*]] = fadd <2 x float> %v1, %v2
// CONSTRAINED: [[ADD_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %v1, <2 x float> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <2 x float> [[ADD_I]]
float32x2_t test_vadd_f32(float32x2_t v1, float32x2_t v2) {
  return vadd_f32(v1, v2);
}

// COMMON-LABEL: test_vaddq_f32
// UNCONSTRAINED: [[ADD_I:%.*]] = fadd <4 x float> %v1, %v2
// CONSTRAINED: [[ADD_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float> %v1, <4 x float> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <4 x float> [[ADD_I]]
float32x4_t test_vaddq_f32(float32x4_t v1, float32x4_t v2) {
  return vaddq_f32(v1, v2);
}

// COMMON-LABEL: test_vsub_f32
// UNCONSTRAINED: [[SUB_I:%.*]] = fsub <2 x float> %v1, %v2
// CONSTRAINED: [[SUB_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> %v1, <2 x float> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <2 x float> [[SUB_I]]
float32x2_t test_vsub_f32(float32x2_t v1, float32x2_t v2) {
  return vsub_f32(v1, v2);
}

// COMMON-LABEL: test_vsubq_f32
// UNCONSTRAINED: [[SUB_I:%.*]] = fsub <4 x float> %v1, %v2
// CONSTRAINED: [[SUB_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float> %v1, <4 x float> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <4 x float> [[SUB_I]]
float32x4_t test_vsubq_f32(float32x4_t v1, float32x4_t v2) {
  return vsubq_f32(v1, v2);
}

// COMMON-LABEL: test_vsubq_f64
// UNCONSTRAINED: [[SUB_I:%.*]] = fsub <2 x double> %v1, %v2
// CONSTRAINED: [[SUB_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %v1, <2 x double> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <2 x double> [[SUB_I]]
float64x2_t test_vsubq_f64(float64x2_t v1, float64x2_t v2) {
  return vsubq_f64(v1, v2);
}

// COMMON-LABEL: test_vmul_f32
// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <2 x float> %v1, %v2
// CONSTRAINED: [[MUL_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float> %v1, <2 x float> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <2 x float> [[MUL_I]]
float32x2_t test_vmul_f32(float32x2_t v1, float32x2_t v2) {
  return vmul_f32(v1, v2);
}

// COMMON-LABEL: test_vmulq_f32
// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <4 x float> %v1, %v2
// CONSTRAINED: [[MUL_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float> %v1, <4 x float> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <4 x float> [[MUL_I]]
float32x4_t test_vmulq_f32(float32x4_t v1, float32x4_t v2) {
  return vmulq_f32(v1, v2);
}

// COMMON-LABEL: test_vmulq_f64
// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <2 x double> %v1, %v2
// CONSTRAINED: [[MUL_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %v1, <2 x double> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <2 x double> [[MUL_I]]
float64x2_t test_vmulq_f64(float64x2_t v1, float64x2_t v2) {
  return vmulq_f64(v1, v2);
}

// COMMON-LABEL: test_vmla_f32
// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <2 x float> %v2, %v3
// CONSTRAINED: [[MUL_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float> %v2, <2 x float> %v3, metadata !"round.tonearest", metadata !"fpexcept.strict")
// UNCONSTRAINED: [[ADD_I:%.*]] = fadd <2 x float> %v1, [[MUL_I]]
// CONSTRAINED: [[ADD_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fadd.v2f32(<2 x float> %v1, <2 x float> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <2 x float> [[ADD_I]]
float32x2_t test_vmla_f32(float32x2_t v1, float32x2_t v2, float32x2_t v3) {
  return vmla_f32(v1, v2, v3);
}

// COMMON-LABEL: test_vmlaq_f32
// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <4 x float> %v2, %v3
// CONSTRAINED: [[MUL_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float> %v2, <4 x float> %v3, metadata !"round.tonearest", metadata !"fpexcept.strict")
// UNCONSTRAINED: [[ADD_I:%.*]] = fadd <4 x float> %v1, [[MUL_I]]
// CONSTRAINED: [[ADD_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float> %v1, <4 x float> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <4 x float> [[ADD_I]]
float32x4_t test_vmlaq_f32(float32x4_t v1, float32x4_t v2, float32x4_t v3) {
  return vmlaq_f32(v1, v2, v3);
}

// COMMON-LABEL: test_vmlaq_f64
// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <2 x double> %v2, %v3
// CONSTRAINED: [[MUL_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %v2, <2 x double> %v3, metadata !"round.tonearest", metadata !"fpexcept.strict")
// UNCONSTRAINED: [[ADD_I:%.*]] = fadd <2 x double> %v1, [[MUL_I]]
// CONSTRAINED: [[ADD_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> %v1, <2 x double> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <2 x double> [[ADD_I]]
float64x2_t test_vmlaq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) {
  return vmlaq_f64(v1, v2, v3);
}

// COMMON-LABEL: test_vmls_f32
// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <2 x float> %v2, %v3
// CONSTRAINED: [[MUL_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fmul.v2f32(<2 x float> %v2, <2 x float> %v3, metadata !"round.tonearest", metadata !"fpexcept.strict")
// UNCONSTRAINED: [[SUB_I:%.*]] = fsub <2 x float> %v1, [[MUL_I]]
// CONSTRAINED: [[SUB_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fsub.v2f32(<2 x float> %v1, <2 x float> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <2 x float> [[SUB_I]]
float32x2_t test_vmls_f32(float32x2_t v1, float32x2_t v2, float32x2_t v3) {
  return vmls_f32(v1, v2, v3);
}

// COMMON-LABEL: test_vmlsq_f32
// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <4 x float> %v2, %v3
// CONSTRAINED: [[MUL_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float> %v2, <4 x float> %v3, metadata !"round.tonearest", metadata !"fpexcept.strict")
// UNCONSTRAINED: [[SUB_I:%.*]] = fsub <4 x float> %v1, [[MUL_I]]
// CONSTRAINED: [[SUB_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float> %v1, <4 x float> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <4 x float> [[SUB_I]]
float32x4_t test_vmlsq_f32(float32x4_t v1, float32x4_t v2, float32x4_t v3) {
  return vmlsq_f32(v1, v2, v3);
}

// COMMON-LABEL: test_vmlsq_f64
// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <2 x double> %v2, %v3
// CONSTRAINED: [[MUL_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %v2, <2 x double> %v3, metadata !"round.tonearest", metadata !"fpexcept.strict")
// UNCONSTRAINED: [[SUB_I:%.*]] = fsub <2 x double> %v1, [[MUL_I]]
// CONSTRAINED: [[SUB_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %v1, <2 x double> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <2 x double> [[SUB_I]]
float64x2_t test_vmlsq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) {
  return vmlsq_f64(v1, v2, v3);
}
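
// vfma and its variants lower directly to the fma intrinsic: @llvm.fma.* in
// the default build and @llvm.experimental.constrained.fma.* in the strict
// build.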

// COMMON-LABEL: test_vfma_f32
// COMMONIR: [[TMP0:%.*]] = bitcast <2 x float> %v1 to <8 x i8>
// COMMONIR: [[TMP1:%.*]] = bitcast <2 x float> %v2 to <8 x i8>
// COMMONIR: [[TMP2:%.*]] = bitcast <2 x float> %v3 to <8 x i8>
// UNCONSTRAINED: [[TMP3:%.*]] = call <2 x float> @llvm.fma.v2f32(<2 x float> %v2, <2 x float> %v3, <2 x float> %v1)
// CONSTRAINED: [[TMP3:%.*]] = call <2 x float> @llvm.experimental.constrained.fma.v2f32(<2 x float> %v2, <2 x float> %v3, <2 x float> %v1, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <2 x float> [[TMP3]]
float32x2_t test_vfma_f32(float32x2_t v1, float32x2_t v2, float32x2_t v3) {
  return vfma_f32(v1, v2, v3);
}

// COMMON-LABEL: test_vfmaq_f32
// COMMONIR: [[TMP0:%.*]] = bitcast <4 x float> %v1 to <16 x i8>
// COMMONIR: [[TMP1:%.*]] = bitcast <4 x float> %v2 to <16 x i8>
// COMMONIR: [[TMP2:%.*]] = bitcast <4 x float> %v3 to <16 x i8>
// UNCONSTRAINED: [[TMP3:%.*]] = call <4 x float> @llvm.fma.v4f32(<4 x float> %v2, <4 x float> %v3, <4 x float> %v1)
// CONSTRAINED: [[TMP3:%.*]] = call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %v2, <4 x float> %v3, <4 x float> %v1, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <4 x float> [[TMP3]]
float32x4_t test_vfmaq_f32(float32x4_t v1, float32x4_t v2, float32x4_t v3) {
  return vfmaq_f32(v1, v2, v3);
}

// COMMON-LABEL: test_vfmaq_f64
// COMMONIR: [[TMP0:%.*]] = bitcast <2 x double> %v1 to <16 x i8>
// COMMONIR: [[TMP1:%.*]] = bitcast <2 x double> %v2 to <16 x i8>
// COMMONIR: [[TMP2:%.*]] = bitcast <2 x double> %v3 to <16 x i8>
// UNCONSTRAINED: [[TMP3:%.*]] = call <2 x double> @llvm.fma.v2f64(<2 x double> %v2, <2 x double> %v3, <2 x double> %v1)
// CONSTRAINED: [[TMP3:%.*]] = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %v2, <2 x double> %v3, <2 x double> %v1, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <2 x double> [[TMP3]]
float64x2_t test_vfmaq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) {
  return vfmaq_f64(v1, v2, v3);
}
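
// vfms has no fms intrinsic of its own: the second multiplicand is negated
// with fneg and the result is fed to the same (constrained) fma intrinsic.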

// COMMON-LABEL: test_vfms_f32
// COMMONIR: [[SUB_I:%.*]] = fneg <2 x float> %v2
// COMMONIR: [[TMP0:%.*]] = bitcast <2 x float> %v1 to <8 x i8>
// COMMONIR: [[TMP1:%.*]] = bitcast <2 x float> [[SUB_I]] to <8 x i8>
// COMMONIR: [[TMP2:%.*]] = bitcast <2 x float> %v3 to <8 x i8>
// UNCONSTRAINED: [[TMP3:%.*]] = call <2 x float> @llvm.fma.v2f32(<2 x float> [[SUB_I]], <2 x float> %v3, <2 x float> %v1)
// CONSTRAINED: [[TMP3:%.*]] = call <2 x float> @llvm.experimental.constrained.fma.v2f32(<2 x float> [[SUB_I]], <2 x float> %v3, <2 x float> %v1, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <2 x float> [[TMP3]]
float32x2_t test_vfms_f32(float32x2_t v1, float32x2_t v2, float32x2_t v3) {
  return vfms_f32(v1, v2, v3);
}

// COMMON-LABEL: test_vfmsq_f32
// COMMONIR: [[SUB_I:%.*]] = fneg <4 x float> %v2
// COMMONIR: [[TMP0:%.*]] = bitcast <4 x float> %v1 to <16 x i8>
// COMMONIR: [[TMP1:%.*]] = bitcast <4 x float> [[SUB_I]] to <16 x i8>
// COMMONIR: [[TMP2:%.*]] = bitcast <4 x float> %v3 to <16 x i8>
// UNCONSTRAINED: [[TMP3:%.*]] = call <4 x float> @llvm.fma.v4f32(<4 x float> [[SUB_I]], <4 x float> %v3, <4 x float> %v1)
// CONSTRAINED: [[TMP3:%.*]] = call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> [[SUB_I]], <4 x float> %v3, <4 x float> %v1, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <4 x float> [[TMP3]]
float32x4_t test_vfmsq_f32(float32x4_t v1, float32x4_t v2, float32x4_t v3) {
  return vfmsq_f32(v1, v2, v3);
}

// COMMON-LABEL: test_vfmsq_f64
// COMMONIR: [[SUB_I:%.*]] = fneg <2 x double> %v2
// COMMONIR: [[TMP0:%.*]] = bitcast <2 x double> %v1 to <16 x i8>
// COMMONIR: [[TMP1:%.*]] = bitcast <2 x double> [[SUB_I]] to <16 x i8>
// COMMONIR: [[TMP2:%.*]] = bitcast <2 x double> %v3 to <16 x i8>
// UNCONSTRAINED: [[TMP3:%.*]] = call <2 x double> @llvm.fma.v2f64(<2 x double> [[SUB_I]], <2 x double> %v3, <2 x double> %v1)
// CONSTRAINED: [[TMP3:%.*]] = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> [[SUB_I]], <2 x double> %v3, <2 x double> %v1, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <2 x double> [[TMP3]]
float64x2_t test_vfmsq_f64(float64x2_t v1, float64x2_t v2, float64x2_t v3) {
  return vfmsq_f64(v1, v2, v3);
}

// COMMON-LABEL: test_vdivq_f64
// UNCONSTRAINED: [[DIV_I:%.*]] = fdiv <2 x double> %v1, %v2
// CONSTRAINED: [[DIV_I:%.*]] = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double> %v1, <2 x double> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <2 x double> [[DIV_I]]
float64x2_t test_vdivq_f64(float64x2_t v1, float64x2_t v2) {
  return vdivq_f64(v1, v2);
}

// COMMON-LABEL: test_vdivq_f32
// UNCONSTRAINED: [[DIV_I:%.*]] = fdiv <4 x float> %v1, %v2
// CONSTRAINED: [[DIV_I:%.*]] = call <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float> %v1, <4 x float> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <4 x float> [[DIV_I]]
float32x4_t test_vdivq_f32(float32x4_t v1, float32x4_t v2) {
  return vdivq_f32(v1, v2);
}

// COMMON-LABEL: test_vdiv_f32
// UNCONSTRAINED: [[DIV_I:%.*]] = fdiv <2 x float> %v1, %v2
// CONSTRAINED: [[DIV_I:%.*]] = call <2 x float> @llvm.experimental.constrained.fdiv.v2f32(<2 x float> %v1, <2 x float> %v2, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <2 x float> [[DIV_I]]
float32x2_t test_vdiv_f32(float32x2_t v1, float32x2_t v2) {
  return vdiv_f32(v1, v2);
}

// COMMON-LABEL: test_vceq_f32
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp oeq <2 x float> %v1, %v2
// CONSTRAINED: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f32(<2 x float> %v1, <2 x float> %v2, metadata !"oeq", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32>
// COMMONIR: ret <2 x i32> [[SEXT_I]]
uint32x2_t test_vceq_f32(float32x2_t v1, float32x2_t v2) {
  return vceq_f32(v1, v2);
}

// COMMON-LABEL: test_vceq_f64
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp oeq <1 x double> %a, %b
// CONSTRAINED: [[CMP_I:%.*]] = call <1 x i1> @llvm.experimental.constrained.fcmp.v1f64(<1 x double> %a, <1 x double> %b, metadata !"oeq", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64>
// COMMONIR: ret <1 x i64> [[SEXT_I]]
uint64x1_t test_vceq_f64(float64x1_t a, float64x1_t b) {
  return vceq_f64(a, b);
}

// COMMON-LABEL: test_vceqq_f32
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp oeq <4 x float> %v1, %v2
// CONSTRAINED: [[CMP_I:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float> %v1, <4 x float> %v2, metadata !"oeq", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32>
// COMMONIR: ret <4 x i32> [[SEXT_I]]
uint32x4_t test_vceqq_f32(float32x4_t v1, float32x4_t v2) {
  return vceqq_f32(v1, v2);
}

// COMMON-LABEL: test_vceqq_f64
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp oeq <2 x double> %v1, %v2
// CONSTRAINED: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double> %v1, <2 x double> %v2, metadata !"oeq", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64>
// COMMONIR: ret <2 x i64> [[SEXT_I]]
uint64x2_t test_vceqq_f64(float64x2_t v1, float64x2_t v2) {
  return vceqq_f64(v1, v2);
}
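
// The equality tests above use the quiet @llvm.experimental.constrained.fcmp;
// the ordering comparisons that follow use the signaling
// @llvm.experimental.constrained.fcmps, which raises an invalid-operation
// exception even for quiet NaN operands.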

// COMMON-LABEL: test_vcge_f32
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp oge <2 x float> %v1, %v2
// CONSTRAINED: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f32(<2 x float> %v1, <2 x float> %v2, metadata !"oge", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32>
// COMMONIR: ret <2 x i32> [[SEXT_I]]
uint32x2_t test_vcge_f32(float32x2_t v1, float32x2_t v2) {
  return vcge_f32(v1, v2);
}

// COMMON-LABEL: test_vcge_f64
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp oge <1 x double> %a, %b
// CONSTRAINED: [[CMP_I:%.*]] = call <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double> %a, <1 x double> %b, metadata !"oge", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64>
// COMMONIR: ret <1 x i64> [[SEXT_I]]
uint64x1_t test_vcge_f64(float64x1_t a, float64x1_t b) {
  return vcge_f64(a, b);
}

// COMMON-LABEL: test_vcgeq_f32
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp oge <4 x float> %v1, %v2
// CONSTRAINED: [[CMP_I:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %v1, <4 x float> %v2, metadata !"oge", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32>
// COMMONIR: ret <4 x i32> [[SEXT_I]]
uint32x4_t test_vcgeq_f32(float32x4_t v1, float32x4_t v2) {
  return vcgeq_f32(v1, v2);
}

// COMMON-LABEL: test_vcgeq_f64
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp oge <2 x double> %v1, %v2
// CONSTRAINED: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %v1, <2 x double> %v2, metadata !"oge", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64>
// COMMONIR: ret <2 x i64> [[SEXT_I]]
uint64x2_t test_vcgeq_f64(float64x2_t v1, float64x2_t v2) {
  return vcgeq_f64(v1, v2);
}

// COMMON-LABEL: test_vcle_f32
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp ole <2 x float> %v1, %v2
// CONSTRAINED: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f32(<2 x float> %v1, <2 x float> %v2, metadata !"ole", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32>
// COMMONIR: ret <2 x i32> [[SEXT_I]]
uint32x2_t test_vcle_f32(float32x2_t v1, float32x2_t v2) {
  return vcle_f32(v1, v2);
}

// COMMON-LABEL: test_vcle_f64
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp ole <1 x double> %a, %b
// CONSTRAINED: [[CMP_I:%.*]] = call <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double> %a, <1 x double> %b, metadata !"ole", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64>
// COMMONIR: ret <1 x i64> [[SEXT_I]]
uint64x1_t test_vcle_f64(float64x1_t a, float64x1_t b) {
  return vcle_f64(a, b);
}

// COMMON-LABEL: test_vcleq_f32
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp ole <4 x float> %v1, %v2
// CONSTRAINED: [[CMP_I:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %v1, <4 x float> %v2, metadata !"ole", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32>
// COMMONIR: ret <4 x i32> [[SEXT_I]]
uint32x4_t test_vcleq_f32(float32x4_t v1, float32x4_t v2) {
  return vcleq_f32(v1, v2);
}

// COMMON-LABEL: test_vcleq_f64
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp ole <2 x double> %v1, %v2
// CONSTRAINED: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %v1, <2 x double> %v2, metadata !"ole", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64>
// COMMONIR: ret <2 x i64> [[SEXT_I]]
uint64x2_t test_vcleq_f64(float64x2_t v1, float64x2_t v2) {
  return vcleq_f64(v1, v2);
}

// COMMON-LABEL: test_vcgt_f32
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp ogt <2 x float> %v1, %v2
// CONSTRAINED: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f32(<2 x float> %v1, <2 x float> %v2, metadata !"ogt", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32>
// COMMONIR: ret <2 x i32> [[SEXT_I]]
uint32x2_t test_vcgt_f32(float32x2_t v1, float32x2_t v2) {
  return vcgt_f32(v1, v2);
}

// COMMON-LABEL: test_vcgt_f64
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp ogt <1 x double> %a, %b
// CONSTRAINED: [[CMP_I:%.*]] = call <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double> %a, <1 x double> %b, metadata !"ogt", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64>
// COMMONIR: ret <1 x i64> [[SEXT_I]]
uint64x1_t test_vcgt_f64(float64x1_t a, float64x1_t b) {
  return vcgt_f64(a, b);
}

// COMMON-LABEL: test_vcgtq_f32
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp ogt <4 x float> %v1, %v2
// CONSTRAINED: [[CMP_I:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %v1, <4 x float> %v2, metadata !"ogt", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32>
// COMMONIR: ret <4 x i32> [[SEXT_I]]
uint32x4_t test_vcgtq_f32(float32x4_t v1, float32x4_t v2) {
  return vcgtq_f32(v1, v2);
}

// COMMON-LABEL: test_vcgtq_f64
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp ogt <2 x double> %v1, %v2
// CONSTRAINED: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %v1, <2 x double> %v2, metadata !"ogt", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64>
// COMMONIR: ret <2 x i64> [[SEXT_I]]
uint64x2_t test_vcgtq_f64(float64x2_t v1, float64x2_t v2) {
  return vcgtq_f64(v1, v2);
}

// COMMON-LABEL: test_vclt_f32
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp olt <2 x float> %v1, %v2
// CONSTRAINED: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f32(<2 x float> %v1, <2 x float> %v2, metadata !"olt", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i32>
// COMMONIR: ret <2 x i32> [[SEXT_I]]
uint32x2_t test_vclt_f32(float32x2_t v1, float32x2_t v2) {
  return vclt_f32(v1, v2);
}

// COMMON-LABEL: test_vclt_f64
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp olt <1 x double> %a, %b
// CONSTRAINED: [[CMP_I:%.*]] = call <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double> %a, <1 x double> %b, metadata !"olt", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <1 x i1> [[CMP_I]] to <1 x i64>
// COMMONIR: ret <1 x i64> [[SEXT_I]]
uint64x1_t test_vclt_f64(float64x1_t a, float64x1_t b) {
  return vclt_f64(a, b);
}

// COMMON-LABEL: test_vcltq_f32
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp olt <4 x float> %v1, %v2
// CONSTRAINED: [[CMP_I:%.*]] = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %v1, <4 x float> %v2, metadata !"olt", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <4 x i1> [[CMP_I]] to <4 x i32>
// COMMONIR: ret <4 x i32> [[SEXT_I]]
uint32x4_t test_vcltq_f32(float32x4_t v1, float32x4_t v2) {
  return vcltq_f32(v1, v2);
}

// COMMON-LABEL: test_vcltq_f64
// UNCONSTRAINED: [[CMP_I:%.*]] = fcmp olt <2 x double> %v1, %v2
// CONSTRAINED: [[CMP_I:%.*]] = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %v1, <2 x double> %v2, metadata !"olt", metadata !"fpexcept.strict")
// COMMONIR: [[SEXT_I:%.*]] = sext <2 x i1> [[CMP_I]] to <2 x i64>
// COMMONIR: ret <2 x i64> [[SEXT_I]]
uint64x2_t test_vcltq_f64(float64x2_t v1, float64x2_t v2) {
  return vcltq_f64(v1, v2);
}
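
// The scalar pairwise adds are lowered without a pairwise intrinsic: both
// lanes are extracted and added with a scalar fadd (a constrained.fadd call
// in the strict build).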

// COMMON-LABEL: test_vpadds_f32
// COMMONIR: [[LANE0_I:%.*]] = extractelement <2 x float> %a, i64 0
// COMMONIR: [[LANE1_I:%.*]] = extractelement <2 x float> %a, i64 1
// UNCONSTRAINED: [[VPADDD_I:%.*]] = fadd float [[LANE0_I]], [[LANE1_I]]
// CONSTRAINED: [[VPADDD_I:%.*]] = call float @llvm.experimental.constrained.fadd.f32(float [[LANE0_I]], float [[LANE1_I]], metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret float [[VPADDD_I]]
float32_t test_vpadds_f32(float32x2_t a) {
  return vpadds_f32(a);
}

// COMMON-LABEL: test_vpaddd_f64
// COMMONIR: [[LANE0_I:%.*]] = extractelement <2 x double> %a, i64 0
// COMMONIR: [[LANE1_I:%.*]] = extractelement <2 x double> %a, i64 1
// UNCONSTRAINED: [[VPADDD_I:%.*]] = fadd double [[LANE0_I]], [[LANE1_I]]
// CONSTRAINED: [[VPADDD_I:%.*]] = call double @llvm.experimental.constrained.fadd.f64(double [[LANE0_I]], double [[LANE1_I]], metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret double [[VPADDD_I]]
float64_t test_vpaddd_f64(float64x2_t a) {
  return vpaddd_f64(a);
}

// COMMON-LABEL: test_vcvts_f32_s32
// UNCONSTRAINED: [[TMP0:%.*]] = sitofp i32 %a to float
// CONSTRAINED: [[TMP0:%.*]] = call float @llvm.experimental.constrained.sitofp.f32.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret float [[TMP0]]
float32_t test_vcvts_f32_s32(int32_t a) {
  return vcvts_f32_s32(a);
}

// COMMON-LABEL: test_vcvtd_f64_s64
// UNCONSTRAINED: [[TMP0:%.*]] = sitofp i64 %a to double
// CONSTRAINED: [[TMP0:%.*]] = call double @llvm.experimental.constrained.sitofp.f64.i64(i64 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret double [[TMP0]]
float64_t test_vcvtd_f64_s64(int64_t a) {
  return vcvtd_f64_s64(a);
}

// COMMON-LABEL: test_vcvts_f32_u32
// UNCONSTRAINED: [[TMP0:%.*]] = uitofp i32 %a to float
// CONSTRAINED: [[TMP0:%.*]] = call float @llvm.experimental.constrained.uitofp.f32.i32(i32 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret float [[TMP0]]
float32_t test_vcvts_f32_u32(uint32_t a) {
  return vcvts_f32_u32(a);
}

// XXX should verify the type of registers
// COMMON-LABEL: test_vcvtd_f64_u64
// UNCONSTRAINED: [[TMP0:%.*]] = uitofp i64 %a to double
// CONSTRAINED: [[TMP0:%.*]] = call double @llvm.experimental.constrained.uitofp.f64.i64(i64 %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret double [[TMP0]]
float64_t test_vcvtd_f64_u64(uint64_t a) {
  return vcvtd_f64_u64(a);
}

// COMMON-LABEL: test_vceqs_f32
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp oeq float %a, %b
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float %b, metadata !"oeq", metadata !"fpexcept.strict")
// COMMONIR: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32
// COMMONIR: ret i32 [[VCMPD_I]]
uint32_t test_vceqs_f32(float32_t a, float32_t b) {
  return (uint32_t)vceqs_f32(a, b);
}

// COMMON-LABEL: test_vceqd_f64
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp oeq double %a, %b
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double %b, metadata !"oeq", metadata !"fpexcept.strict")
// COMMONIR: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64
// COMMONIR: ret i64 [[VCMPD_I]]
uint64_t test_vceqd_f64(float64_t a, float64_t b) {
  return (uint64_t)vceqd_f64(a, b);
}

// COMMON-LABEL: test_vceqzs_f32
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp oeq float %a, 0.000000e+00
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f32(float %a, float 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict")
// COMMONIR: [[VCEQZ_I:%.*]] = sext i1 [[TMP0]] to i32
// COMMONIR: ret i32 [[VCEQZ_I]]
uint32_t test_vceqzs_f32(float32_t a) {
  return (uint32_t)vceqzs_f32(a);
}

// COMMON-LABEL: test_vceqzd_f64
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp oeq double %a, 0.000000e+00
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmp.f64(double %a, double 0.000000e+00, metadata !"oeq", metadata !"fpexcept.strict")
// COMMONIR: [[VCEQZ_I:%.*]] = sext i1 [[TMP0]] to i64
// COMMONIR: ret i64 [[VCEQZ_I]]
uint64_t test_vceqzd_f64(float64_t a) {
  return (uint64_t)vceqzd_f64(a);
}

// COMMON-LABEL: test_vcges_f32
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp oge float %a, %b
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"oge", metadata !"fpexcept.strict")
// COMMONIR: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32
// COMMONIR: ret i32 [[VCMPD_I]]
uint32_t test_vcges_f32(float32_t a, float32_t b) {
  return (uint32_t)vcges_f32(a, b);
}

// COMMON-LABEL: test_vcged_f64
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp oge double %a, %b
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"oge", metadata !"fpexcept.strict")
// COMMONIR: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64
// COMMONIR: ret i64 [[VCMPD_I]]
uint64_t test_vcged_f64(float64_t a, float64_t b) {
  return (uint64_t)vcged_f64(a, b);
}

// COMMON-LABEL: test_vcgezs_f32
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp oge float %a, 0.000000e+00
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float 0.000000e+00, metadata !"oge", metadata !"fpexcept.strict")
// COMMONIR: [[VCGEZ_I:%.*]] = sext i1 [[TMP0]] to i32
// COMMONIR: ret i32 [[VCGEZ_I]]
uint32_t test_vcgezs_f32(float32_t a) {
  return (uint32_t)vcgezs_f32(a);
}

// COMMON-LABEL: test_vcgezd_f64
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp oge double %a, 0.000000e+00
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double 0.000000e+00, metadata !"oge", metadata !"fpexcept.strict")
// COMMONIR: [[VCGEZ_I:%.*]] = sext i1 [[TMP0]] to i64
// COMMONIR: ret i64 [[VCGEZ_I]]
uint64_t test_vcgezd_f64(float64_t a) {
  return (uint64_t)vcgezd_f64(a);
}

// COMMON-LABEL: test_vcgts_f32
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp ogt float %a, %b
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ogt", metadata !"fpexcept.strict")
// COMMONIR: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32
// COMMONIR: ret i32 [[VCMPD_I]]
uint32_t test_vcgts_f32(float32_t a, float32_t b) {
  return (uint32_t)vcgts_f32(a, b);
}

// COMMON-LABEL: test_vcgtd_f64
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp ogt double %a, %b
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ogt", metadata !"fpexcept.strict")
// COMMONIR: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64
// COMMONIR: ret i64 [[VCMPD_I]]
uint64_t test_vcgtd_f64(float64_t a, float64_t b) {
  return (uint64_t)vcgtd_f64(a, b);
}

// COMMON-LABEL: test_vcgtzs_f32
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp ogt float %a, 0.000000e+00
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float 0.000000e+00, metadata !"ogt", metadata !"fpexcept.strict")
// COMMONIR: [[VCGTZ_I:%.*]] = sext i1 [[TMP0]] to i32
// COMMONIR: ret i32 [[VCGTZ_I]]
uint32_t test_vcgtzs_f32(float32_t a) {
  return (uint32_t)vcgtzs_f32(a);
}

// COMMON-LABEL: test_vcgtzd_f64
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp ogt double %a, 0.000000e+00
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double 0.000000e+00, metadata !"ogt", metadata !"fpexcept.strict")
// COMMONIR: [[VCGTZ_I:%.*]] = sext i1 [[TMP0]] to i64
// COMMONIR: ret i64 [[VCGTZ_I]]
uint64_t test_vcgtzd_f64(float64_t a) {
  return (uint64_t)vcgtzd_f64(a);
}

// COMMON-LABEL: test_vcles_f32
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp ole float %a, %b
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"ole", metadata !"fpexcept.strict")
// COMMONIR: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32
// COMMONIR: ret i32 [[VCMPD_I]]
uint32_t test_vcles_f32(float32_t a, float32_t b) {
  return (uint32_t)vcles_f32(a, b);
}

// COMMON-LABEL: test_vcled_f64
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp ole double %a, %b
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"ole", metadata !"fpexcept.strict")
// COMMONIR: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64
// COMMONIR: ret i64 [[VCMPD_I]]
uint64_t test_vcled_f64(float64_t a, float64_t b) {
  return (uint64_t)vcled_f64(a, b);
}

// COMMON-LABEL: test_vclezs_f32
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp ole float %a, 0.000000e+00
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float 0.000000e+00, metadata !"ole", metadata !"fpexcept.strict")
// COMMONIR: [[VCLEZ_I:%.*]] = sext i1 [[TMP0]] to i32
// COMMONIR: ret i32 [[VCLEZ_I]]
uint32_t test_vclezs_f32(float32_t a) {
  return (uint32_t)vclezs_f32(a);
}

// COMMON-LABEL: test_vclezd_f64
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp ole double %a, 0.000000e+00
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double 0.000000e+00, metadata !"ole", metadata !"fpexcept.strict")
// COMMONIR: [[VCLEZ_I:%.*]] = sext i1 [[TMP0]] to i64
// COMMONIR: ret i64 [[VCLEZ_I]]
uint64_t test_vclezd_f64(float64_t a) {
  return (uint64_t)vclezd_f64(a);
}

// COMMON-LABEL: test_vclts_f32
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp olt float %a, %b
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float %b, metadata !"olt", metadata !"fpexcept.strict")
// COMMONIR: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i32
// COMMONIR: ret i32 [[VCMPD_I]]
uint32_t test_vclts_f32(float32_t a, float32_t b) {
  return (uint32_t)vclts_f32(a, b);
}

// COMMON-LABEL: test_vcltd_f64
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp olt double %a, %b
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double %b, metadata !"olt", metadata !"fpexcept.strict")
// COMMONIR: [[VCMPD_I:%.*]] = sext i1 [[TMP0]] to i64
// COMMONIR: ret i64 [[VCMPD_I]]
uint64_t test_vcltd_f64(float64_t a, float64_t b) {
  return (uint64_t)vcltd_f64(a, b);
}

// COMMON-LABEL: test_vcltzs_f32
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp olt float %a, 0.000000e+00
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f32(float %a, float 0.000000e+00, metadata !"olt", metadata !"fpexcept.strict")
// COMMONIR: [[VCLTZ_I:%.*]] = sext i1 [[TMP0]] to i32
// COMMONIR: ret i32 [[VCLTZ_I]]
uint32_t test_vcltzs_f32(float32_t a) {
  return (uint32_t)vcltzs_f32(a);
}

// COMMON-LABEL: test_vcltzd_f64
// UNCONSTRAINED: [[TMP0:%.*]] = fcmp olt double %a, 0.000000e+00
// CONSTRAINED: [[TMP0:%.*]] = call i1 @llvm.experimental.constrained.fcmps.f64(double %a, double 0.000000e+00, metadata !"olt", metadata !"fpexcept.strict")
// COMMONIR: [[VCLTZ_I:%.*]] = sext i1 [[TMP0]] to i64
// COMMONIR: ret i64 [[VCLTZ_I]]
uint64_t test_vcltzd_f64(float64_t a) {
  return (uint64_t)vcltzd_f64(a);
}
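
// The remaining tests cover the 64-bit (single-lane) vector forms, which
// lower to <1 x double> IR operations.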

// COMMON-LABEL: test_vadd_f64
// UNCONSTRAINED: [[ADD_I:%.*]] = fadd <1 x double> %a, %b
// CONSTRAINED: [[ADD_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fadd.v1f64(<1 x double> %a, <1 x double> %b, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <1 x double> [[ADD_I]]
float64x1_t test_vadd_f64(float64x1_t a, float64x1_t b) {
  return vadd_f64(a, b);
}

// COMMON-LABEL: test_vmul_f64
// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <1 x double> %a, %b
// CONSTRAINED: [[MUL_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double> %a, <1 x double> %b, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <1 x double> [[MUL_I]]
float64x1_t test_vmul_f64(float64x1_t a, float64x1_t b) {
  return vmul_f64(a, b);
}

// COMMON-LABEL: test_vdiv_f64
// UNCONSTRAINED: [[DIV_I:%.*]] = fdiv <1 x double> %a, %b
// CONSTRAINED: [[DIV_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fdiv.v1f64(<1 x double> %a, <1 x double> %b, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <1 x double> [[DIV_I]]
float64x1_t test_vdiv_f64(float64x1_t a, float64x1_t b) {
  return vdiv_f64(a, b);
}

// COMMON-LABEL: test_vmla_f64
// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <1 x double> %b, %c
// CONSTRAINED: [[MUL_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double> %b, <1 x double> %c, metadata !"round.tonearest", metadata !"fpexcept.strict")
// UNCONSTRAINED: [[ADD_I:%.*]] = fadd <1 x double> %a, [[MUL_I]]
// CONSTRAINED: [[ADD_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fadd.v1f64(<1 x double> %a, <1 x double> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <1 x double> [[ADD_I]]
float64x1_t test_vmla_f64(float64x1_t a, float64x1_t b, float64x1_t c) {
  return vmla_f64(a, b, c);
}

// COMMON-LABEL: test_vmls_f64
// UNCONSTRAINED: [[MUL_I:%.*]] = fmul <1 x double> %b, %c
// CONSTRAINED: [[MUL_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double> %b, <1 x double> %c, metadata !"round.tonearest", metadata !"fpexcept.strict")
// UNCONSTRAINED: [[SUB_I:%.*]] = fsub <1 x double> %a, [[MUL_I]]
// CONSTRAINED: [[SUB_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fsub.v1f64(<1 x double> %a, <1 x double> [[MUL_I]], metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <1 x double> [[SUB_I]]
float64x1_t test_vmls_f64(float64x1_t a, float64x1_t b, float64x1_t c) {
  return vmls_f64(a, b, c);
}

// COMMON-LABEL: test_vfma_f64
// COMMONIR: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// COMMONIR: [[TMP1:%.*]] = bitcast <1 x double> %b to <8 x i8>
// COMMONIR: [[TMP2:%.*]] = bitcast <1 x double> %c to <8 x i8>
// UNCONSTRAINED: [[TMP3:%.*]] = call <1 x double> @llvm.fma.v1f64(<1 x double> %b, <1 x double> %c, <1 x double> %a)
// CONSTRAINED: [[TMP3:%.*]] = call <1 x double> @llvm.experimental.constrained.fma.v1f64(<1 x double> %b, <1 x double> %c, <1 x double> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <1 x double> [[TMP3]]
float64x1_t test_vfma_f64(float64x1_t a, float64x1_t b, float64x1_t c) {
  return vfma_f64(a, b, c);
}

// COMMON-LABEL: test_vfms_f64
// COMMONIR: [[SUB_I:%.*]] = fneg <1 x double> %b
// COMMONIR: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// COMMONIR: [[TMP1:%.*]] = bitcast <1 x double> [[SUB_I]] to <8 x i8>
// COMMONIR: [[TMP2:%.*]] = bitcast <1 x double> %c to <8 x i8>
// UNCONSTRAINED: [[TMP3:%.*]] = call <1 x double> @llvm.fma.v1f64(<1 x double> [[SUB_I]], <1 x double> %c, <1 x double> %a)
// CONSTRAINED: [[TMP3:%.*]] = call <1 x double> @llvm.experimental.constrained.fma.v1f64(<1 x double> [[SUB_I]], <1 x double> %c, <1 x double> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <1 x double> [[TMP3]]
float64x1_t test_vfms_f64(float64x1_t a, float64x1_t b, float64x1_t c) {
  return vfms_f64(a, b, c);
}

// COMMON-LABEL: test_vsub_f64
// UNCONSTRAINED: [[SUB_I:%.*]] = fsub <1 x double> %a, %b
// CONSTRAINED: [[SUB_I:%.*]] = call <1 x double> @llvm.experimental.constrained.fsub.v1f64(<1 x double> %a, <1 x double> %b, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <1 x double> [[SUB_I]]
float64x1_t test_vsub_f64(float64x1_t a, float64x1_t b) {
  return vsub_f64(a, b);
}

// COMMON-LABEL: test_vcvt_s64_f64
// COMMONIR: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// COMMONIR: [[TMP1:%.*]] = call <1 x i64> @llvm.aarch64.neon.fcvtzs.v1i64.v1f64(<1 x double> %a)
// COMMONIR: ret <1 x i64> [[TMP1]]
int64x1_t test_vcvt_s64_f64(float64x1_t a) {
  return vcvt_s64_f64(a);
}

// COMMON-LABEL: test_vcvt_u64_f64
// COMMONIR: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// COMMONIR: [[TMP1:%.*]] = call <1 x i64> @llvm.aarch64.neon.fcvtzu.v1i64.v1f64(<1 x double> %a)
// COMMONIR: ret <1 x i64> [[TMP1]]
uint64x1_t test_vcvt_u64_f64(float64x1_t a) {
  return vcvt_u64_f64(a);
}

// COMMON-LABEL: test_vcvt_f64_s64
// COMMONIR: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
// UNCONSTRAINED: [[VCVT_I:%.*]] = sitofp <1 x i64> %a to <1 x double>
// CONSTRAINED: [[VCVT_I:%.*]] = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <1 x double> [[VCVT_I]]
float64x1_t test_vcvt_f64_s64(int64x1_t a) {
  return vcvt_f64_s64(a);
}

// COMMON-LABEL: test_vcvt_f64_u64
// COMMONIR: [[TMP0:%.*]] = bitcast <1 x i64> %a to <8 x i8>
// UNCONSTRAINED: [[VCVT_I:%.*]] = uitofp <1 x i64> %a to <1 x double>
// CONSTRAINED: [[VCVT_I:%.*]] = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <1 x double> [[VCVT_I]]
float64x1_t test_vcvt_f64_u64(uint64x1_t a) {
  return vcvt_f64_u64(a);
}
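
// The vrnd* family maps to the IR rounding intrinsics (vrnda -> round,
// vrndp -> ceil, vrndm -> floor, vrndx -> rint, vrnd -> trunc,
// vrndi -> nearbyint). In the strict build, only rint and nearbyint take
// rounding-mode metadata; the others are exception-only constrained calls.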

// COMMON-LABEL: test_vrnda_f64
// COMMONIR: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// UNCONSTRAINED: [[VRNDA1_I:%.*]] = call <1 x double> @llvm.round.v1f64(<1 x double> %a)
// CONSTRAINED: [[VRNDA1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double> %a, metadata !"fpexcept.strict")
// COMMONIR: ret <1 x double> [[VRNDA1_I]]
float64x1_t test_vrnda_f64(float64x1_t a) {
  return vrnda_f64(a);
}

// COMMON-LABEL: test_vrndp_f64
// COMMONIR: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// UNCONSTRAINED: [[VRNDP1_I:%.*]] = call <1 x double> @llvm.ceil.v1f64(<1 x double> %a)
// CONSTRAINED: [[VRNDP1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double> %a, metadata !"fpexcept.strict")
// COMMONIR: ret <1 x double> [[VRNDP1_I]]
float64x1_t test_vrndp_f64(float64x1_t a) {
  return vrndp_f64(a);
}

// COMMON-LABEL: test_vrndm_f64
// COMMONIR: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// UNCONSTRAINED: [[VRNDM1_I:%.*]] = call <1 x double> @llvm.floor.v1f64(<1 x double> %a)
// CONSTRAINED: [[VRNDM1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double> %a, metadata !"fpexcept.strict")
// COMMONIR: ret <1 x double> [[VRNDM1_I]]
float64x1_t test_vrndm_f64(float64x1_t a) {
  return vrndm_f64(a);
}

// COMMON-LABEL: test_vrndx_f64
// COMMONIR: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// UNCONSTRAINED: [[VRNDX1_I:%.*]] = call <1 x double> @llvm.rint.v1f64(<1 x double> %a)
// CONSTRAINED: [[VRNDX1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.rint.v1f64(<1 x double> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <1 x double> [[VRNDX1_I]]
float64x1_t test_vrndx_f64(float64x1_t a) {
  return vrndx_f64(a);
}

// COMMON-LABEL: test_vrnd_f64
// COMMONIR: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// UNCONSTRAINED: [[VRNDZ1_I:%.*]] = call <1 x double> @llvm.trunc.v1f64(<1 x double> %a)
// CONSTRAINED: [[VRNDZ1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %a, metadata !"fpexcept.strict")
// COMMONIR: ret <1 x double> [[VRNDZ1_I]]
float64x1_t test_vrnd_f64(float64x1_t a) {
  return vrnd_f64(a);
}

// COMMON-LABEL: test_vrndi_f64
// COMMONIR: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// UNCONSTRAINED: [[VRNDI1_I:%.*]] = call <1 x double> @llvm.nearbyint.v1f64(<1 x double> %a)
// CONSTRAINED: [[VRNDI1_I:%.*]] = call <1 x double> @llvm.experimental.constrained.nearbyint.v1f64(<1 x double> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <1 x double> [[VRNDI1_I]]
float64x1_t test_vrndi_f64(float64x1_t a) {
  return vrndi_f64(a);
}

// COMMON-LABEL: test_vsqrt_f64
// COMMONIR: [[TMP0:%.*]] = bitcast <1 x double> %a to <8 x i8>
// UNCONSTRAINED: [[VSQRT_I:%.*]] = call <1 x double> @llvm.sqrt.v1f64(<1 x double> %a)
// CONSTRAINED: [[VSQRT_I:%.*]] = call <1 x double> @llvm.experimental.constrained.sqrt.v1f64(<1 x double> %a, metadata !"round.tonearest", metadata !"fpexcept.strict")
// COMMONIR: ret <1 x double> [[VSQRT_I]]
float64x1_t test_vsqrt_f64(float64x1_t a) {
  return vsqrt_f64(a);
}