; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=aarch64 %s -disable-strictnode-mutation -o - | FileCheck %s --check-prefixes=CHECK,CHECK-SD
; RUN: llc -mtriple=aarch64 -global-isel=true -global-isel-abort=2 -disable-strictnode-mutation %s -o - 2>&1 | FileCheck %s --check-prefixes=CHECK,CHECK-GI

; Check that constrained fp vector intrinsics are correctly lowered.

; CHECK-GI: warning: Instruction selection used fallback path for add_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sub_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for mul_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for div_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fma_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptosi_v4i32_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptoui_v4i32_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptosi_v4i64_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptoui_v4i64_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sitofp_v4f32_v4i32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for uitofp_v4f32_v4i32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sitofp_v4f32_v4i64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for uitofp_v4f32_v4i64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqrt_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for rint_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for nearbyint_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for maxnum_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for minnum_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for ceil_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for floor_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for round_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for roundeven_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for trunc_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_v4f32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for add_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sub_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for mul_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for div_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fma_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptosi_v2i32_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptoui_v2i32_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptosi_v2i64_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptoui_v2i64_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sitofp_v2f64_v2i32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for uitofp_v2f64_v2i32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sitofp_v2f64_v2i64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for uitofp_v2f64_v2i64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqrt_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for rint_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for nearbyint_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for maxnum_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for minnum_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for ceil_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for floor_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for round_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for roundeven_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for trunc_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for add_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sub_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for mul_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for div_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fma_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptosi_v1i32_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptoui_v1i32_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptosi_v1i64_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptoui_v1i64_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sitofp_v1f64_v1i32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for uitofp_v1f64_v1i32
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sitofp_v1f64_v1i64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for uitofp_v1f64_v1i64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for sqrt_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for rint_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for nearbyint_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for maxnum_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for minnum_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for ceil_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for floor_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for round_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for roundeven_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for trunc_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmp_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fcmps_v1f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fptrunc_v2f32_v2f64
; CHECK-GI-NEXT: warning: Instruction selection used fallback path for fpext_v2f64_v2f32

; Single-precision intrinsics

define <4 x float> @add_v4f32(<4 x float> %x, <4 x float> %y) #0 {
; CHECK-LABEL: add_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float> %x, <4 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @sub_v4f32(<4 x float> %x, <4 x float> %y) #0 {
; CHECK-LABEL: sub_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float> %x, <4 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @mul_v4f32(<4 x float> %x, <4 x float> %y) #0 {
; CHECK-LABEL: mul_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float> %x, <4 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @div_v4f32(<4 x float> %x, <4 x float> %y) #0 {
; CHECK-LABEL: div_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdiv v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float> %x, <4 x float> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @fma_v4f32(<4 x float> %x, <4 x float> %y, <4 x float> %z) #0 {
; CHECK-LABEL: fma_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmla v2.4s, v1.4s, v0.4s
; CHECK-NEXT:    mov v0.16b, v2.16b
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float> %x, <4 x float> %y, <4 x float> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x i32> @fptosi_v4i32_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: fptosi_v4i32_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
  ret <4 x i32> %val
}

define <4 x i32> @fptoui_v4i32_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: fptoui_v4i32_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
  ret <4 x i32> %val
}

define <4 x i64> @fptosi_v4i64_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: fptosi_v4i64_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtl2 v1.2d, v0.4s
; CHECK-NEXT:    fcvtl v0.2d, v0.2s
; CHECK-NEXT:    fcvtzs v1.2d, v1.2d
; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
  ret <4 x i64> %val
}

define <4 x i64> @fptoui_v4i64_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: fptoui_v4i64_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtl2 v1.2d, v0.4s
; CHECK-NEXT:    fcvtl v0.2d, v0.2s
; CHECK-NEXT:    fcvtzu v1.2d, v1.2d
; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
  ret <4 x i64> %val
}

define <4 x float> @sitofp_v4f32_v4i32(<4 x i32> %x) #0 {
; CHECK-LABEL: sitofp_v4f32_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @uitofp_v4f32_v4i32(<4 x i32> %x) #0 {
; CHECK-LABEL: uitofp_v4f32_v4i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @sitofp_v4f32_v4i64(<4 x i64> %x) #0 {
; CHECK-LABEL: sitofp_v4f32_v4i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf v0.2d, v0.2d
; CHECK-NEXT:    scvtf v1.2d, v1.2d
; CHECK-NEXT:    fcvtn v0.2s, v0.2d
; CHECK-NEXT:    fcvtn2 v0.4s, v1.2d
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @uitofp_v4f32_v4i64(<4 x i64> %x) #0 {
; CHECK-LABEL: uitofp_v4f32_v4i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf v0.2d, v0.2d
; CHECK-NEXT:    ucvtf v1.2d, v1.2d
; CHECK-NEXT:    fcvtn v0.2s, v0.2d
; CHECK-NEXT:    fcvtn2 v0.4s, v1.2d
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @sqrt_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: sqrt_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsqrt v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @rint_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: rint_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintx v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.rint.v4f32(<4 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @nearbyint_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: nearbyint_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinti v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @maxnum_v4f32(<4 x float> %x, <4 x float> %y) #0 {
; CHECK-LABEL: maxnum_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnm v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.maxnum.v4f32(<4 x float> %x, <4 x float> %y, metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @minnum_v4f32(<4 x float> %x, <4 x float> %y) #0 {
; CHECK-LABEL: minnum_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnm v0.4s, v0.4s, v1.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.minnum.v4f32(<4 x float> %x, <4 x float> %y, metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @ceil_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: ceil_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintp v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @floor_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: floor_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintm v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @round_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: round_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinta v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @roundeven_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: roundeven_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintn v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x float> @trunc_v4f32(<4 x float> %x) #0 {
; CHECK-LABEL: trunc_v4f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintz v0.4s, v0.4s
; CHECK-NEXT:    ret
  %val = call <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float> %x, metadata !"fpexcept.strict") #0
  ret <4 x float> %val
}

define <4 x i1> @fcmp_v4f32(<4 x float> %x, <4 x float> %y) #0 {
; CHECK-LABEL: fcmp_v4f32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov s2, v1.s[1]
; CHECK-NEXT:    mov s3, v0.s[1]
; CHECK-NEXT:    fcmp s0, s1
; CHECK-NEXT:    csetm w8, eq
; CHECK-NEXT:    fcmp s3, s2
; CHECK-NEXT:    mov s2, v1.s[2]
; CHECK-NEXT:    mov s3, v0.s[2]
; CHECK-NEXT:    fmov s4, w8
; CHECK-NEXT:    mov s1, v1.s[3]
; CHECK-NEXT:    mov s0, v0.s[3]
; CHECK-NEXT:    csetm w8, eq
; CHECK-NEXT:    mov v4.s[1], w8
; CHECK-NEXT:    fcmp s3, s2
; CHECK-NEXT:    csetm w8, eq
; CHECK-NEXT:    fcmp s0, s1
; CHECK-NEXT:    mov v4.s[2], w8
; CHECK-NEXT:    csetm w8, eq
; CHECK-NEXT:    mov v4.s[3], w8
; CHECK-NEXT:    xtn v0.4h, v4.4s
; CHECK-NEXT:    ret
entry:
  %val = call <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float> %x, <4 x float> %y, metadata !"oeq", metadata !"fpexcept.strict")
  ret <4 x i1> %val
}

define <4 x i1> @fcmps_v4f32(<4 x float> %x, <4 x float> %y) #0 {
; CHECK-LABEL: fcmps_v4f32:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov s2, v1.s[1]
; CHECK-NEXT:    mov s3, v0.s[1]
; CHECK-NEXT:    fcmpe s0, s1
; CHECK-NEXT:    csetm w8, eq
; CHECK-NEXT:    fcmpe s3, s2
; CHECK-NEXT:    mov s2, v1.s[2]
; CHECK-NEXT:    mov s3, v0.s[2]
; CHECK-NEXT:    fmov s4, w8
; CHECK-NEXT:    mov s1, v1.s[3]
; CHECK-NEXT:    mov s0, v0.s[3]
; CHECK-NEXT:    csetm w8, eq
; CHECK-NEXT:    mov v4.s[1], w8
; CHECK-NEXT:    fcmpe s3, s2
; CHECK-NEXT:    csetm w8, eq
; CHECK-NEXT:    fcmpe s0, s1
; CHECK-NEXT:    mov v4.s[2], w8
; CHECK-NEXT:    csetm w8, eq
; CHECK-NEXT:    mov v4.s[3], w8
; CHECK-NEXT:    xtn v0.4h, v4.4s
; CHECK-NEXT:    ret
entry:
  %val = call <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float> %x, <4 x float> %y, metadata !"oeq", metadata !"fpexcept.strict")
  ret <4 x i1> %val
}

; Double-precision intrinsics

define <2 x double> @add_v2f64(<2 x double> %x, <2 x double> %y) #0 {
; CHECK-LABEL: add_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @sub_v2f64(<2 x double> %x, <2 x double> %y) #0 {
; CHECK-LABEL: sub_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @mul_v2f64(<2 x double> %x, <2 x double> %y) #0 {
; CHECK-LABEL: mul_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @div_v2f64(<2 x double> %x, <2 x double> %y) #0 {
; CHECK-LABEL: div_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdiv v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double> %x, <2 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @fma_v2f64(<2 x double> %x, <2 x double> %y, <2 x double> %z) #0 {
; CHECK-LABEL: fma_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmla v2.2d, v1.2d, v0.2d
; CHECK-NEXT:    mov v0.16b, v2.16b
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %x, <2 x double> %y, <2 x double> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x i32> @fptosi_v2i32_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: fptosi_v2i32_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
; CHECK-NEXT:    xtn v0.2s, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
  ret <2 x i32> %val
}

define <2 x i32> @fptoui_v2i32_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: fptoui_v2i32_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
; CHECK-NEXT:    xtn v0.2s, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
  ret <2 x i32> %val
}

define <2 x i64> @fptosi_v2i64_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: fptosi_v2i64_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
  ret <2 x i64> %val
}

define <2 x i64> @fptoui_v2i64_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: fptoui_v2i64_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
  ret <2 x i64> %val
}

define <2 x double> @sitofp_v2f64_v2i32(<2 x i32> %x) #0 {
; CHECK-LABEL: sitofp_v2f64_v2i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    sshll v0.2d, v0.2s, #0
; CHECK-NEXT:    scvtf v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @uitofp_v2f64_v2i32(<2 x i32> %x) #0 {
; CHECK-LABEL: uitofp_v2f64_v2i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ushll v0.2d, v0.2s, #0
; CHECK-NEXT:    ucvtf v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @sitofp_v2f64_v2i64(<2 x i64> %x) #0 {
; CHECK-LABEL: sitofp_v2f64_v2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    scvtf v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @uitofp_v2f64_v2i64(<2 x i64> %x) #0 {
; CHECK-LABEL: uitofp_v2f64_v2i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    ucvtf v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @sqrt_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: sqrt_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsqrt v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @rint_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: rint_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintx v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @nearbyint_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: nearbyint_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinti v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @maxnum_v2f64(<2 x double> %x, <2 x double> %y) #0 {
; CHECK-LABEL: maxnum_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnm v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.maxnum.v2f64(<2 x double> %x, <2 x double> %y, metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @minnum_v2f64(<2 x double> %x, <2 x double> %y) #0 {
; CHECK-LABEL: minnum_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnm v0.2d, v0.2d, v1.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.minnum.v2f64(<2 x double> %x, <2 x double> %y, metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @ceil_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: ceil_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintp v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @floor_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: floor_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintm v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @round_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: round_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinta v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @roundeven_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: roundeven_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintn v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x double> @trunc_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: trunc_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintz v0.2d, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %x, metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

define <2 x i1> @fcmp_v2f64(<2 x double> %x, <2 x double> %y) #0 {
; CHECK-LABEL: fcmp_v2f64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov d2, v1.d[1]
; CHECK-NEXT:    mov d3, v0.d[1]
; CHECK-NEXT:    fcmp d0, d1
; CHECK-NEXT:    csetm x8, eq
; CHECK-NEXT:    fcmp d3, d2
; CHECK-NEXT:    fmov d0, x8
; CHECK-NEXT:    csetm x8, eq
; CHECK-NEXT:    mov v0.d[1], x8
; CHECK-NEXT:    xtn v0.2s, v0.2d
; CHECK-NEXT:    ret
entry:
  %val = call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double> %x, <2 x double> %y, metadata !"oeq", metadata !"fpexcept.strict")
  ret <2 x i1> %val
}

define <2 x i1> @fcmps_v2f64(<2 x double> %x, <2 x double> %y) #0 {
; CHECK-LABEL: fcmps_v2f64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    mov d2, v1.d[1]
; CHECK-NEXT:    mov d3, v0.d[1]
; CHECK-NEXT:    fcmpe d0, d1
; CHECK-NEXT:    csetm x8, eq
; CHECK-NEXT:    fcmpe d3, d2
; CHECK-NEXT:    fmov d0, x8
; CHECK-NEXT:    csetm x8, eq
; CHECK-NEXT:    mov v0.d[1], x8
; CHECK-NEXT:    xtn v0.2s, v0.2d
; CHECK-NEXT:    ret
entry:
  %val = call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %x, <2 x double> %y, metadata !"oeq", metadata !"fpexcept.strict")
  ret <2 x i1> %val
}

; Double-precision single element intrinsics

define <1 x double> @add_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: add_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fadd d0, d0, d1
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.fadd.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @sub_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: sub_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsub d0, d0, d1
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.fsub.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @mul_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: mul_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmul d0, d0, d1
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @div_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: div_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fdiv d0, d0, d1
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.fdiv.v1f64(<1 x double> %x, <1 x double> %y, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @fma_v1f64(<1 x double> %x, <1 x double> %y, <1 x double> %z) #0 {
; CHECK-LABEL: fma_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmadd d0, d0, d1, d2
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.fma.v1f64(<1 x double> %x, <1 x double> %y, <1 x double> %z, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x i32> @fptosi_v1i32_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: fptosi_v1i32_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs w8, d0
; CHECK-NEXT:    fmov s0, w8
; CHECK-NEXT:    ret
  %val = call <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
  ret <1 x i32> %val
}

define <1 x i32> @fptoui_v1i32_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: fptoui_v1i32_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu w8, d0
; CHECK-NEXT:    fmov s0, w8
; CHECK-NEXT:    ret
  %val = call <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
  ret <1 x i32> %val
}

define <1 x i64> @fptosi_v1i64_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: fptosi_v1i64_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzs x8, d0
; CHECK-NEXT:    fmov d0, x8
; CHECK-NEXT:    ret
  %val = call <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
  ret <1 x i64> %val
}

define <1 x i64> @fptoui_v1i64_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: fptoui_v1i64_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtzu x8, d0
; CHECK-NEXT:    fmov d0, x8
; CHECK-NEXT:    ret
  %val = call <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
  ret <1 x i64> %val
}

define <1 x double> @sitofp_v1f64_v1i32(<1 x i32> %x) #0 {
; CHECK-LABEL: sitofp_v1f64_v1i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    fmov w8, s0
; CHECK-NEXT:    scvtf d0, w8
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @uitofp_v1f64_v1i32(<1 x i32> %x) #0 {
; CHECK-LABEL: uitofp_v1f64_v1i32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    fmov w8, s0
; CHECK-NEXT:    ucvtf d0, w8
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @sitofp_v1f64_v1i64(<1 x i64> %x) #0 {
; CHECK-LABEL: sitofp_v1f64_v1i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    fmov x8, d0
; CHECK-NEXT:    scvtf d0, x8
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @uitofp_v1f64_v1i64(<1 x i64> %x) #0 {
; CHECK-LABEL: uitofp_v1f64_v1i64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    // kill: def $d0 killed $d0 def $q0
; CHECK-NEXT:    fmov x8, d0
; CHECK-NEXT:    ucvtf d0, x8
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @sqrt_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: sqrt_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fsqrt d0, d0
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.sqrt.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @rint_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: rint_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintx d0, d0
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.rint.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @nearbyint_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: nearbyint_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinti d0, d0
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.nearbyint.v1f64(<1 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @maxnum_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: maxnum_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fmaxnm d0, d0, d1
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.maxnum.v1f64(<1 x double> %x, <1 x double> %y, metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @minnum_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: minnum_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fminnm d0, d0, d1
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.minnum.v1f64(<1 x double> %x, <1 x double> %y, metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @ceil_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: ceil_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintp d0, d0
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @floor_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: floor_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintm d0, d0
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @round_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: round_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frinta d0, d0
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @roundeven_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: roundeven_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintn d0, d0
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x double> @trunc_v1f64(<1 x double> %x) #0 {
; CHECK-LABEL: trunc_v1f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    frintz d0, d0
; CHECK-NEXT:    ret
  %val = call <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double> %x, metadata !"fpexcept.strict") #0
  ret <1 x double> %val
}

define <1 x i1> @fcmp_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: fcmp_v1f64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcmp d0, d1
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
entry:
  %val = call <1 x i1> @llvm.experimental.constrained.fcmp.v1f64(<1 x double> %x, <1 x double> %y, metadata !"oeq", metadata !"fpexcept.strict")
  ret <1 x i1> %val
}

define <1 x i1> @fcmps_v1f64(<1 x double> %x, <1 x double> %y) #0 {
; CHECK-LABEL: fcmps_v1f64:
; CHECK:       // %bb.0: // %entry
; CHECK-NEXT:    fcmpe d0, d1
; CHECK-NEXT:    cset w0, eq
; CHECK-NEXT:    ret
entry:
  %val = call <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double> %x, <1 x double> %y, metadata !"oeq", metadata !"fpexcept.strict")
  ret <1 x i1> %val
}

; Intrinsics to convert between floating-point types

define <2 x float> @fptrunc_v2f32_v2f64(<2 x double> %x) #0 {
; CHECK-LABEL: fptrunc_v2f32_v2f64:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtn v0.2s, v0.2d
; CHECK-NEXT:    ret
  %val = call <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double> %x, metadata !"round.tonearest", metadata !"fpexcept.strict") #0
  ret <2 x float> %val
}

define <2 x double> @fpext_v2f64_v2f32(<2 x float> %x) #0 {
; CHECK-LABEL: fpext_v2f64_v2f32:
; CHECK:       // %bb.0:
; CHECK-NEXT:    fcvtl v0.2d, v0.2s
; CHECK-NEXT:    ret
  %val = call <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float> %x, metadata !"fpexcept.strict") #0
  ret <2 x double> %val
}

attributes #0 = { strictfp }

declare <4 x float> @llvm.experimental.constrained.fadd.v4f32(<4 x float>, <4 x float>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.fsub.v4f32(<4 x float>, <4 x float>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.fmul.v4f32(<4 x float>, <4 x float>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.fdiv.v4f32(<4 x float>, <4 x float>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.fma.v4f32(<4 x float>, <4 x float>, <4 x float>, metadata, metadata)
declare <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float>, metadata)
declare <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float>, metadata)
declare <4 x i64> @llvm.experimental.constrained.fptosi.v4i64.v4f32(<4 x float>, metadata)
declare <4 x i64> @llvm.experimental.constrained.fptoui.v4i64.v4f32(<4 x float>, metadata)
declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i64(<4 x i64>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.sqrt.v4f32(<4 x float>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.rint.v4f32(<4 x float>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.nearbyint.v4f32(<4 x float>, metadata, metadata)
declare <4 x float> @llvm.experimental.constrained.maxnum.v4f32(<4 x float>, <4 x float>, metadata)
declare <4 x float> @llvm.experimental.constrained.minnum.v4f32(<4 x float>, <4 x float>, metadata)
declare <4 x float> @llvm.experimental.constrained.ceil.v4f32(<4 x float>, metadata)
declare <4 x float> @llvm.experimental.constrained.floor.v4f32(<4 x float>, metadata)
declare <4 x float> @llvm.experimental.constrained.round.v4f32(<4 x float>, metadata)
declare <4 x float> @llvm.experimental.constrained.roundeven.v4f32(<4 x float>, metadata)
declare <4 x float> @llvm.experimental.constrained.trunc.v4f32(<4 x float>, metadata)
declare <4 x i1> @llvm.experimental.constrained.fcmp.v4f32(<4 x float>, <4 x float>, metadata, metadata)
declare <4 x i1> @llvm.experimental.constrained.fcmps.v4f32(<4 x float>, <4 x float>, metadata, metadata)

declare <2 x double> @llvm.experimental.constrained.fadd.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fsub.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fdiv.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double>, <2 x double>, <2 x double>, metadata, metadata)
declare <2 x i32> @llvm.experimental.constrained.fptosi.v2i32.v2f64(<2 x double>, metadata)
declare <2 x i32> @llvm.experimental.constrained.fptoui.v2i32.v2f64(<2 x double>, metadata)
declare <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double>, metadata)
declare <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double>, metadata)
declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i32(<2 x i32>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.maxnum.v2f64(<2 x double>, <2 x double>, metadata)
declare <2 x double> @llvm.experimental.constrained.minnum.v2f64(<2 x double>, <2 x double>, metadata)
declare <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double>, metadata)
declare <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double>, metadata)
declare <2 x double> @llvm.experimental.constrained.round.v2f64(<2 x double>, metadata)
declare <2 x double> @llvm.experimental.constrained.roundeven.v2f64(<2 x double>, metadata)
declare <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double>, metadata)
declare <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double>, <2 x double>, metadata, metadata)
declare <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double>, <2 x double>, metadata, metadata)

declare <1 x double> @llvm.experimental.constrained.fadd.v1f64(<1 x double>, <1 x double>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.fsub.v1f64(<1 x double>, <1 x double>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.fmul.v1f64(<1 x double>, <1 x double>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.fdiv.v1f64(<1 x double>, <1 x double>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.fma.v1f64(<1 x double>, <1 x double>, <1 x double>, metadata, metadata)
declare <1 x i32> @llvm.experimental.constrained.fptosi.v1i32.v1f64(<1 x double>, metadata)
declare <1 x i32> @llvm.experimental.constrained.fptoui.v1i32.v1f64(<1 x double>, metadata)
declare <1 x i64> @llvm.experimental.constrained.fptosi.v1i64.v1f64(<1 x double>, metadata)
declare <1 x i64> @llvm.experimental.constrained.fptoui.v1i64.v1f64(<1 x double>, metadata)
declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i32(<1 x i32>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.sitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.uitofp.v1f64.v1i64(<1 x i64>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.sqrt.v1f64(<1 x double>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.rint.v1f64(<1 x double>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.nearbyint.v1f64(<1 x double>, metadata, metadata)
declare <1 x double> @llvm.experimental.constrained.maxnum.v1f64(<1 x double>, <1 x double>, metadata)
declare <1 x double> @llvm.experimental.constrained.minnum.v1f64(<1 x double>, <1 x double>, metadata)
declare <1 x double> @llvm.experimental.constrained.ceil.v1f64(<1 x double>, metadata)
declare <1 x double> @llvm.experimental.constrained.floor.v1f64(<1 x double>, metadata)
declare <1 x double> @llvm.experimental.constrained.round.v1f64(<1 x double>, metadata)
declare <1 x double> @llvm.experimental.constrained.roundeven.v1f64(<1 x double>, metadata)
declare <1 x double> @llvm.experimental.constrained.trunc.v1f64(<1 x double>, metadata)
declare <1 x i1> @llvm.experimental.constrained.fcmp.v1f64(<1 x double>, <1 x double>, metadata, metadata)
declare <1 x i1> @llvm.experimental.constrained.fcmps.v1f64(<1 x double>, <1 x double>, metadata, metadata)

declare <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double>, metadata, metadata)
declare <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float>, metadata)

;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line: