// REQUIRES: systemz-registered-target
// RUN: %clang_cc1 -target-cpu z15 -triple s390x-linux-gnu \
// RUN: -O2 -fzvector -flax-vector-conversions=none \
// RUN: -ffp-exception-behavior=strict \
// RUN: -Wall -Wno-unused -Werror -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -target-cpu z15 -triple s390x-linux-gnu \
// RUN: -O2 -fzvector -flax-vector-conversions=none \
// RUN: -ffp-exception-behavior=strict \
// RUN: -Wall -Wno-unused -Werror -S %s -o - | FileCheck %s --check-prefix=CHECK-ASM
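
// With -ffp-exception-behavior=strict, the conversion builtins below must
// lower to constrained LLVM intrinsics (first RUN line) while still
// selecting the z15 instructions (second RUN line, CHECK-ASM).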

#include <vecintrin.h>
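
// All operands are volatile globals so that -O2 cannot fold the operations
// away and each builtin expansion stays visible to FileCheck.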
volatile vector signed int vsi;
volatile vector signed long long vsl;
volatile vector unsigned int vui;
volatile vector unsigned long long vul;
volatile vector float vf;
volatile vector double vd;

volatile float f;
volatile double d;

const float * volatile cptrf;
const double * volatile cptrd;

float * volatile ptrf;
double * volatile ptrd;

volatile int idx;

void test_core(void) {
  // CHECK-ASM-LABEL: test_core
  vector float vf2;
  vector double vd2;
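
  // vec_revb on a vec_xl load should fold into a byte-reversing load.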
  vf += vec_revb(vec_xl(idx, cptrf));
  // CHECK-ASM: vlbrf
  vd += vec_revb(vec_xl(idx, cptrd));
  // CHECK-ASM: vlbrg
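
  // vec_revb feeding vec_xst should fold into a byte-reversing store.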
  vec_xst(vec_revb(vf), idx, ptrf);
  // CHECK-ASM: vstbrf
  vec_xst(vec_revb(vd), idx, ptrd);
  // CHECK-ASM: vstbrg
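
  // Byte-reversed load of a single element into an otherwise zero vector.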
  vf += vec_revb(vec_insert_and_zero(cptrf));
  // CHECK-ASM: vllebrzf
  vd += vec_revb(vec_insert_and_zero(cptrd));
  // CHECK-ASM: vllebrzg
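
  // Byte-reversed load-and-replicate of a scalar.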
  vf += vec_revb(vec_splats(f));
  // CHECK-ASM: vlbrrepf
  vd += vec_revb(vec_splats(d));
  // CHECK-ASM: vlbrrepg
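
  // Byte-reversed insertion of a scalar element; the two vec_revb calls on
  // the vector operand cancel out.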
  vf2 = vf;
  vf += vec_revb(vec_insert(f, vec_revb(vf2), 0));
  // CHECK-ASM: vlebrf
  vd2 = vd;
  vd += vec_revb(vec_insert(d, vec_revb(vd2), 0));
  // CHECK-ASM: vlebrg
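
  // Byte-reversed extraction of a single element, stored to a scalar.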
  f = vec_extract(vec_revb(vf), 0);
  // CHECK-ASM: vstebrf
  d = vec_extract(vec_revb(vd), 0);
  // CHECK-ASM: vstebrg
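
  // vec_reve on a vec_xl load should fold into an element-reversing load.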
  vf += vec_reve(vec_xl(idx, cptrf));
  // CHECK-ASM: vlerf
  vd += vec_reve(vec_xl(idx, cptrd));
  // CHECK-ASM: vlerg
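
  // vec_reve feeding vec_xst should fold into an element-reversing store.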
  vec_xst(vec_reve(vf), idx, ptrf);
  // CHECK-ASM: vsterf
  vec_xst(vec_reve(vd), idx, ptrd);
  // CHECK-ASM: vsterg
}

void test_float(void) {
  // CHECK-ASM-LABEL: test_float
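
  // Integer-to-floating conversions: constrained sitofp/uitofp intrinsics.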
  vd = vec_double(vsl);
  // CHECK: call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vcdgb
  vd = vec_double(vul);
  // CHECK: call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vcdlgb
  vf = vec_float(vsi);
  // CHECK: call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vcefb
  vf = vec_float(vui);
  // CHECK: call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vcelfb
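
  // Floating-to-integer conversions: constrained fptosi/fptoui intrinsics.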
  vsl = vec_signed(vd);
  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vcgdb
  vsi = vec_signed(vf);
  // CHECK: call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vcfeb
  vul = vec_unsigned(vd);
  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vclgdb
  vui = vec_unsigned(vf);
  // CHECK: call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vclfeb
}