// REQUIRES: systemz-registered-target
// RUN: %clang_cc1 -target-cpu z15 -triple s390x-linux-gnu \
// RUN: -O2 -fzvector -flax-vector-conversions=none \
// RUN: -ffp-exception-behavior=strict \
// RUN: -Wall -Wno-unused -Werror -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -target-cpu z15 -triple s390x-linux-gnu \
// RUN: -O2 -fzvector -flax-vector-conversions=none \
// RUN: -ffp-exception-behavior=strict \
// RUN: -Wall -Wno-unused -Werror -S %s -o - | FileCheck %s --check-prefix=CHECK-ASM
11 #include <vecintrin.h>
13 volatile vector
signed int vsi
;
14 volatile vector
signed long long vsl
;
15 volatile vector
unsigned int vui
;
16 volatile vector
unsigned long long vul
;
17 volatile vector
float vf
;
18 volatile vector
double vd
;
23 const float * volatile cptrf
;
24 const double * volatile cptrd
;
26 float * volatile ptrf
;
27 double * volatile ptrd
;
31 void test_core(void) {
32 // CHECK-ASM-LABEL: test_core
36 vf
+= vec_revb(vec_xl(idx
, cptrf
));
38 vd
+= vec_revb(vec_xl(idx
, cptrd
));
41 vec_xst(vec_revb(vf
), idx
, ptrf
);
43 vec_xst(vec_revb(vd
), idx
, ptrd
);
46 vf
+= vec_revb(vec_insert_and_zero(cptrf
));
47 // CHECK-ASM: vllebrzf
48 vd
+= vec_revb(vec_insert_and_zero(cptrd
));
49 // CHECK-ASM: vllebrzg
51 vf
+= vec_revb(vec_splats(f
));
52 // CHECK-ASM: vlbrrepf
53 vd
+= vec_revb(vec_splats(d
));
54 // CHECK-ASM: vlbrrepg
57 vf
+= vec_revb(vec_insert(f
, vec_revb(vf2
), 0));
60 vd
+= vec_revb(vec_insert(d
, vec_revb(vd2
), 0));
63 f
= vec_extract(vec_revb(vf
), 0);
65 d
= vec_extract(vec_revb(vd
), 0);
68 vf
+= vec_reve(vec_xl(idx
, cptrf
));
70 vd
+= vec_reve(vec_xl(idx
, cptrd
));
73 vec_xst(vec_reve(vf
), idx
, ptrf
);
75 vec_xst(vec_reve(vd
), idx
, ptrd
);
79 void test_float(void) {
// CHECK-ASM-LABEL: test_float
// CHECK: call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
// CHECK: call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
// CHECK: call <4 x float> @llvm.experimental.constrained.sitofp.v4f32.v4i32(<4 x i32> %{{.*}}, metadata !{{.*}})
// CHECK: call <4 x float> @llvm.experimental.constrained.uitofp.v4f32.v4i32(<4 x i32> %{{.*}}, metadata !{{.*}})
// CHECK: call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
// CHECK: call <4 x i32> @llvm.experimental.constrained.fptosi.v4i32.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})
101 vul
= vec_unsigned(vd
);
// CHECK: call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
104 vui
= vec_unsigned(vf
);
// xHECK: fptoui <4 x float> %{{.*}} to <4 x i32>
// CHECK: call <4 x i32> @llvm.experimental.constrained.fptoui.v4i32.v4f32(<4 x float> %{{.*}}, metadata !{{.*}})