// REQUIRES: systemz-registered-target
// RUN: %clang_cc1 -target-cpu z13 -triple s390x-linux-gnu \
// RUN: -O2 -fzvector -flax-vector-conversions=none \
// RUN: -ffp-exception-behavior=strict \
// RUN: -Wall -Wno-unused -Werror -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -target-cpu z13 -triple s390x-linux-gnu \
// RUN: -O2 -fzvector -flax-vector-conversions=none \
// RUN: -ffp-exception-behavior=strict \
// RUN: -Wall -Wno-unused -Werror -S %s -o - | FileCheck %s --check-prefix=CHECK-ASM
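
// With -ffp-exception-behavior=strict, Clang lowers these vector builtins to
// llvm.experimental.constrained.* intrinsics instead of plain FP operations.
// The CHECK lines verify that IR; the CHECK-ASM lines verify the z13
// instruction selection.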

#include <vecintrin.h>

volatile vector signed long long vsl;
volatile vector unsigned long long vul;
volatile vector bool long long vbl;
volatile vector double vd;

volatile double d;

const float * volatile cptrf;
const double * volatile cptrd;

float * volatile ptrf;
double * volatile ptrd;

volatile int idx;
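
// Note: the globals are volatile so that, at -O2, each builtin call keeps its
// operand loads and result stores and the function bodies are not folded away.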

void test_core(void) {
  // CHECK-ASM-LABEL: test_core

  d = vec_extract(vd, idx);
  // CHECK: extractelement <2 x double> %{{.*}}, i32 %{{.*}}
  // CHECK-ASM: vlgvg

  vd = vec_insert(d, vd, idx);
  // CHECK: insertelement <2 x double> %{{.*}}, double %{{.*}}, i32 %{{.*}}
  // CHECK-ASM: vlvgg

  vd = vec_promote(d, idx);
  // CHECK: insertelement <2 x double> poison, double %{{.*}}, i32 %{{.*}}
  // CHECK-ASM: vlvgg

  vd = vec_insert_and_zero(cptrd);
  // CHECK: [[ZVEC:%[^ ]+]] = insertelement <2 x double> <double poison, double 0.000000e+00>, double {{.*}}, i64 0
  // CHECK-ASM: vllezg

  vd = vec_revb(vd);
  // CHECK-ASM: vperm

  vd = vec_reve(vd);
  // CHECK-ASM: {{vperm|vpdi}}

  vd = vec_sel(vd, vd, vul);
  // CHECK-ASM: vsel
  vd = vec_sel(vd, vd, vbl);
  // CHECK-ASM: vsel

  vd = vec_gather_element(vd, vul, cptrd, 0);
  // CHECK-ASM: vgeg %{{.*}}, 0(%{{.*}},%{{.*}}), 0
  vd = vec_gather_element(vd, vul, cptrd, 1);
  // CHECK-ASM: vgeg %{{.*}}, 0(%{{.*}},%{{.*}}), 1

  vec_scatter_element(vd, vul, ptrd, 0);
  // CHECK-ASM: vsceg %{{.*}}, 0(%{{.*}},%{{.*}}), 0
  vec_scatter_element(vd, vul, ptrd, 1);
  // CHECK-ASM: vsceg %{{.*}}, 0(%{{.*}},%{{.*}}), 1

  vd = vec_xl(idx, cptrd);
  // CHECK-ASM-NEXT: lgf %r5, 0(%r3)
  // CHECK-ASM-NEXT: lg %r13, 0(%r4)
  // CHECK-ASM-NEXT: vl %v0, 0(%r5,%r13){{$}}
  // CHECK-ASM-NEXT: vst

  vd = vec_xld2(idx, cptrd);
  // CHECK-ASM: vst

  vec_xst(vd, idx, ptrd);

  vec_xstd2(vd, idx, ptrd);

  vd = vec_splat(vd, 0);
  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> poison, <2 x i32> zeroinitializer
  // CHECK-ASM: vrepg
  vd = vec_splat(vd, 1);
  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> poison, <2 x i32> <i32 1, i32 1>
  // CHECK-ASM: vrepg

  vd = vec_splats(d);
  // CHECK: shufflevector <2 x double> %{{.*}}, <2 x double> poison, <2 x i32> zeroinitializer
  // CHECK-ASM: vlrepg

  vd = vec_mergeh(vd, vd);
  // shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 0, i32 2>
  // CHECK-ASM: vmrhg

  vd = vec_mergel(vd, vd);
  // shufflevector <2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x i32> <i32 1, i32 3>
  // CHECK-ASM: vmrlg
}
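
// Under strict FP, vec_cmpeq lowers to the quiet constrained fcmp, while the
// ordered comparisons use the signaling fcmps. The vec_all_*/vec_any_*
// predicates instead go through the CC-setting @llvm.s390.* intrinsics, which
// return a {vector, i32 cc} pair.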
void test_compare(void) {
  // CHECK-ASM-LABEL: test_compare

  vbl = vec_cmpeq(vd, vd);
  // CHECK: call <2 x i1> @llvm.experimental.constrained.fcmp.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !"oeq", metadata !{{.*}})
  // CHECK-ASM: vfcedb

  vbl = vec_cmpge(vd, vd);
  // CHECK: call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !"oge", metadata !{{.*}})
  // CHECK-ASM: kdbr
  // CHECK-ASM: kdbr
  // CHECK-ASM: vst

  vbl = vec_cmpgt(vd, vd);
  // CHECK: call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !"ogt", metadata !{{.*}})
  // CHECK-ASM: kdbr
  // CHECK-ASM: kdbr
  // CHECK-ASM: vst

  vbl = vec_cmple(vd, vd);
  // CHECK: call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !"ole", metadata !{{.*}})
  // CHECK-ASM: kdbr
  // CHECK-ASM: kdbr
  // CHECK-ASM: vst

  vbl = vec_cmplt(vd, vd);
  // CHECK: call <2 x i1> @llvm.experimental.constrained.fcmps.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !"olt", metadata !{{.*}})
  // CHECK-ASM: kdbr
  // CHECK-ASM: kdbr
  // CHECK-ASM: vst

  idx = vec_all_lt(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CHECK-ASM: vfchdbs

  idx = vec_all_nge(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CHECK-ASM: vfchedbs
  idx = vec_all_ngt(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CHECK-ASM: vfchdbs
  idx = vec_all_nle(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CHECK-ASM: vfchedbs
  idx = vec_all_nlt(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CHECK-ASM: vfchdbs

  idx = vec_all_nan(vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
  // CHECK-ASM: vftcidb
  idx = vec_all_numeric(vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
  // CHECK-ASM: vftcidb

  idx = vec_any_eq(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CHECK-ASM: vfcedbs

  idx = vec_any_ne(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfcedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CHECK-ASM: vfcedbs

  idx = vec_any_ge(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CHECK-ASM: vfchedbs

  idx = vec_any_gt(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CHECK-ASM: vfchdbs

  idx = vec_any_le(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CHECK-ASM: vfchedbs

  idx = vec_any_lt(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CHECK-ASM: vfchdbs

  idx = vec_any_nge(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CHECK-ASM: vfchedbs
  idx = vec_any_ngt(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CHECK-ASM: vfchdbs
  idx = vec_any_nle(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchedbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CHECK-ASM: vfchedbs
  idx = vec_any_nlt(vd, vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vfchdbs(<2 x double> %{{.*}}, <2 x double> %{{.*}})
  // CHECK-ASM: vfchdbs

  idx = vec_any_nan(vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
  // CHECK-ASM: vftcidb
  idx = vec_any_numeric(vd);
  // CHECK: call { <2 x i64>, i32 } @llvm.s390.vftcidb(<2 x double> %{{.*}}, i32 15)
  // CHECK-ASM: vftcidb
}
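
// FP arithmetic, conversions, and rounding all lower to constrained
// intrinsics; the operations marked "(emulated)" have no dedicated z13
// instruction and expand to generic IR instead, so they carry no asm checks.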
void test_float(void) {
  // CHECK-ASM-LABEL: test_float

  vd = vec_abs(vd);
  // CHECK: call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}})
  // CHECK-ASM: vflpdb

  vd = vec_nabs(vd);
  // CHECK: [[ABS:%[^ ]+]] = tail call <2 x double> @llvm.fabs.v2f64(<2 x double> %{{.*}})
  // CHECK-NEXT: fneg <2 x double> [[ABS]]
  // CHECK-ASM: vflndb

  vd = vec_madd(vd, vd, vd);
  // CHECK: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfmadb
  vd = vec_msub(vd, vd, vd);
  // CHECK: [[NEG:%[^ ]+]] = fneg <2 x double> %{{.*}}
  // CHECK: call <2 x double> @llvm.experimental.constrained.fma.v2f64(<2 x double> %{{.*}}, <2 x double> %{{.*}}, <2 x double> [[NEG]], metadata !{{.*}})
  // CHECK-ASM: vfmsdb
  vd = vec_sqrt(vd);
  // CHECK: call <2 x double> @llvm.experimental.constrained.sqrt.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfsqdb
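
  // vec_ld2f/vec_st2f widen two floats to doubles on load and narrow them
  // back on store, so they emit a constrained fpext/fptrunc around a plain
  // <2 x float> memory access.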
  vd = vec_ld2f(cptrf);
  // CHECK: [[VAL:%[^ ]+]] = load <2 x float>, ptr %{{.*}}
  // CHECK: call <2 x double> @llvm.experimental.constrained.fpext.v2f64.v2f32(<2 x float> [[VAL]], metadata !{{.*}})
  // (emulated)
  vec_st2f(vd, ptrf);
  // CHECK: [[VAL:%[^ ]+]] = tail call <2 x float> @llvm.experimental.constrained.fptrunc.v2f32.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK: store <2 x float> [[VAL]], ptr %{{.*}}
  // (emulated)
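
  // The fixed-point conversions scale by a power of two: vec_ctd(v, n)
  // converts to double and multiplies by 2^-n (0.5 for n == 1,
  // 0x3E00000000000000 == 2^-31 for n == 31), while vec_ctsl/vec_ctul
  // multiply by 2^n before converting back to integer.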
  vd = vec_ctd(vsl, 0);
  // CHECK: call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
  // (emulated)
  vd = vec_ctd(vul, 0);
  // CHECK: call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
  // (emulated)
  vd = vec_ctd(vsl, 1);
  // CHECK: [[VAL:%[^ ]+]] = tail call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
  // CHECK: call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> [[VAL]], <2 x double> splat (double 5.000000e-01), metadata !{{.*}})
  // (emulated)
  vd = vec_ctd(vul, 1);
  // CHECK: [[VAL:%[^ ]+]] = tail call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
  // CHECK: call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> [[VAL]], <2 x double> splat (double 5.000000e-01), metadata !{{.*}})
  // (emulated)
  vd = vec_ctd(vsl, 31);
  // CHECK: [[VAL:%[^ ]+]] = tail call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
  // CHECK: call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> [[VAL]], <2 x double> splat (double 0x3E00000000000000), metadata !{{.*}})
  // (emulated)
  vd = vec_ctd(vul, 31);
  // CHECK: [[VAL:%[^ ]+]] = tail call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
  // CHECK: call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> [[VAL]], <2 x double> splat (double 0x3E00000000000000), metadata !{{.*}})
  // (emulated)

  vsl = vec_ctsl(vd, 0);
  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
  // (emulated)
  vul = vec_ctul(vd, 0);
  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
  // (emulated)
  vsl = vec_ctsl(vd, 1);
  // CHECK: [[VAL:%[^ ]+]] = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> {{.*}}, <2 x double> splat (double 2.000000e+00), metadata !{{.*}})
  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> [[VAL]], metadata !{{.*}})
  // (emulated)
  vul = vec_ctul(vd, 1);
  // CHECK: [[VAL:%[^ ]+]] = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %{{.*}}, <2 x double> splat (double 2.000000e+00), metadata !{{.*}})
  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> [[VAL]], metadata !{{.*}})
  // (emulated)
  vsl = vec_ctsl(vd, 31);
  // CHECK: [[VAL:%[^ ]+]] = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %{{.*}}, <2 x double> splat (double 0x41E0000000000000), metadata !{{.*}})
  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> [[VAL]], metadata !{{.*}})
  // (emulated)
  vul = vec_ctul(vd, 31);
  // CHECK: [[VAL:%[^ ]+]] = tail call <2 x double> @llvm.experimental.constrained.fmul.v2f64(<2 x double> %{{.*}}, <2 x double> splat (double 0x41E0000000000000), metadata !{{.*}})
  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> [[VAL]], metadata !{{.*}})
  // (emulated)

  vd = vec_double(vsl);
  // CHECK: call <2 x double> @llvm.experimental.constrained.sitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vcdgb
  vd = vec_double(vul);
  // CHECK: call <2 x double> @llvm.experimental.constrained.uitofp.v2f64.v2i64(<2 x i64> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vcdlgb

  vsl = vec_signed(vd);
  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptosi.v2i64.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vcgdb
  vul = vec_unsigned(vd);
  // CHECK: call <2 x i64> @llvm.experimental.constrained.fptoui.v2i64.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vclgdb
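
  // All the rounding builtins select VFIDB; its last two operands encode
  // inexact suppression (4) and the rounding direction (6 = toward +inf,
  // 7 = toward -inf, 5 = toward zero, 0 = current mode), which is why
  // vec_roundc (nearbyint) suppresses the inexact exception while vec_rint
  // does not.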
  vd = vec_roundp(vd);
  // CHECK: call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 6
  vd = vec_ceil(vd);
  // CHECK: call <2 x double> @llvm.experimental.constrained.ceil.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 6
  vd = vec_roundm(vd);
  // CHECK: call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 7
  vd = vec_floor(vd);
  // CHECK: call <2 x double> @llvm.experimental.constrained.floor.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 7
  vd = vec_roundz(vd);
  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
  vd = vec_trunc(vd);
  // CHECK: call <2 x double> @llvm.experimental.constrained.trunc.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 5
  vd = vec_roundc(vd);
  // CHECK: call <2 x double> @llvm.experimental.constrained.nearbyint.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 4, 0
  vd = vec_rint(vd);
  // CHECK: call <2 x double> @llvm.experimental.constrained.rint.v2f64(<2 x double> %{{.*}}, metadata !{{.*}})
  // CHECK-ASM: vfidb %{{.*}}, %{{.*}}, 0, 0
  vd = vec_round(vd);