// REQUIRES: powerpc-registered-target
// RUN: %clang_cc1 -flax-vector-conversions=none -target-feature +altivec -target-feature +isa-v207-instructions -target-feature +power8-vector -triple powerpc64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// RUN: %clang_cc1 -flax-vector-conversions=none -target-feature +altivec -target-feature +isa-v207-instructions -target-feature +power8-vector -triple powerpc64le-unknown-unknown -emit-llvm %s -o - | FileCheck %s -check-prefix=CHECK-LE
// RUN: not %clang_cc1 -target-feature +altivec -target-feature +vsx -triple powerpc64-unknown-unknown -emit-llvm %s -o - 2>&1 | FileCheck %s -check-prefix=CHECK-PPC
// Added -target-feature +vsx above to avoid errors about "vector double" and to
// generate the correct errors for functions that are only overloaded with VSX
// (vec_cmpge, vec_cmple). Without this option, there is only one overload so
// it is selected.

#include <altivec.h>

// Scalar globals used by the vec_extract/vec_insert/vec_splats tests below.
signed int si;
signed long long sll;
unsigned long long ull;
signed __int128 sx;
unsigned __int128 ux;
double d;
vector signed char vsc = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5 };
vector unsigned char vuc = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5 };
vector bool char vbc = { 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1 };

vector signed short vss = { 0, 1, 2, 3, 4, 5, 6, 7 };
vector unsigned short vus = { 0, 1, 2, 3, 4, 5, 6, 7 };
vector bool short vbs = { 1, 1, 0, 0, 0, 0, 1, 1 };

vector signed int vsi = { -1, 2, -3, 4 };
vector unsigned int vui = { 1, 2, 3, 4 };
vector bool int vbi = {0, -1, -1, 0};

vector signed long long vsll = { 1, 2 };
vector unsigned long long vull = { 1, 2 };
vector bool long long vbll = { 1, 0 };

vector signed __int128 vsx = { 1 };
vector unsigned __int128 vux = { 1 };

vector float vfa = { 1.e-4f, -132.23f, -22.1, 32.00f };
vector float vfb = { 1.e-4f, -132.23f, -22.1, 32.00f };
vector double vda = { 1.e-11, -132.23e10 };
vector double vdb = { 1.e-11, -132.23e10 };
int res_i;
double res_d;
signed long long res_sll;
unsigned long long res_ull;

vector signed char res_vsc;
vector unsigned char res_vuc;
vector bool char res_vbc;

vector signed short res_vss;
vector unsigned short res_vus;
vector bool short res_vbs;

vector signed int res_vsi;
vector unsigned int res_vui;
vector bool int res_vbi;

vector signed long long res_vsll;
vector unsigned long long res_vull;
vector bool long long res_vbll;

vector signed __int128 res_vsx;
vector unsigned __int128 res_vux;

vector float res_vf;
vector double res_vd;
// CHECK-LABEL: define{{.*}} void @test1
void test1() {
  res_vsll = vec_abs(vsll);
// CHECK: call <2 x i64> @llvm.ppc.altivec.vmaxsd(<2 x i64> %{{[0-9]*}}, <2 x i64>
// CHECK-LE: call <2 x i64> @llvm.ppc.altivec.vmaxsd(<2 x i64> %{{[0-9]*}}, <2 x i64>
// CHECK-PPC: error: call to 'vec_abs' is ambiguous
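  /* vec_add */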
  res_vsll = vec_add(vsll, vsll);
// CHECK: add <2 x i64>
// CHECK-LE: add <2 x i64>

  res_vull = vec_add(vull, vull);
// CHECK: add <2 x i64>
// CHECK-LE: add <2 x i64>

  res_vuc = vec_add_u128(vuc, vuc);
// CHECK: add <1 x i128>
// CHECK-LE: add <1 x i128>
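  /* vec_addc */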
  res_vsi = vec_addc(vsi, vsi);
// CHECK: @llvm.ppc.altivec.vaddcuw
// CHECK-LE: @llvm.ppc.altivec.vaddcuw

  res_vui = vec_addc(vui, vui);
// CHECK: @llvm.ppc.altivec.vaddcuw
// CHECK-LE: @llvm.ppc.altivec.vaddcuw

  res_vsx = vec_addc(vsx, vsx);
// CHECK: @llvm.ppc.altivec.vaddcuq
// CHECK-LE: @llvm.ppc.altivec.vaddcuq

  res_vux = vec_addc(vux, vux);
// CHECK: @llvm.ppc.altivec.vaddcuq
// CHECK-LE: @llvm.ppc.altivec.vaddcuq

  res_vuc = vec_addc_u128(vuc, vuc);
// CHECK: @llvm.ppc.altivec.vaddcuq
// CHECK-LE: @llvm.ppc.altivec.vaddcuq
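  /* vec_adde */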
  res_vsx = vec_adde(vsx, vsx, vsx);
// CHECK: @llvm.ppc.altivec.vaddeuqm
// CHECK-LE: @llvm.ppc.altivec.vaddeuqm

  res_vux = vec_adde(vux, vux, vux);
// CHECK: @llvm.ppc.altivec.vaddeuqm
// CHECK-LE: @llvm.ppc.altivec.vaddeuqm

  res_vuc = vec_adde_u128(vuc, vuc, vuc);
// CHECK: @llvm.ppc.altivec.vaddeuqm
// CHECK-LE: @llvm.ppc.altivec.vaddeuqm
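  /* vec_addec */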
  res_vsx = vec_addec(vsx, vsx, vsx);
// CHECK: @llvm.ppc.altivec.vaddecuq
// CHECK-LE: @llvm.ppc.altivec.vaddecuq

  res_vuc = vec_addec_u128(vuc, vuc, vuc);
// CHECK: @llvm.ppc.altivec.vaddecuq
// CHECK-LE: @llvm.ppc.altivec.vaddecuq
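  /* vec_mergee */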
  res_vbi = vec_mergee(vbi, vbi);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vsi = vec_mergee(vsi, vsi);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vui = vec_mergee(vui, vui);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm
// CHECK-PPC: error: call to undeclared function 'vec_mergee'

  res_vbll = vec_mergee(vbll, vbll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vsll = vec_mergee(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vull = vec_mergee(vull, vull);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vf = vec_mergee(vfa, vfa);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vd = vec_mergee(vda, vda);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm
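  /* vec_mergeo */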
  res_vbi = vec_mergeo(vbi, vbi);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vsi = vec_mergeo(vsi, vsi);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vui = vec_mergeo(vui, vui);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm
// CHECK-PPC: error: call to undeclared function 'vec_mergeo'
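  /* vec_cmpeq, vec_cmpge, vec_cmple, vec_cmpgt, vec_cmplt */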
  res_vbll = vec_cmpeq(vbll, vbll);
// CHECK: @llvm.ppc.altivec.vcmpequd
// CHECK-LE: @llvm.ppc.altivec.vcmpequd

  res_vbll = vec_cmpeq(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpequd
// CHECK-LE: @llvm.ppc.altivec.vcmpequd

  res_vbll = vec_cmpeq(vull, vull);
// CHECK: @llvm.ppc.altivec.vcmpequd
// CHECK-LE: @llvm.ppc.altivec.vcmpequd

  res_vbll = vec_cmpge(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd

  res_vbll = vec_cmpge(vull, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud

  res_vbll = vec_cmple(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd

  res_vbll = vec_cmple(vull, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud

  res_vbll = vec_cmpgt(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vcmpgtsd
// CHECK-LE: @llvm.ppc.altivec.vcmpgtsd

  res_vbll = vec_cmpgt(vull, vull);
// CHECK: @llvm.ppc.altivec.vcmpgtud
// CHECK-LE: @llvm.ppc.altivec.vcmpgtud

  res_vbll = vec_cmplt(vsll, vsll);
// CHECK: call <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64> %{{[0-9]*}}, <2 x i64> %{{[0-9]*}})
// CHECK-LE: call <2 x i64> @llvm.ppc.altivec.vcmpgtsd(<2 x i64> %{{[0-9]*}}, <2 x i64> %{{[0-9]*}})

  res_vbll = vec_cmplt(vull, vull);
// CHECK: call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %{{[0-9]*}}, <2 x i64> %{{[0-9]*}})
// CHECK-LE: call <2 x i64> @llvm.ppc.altivec.vcmpgtud(<2 x i64> %{{[0-9]*}}, <2 x i64> %{{[0-9]*}})
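  /* vec_eqv */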
  res_vsc = vec_eqv(vsc, vsc);
// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
// CHECK-PPC: error: assigning to

  res_vbc = vec_eqv(vbc, vbc);
// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
// CHECK-PPC: error: assigning to

  res_vuc = vec_eqv(vuc, vuc);
// CHECK: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: [[T1:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <16 x i8> {{.+}} to <4 x i32>
// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: bitcast <4 x i32> [[T3]] to <16 x i8>
// CHECK-PPC: error: assigning to

  res_vss = vec_eqv(vss, vss);
// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
// CHECK-PPC: error: assigning to

  res_vbs = vec_eqv(vbs, vbs);
// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
// CHECK-PPC: error: assigning to

  res_vus = vec_eqv(vus, vus);
// CHECK: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: [[T1:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <8 x i16> {{.+}} to <4 x i32>
// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: bitcast <4 x i32> [[T3]] to <8 x i16>
// CHECK-PPC: error: assigning to

  res_vsi = vec_eqv(vsi, vsi);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
// CHECK-PPC: error: assigning to

  res_vbi = vec_eqv(vbi, vbi);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
// CHECK-PPC: error: assigning to

  res_vui = vec_eqv(vui, vui);
// CHECK: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
// CHECK-LE: call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> {{.*}}, <4 x i32> {{.+}})
// CHECK-PPC: error: assigning to

  res_vsll = vec_eqv(vsll, vsll);
// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
// CHECK-PPC: error: assigning to

  res_vbll = vec_eqv(vbll, vbll);
// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
// CHECK-PPC: error: assigning to

  res_vull = vec_eqv(vull, vull);
// CHECK: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: [[T1:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <2 x i64> {{.+}} to <4 x i32>
// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x i64>
// CHECK-PPC: error: assigning to

  res_vf = vec_eqv(vfa, vfa);
// CHECK: [[T1:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: [[T1:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <4 x float> {{.+}} to <4 x i32>
// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: bitcast <4 x i32> [[T3]] to <4 x float>
// CHECK-PPC: error: assigning to

  res_vd = vec_eqv(vda, vda);
// CHECK: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
// CHECK: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
// CHECK: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: [[T1:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
// CHECK-LE: [[T2:%.+]] = bitcast <2 x double> {{.+}} to <4 x i32>
// CHECK-LE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.vsx.xxleqv(<4 x i32> [[T1]], <4 x i32> [[T2]])
// CHECK-LE: bitcast <4 x i32> [[T3]] to <2 x double>
// CHECK-PPC: error: assigning to
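  /* vec_extract */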
  res_sll = vec_extract(vsll, si);
// CHECK: extractelement <2 x i64>
// CHECK-LE: extractelement <2 x i64>

  res_ull = vec_extract(vull, si);
// CHECK: extractelement <2 x i64>
// CHECK-LE: extractelement <2 x i64>

  res_ull = vec_extract(vbll, si);
// CHECK: extractelement <2 x i64>
// CHECK-LE: extractelement <2 x i64>

  res_d = vec_extract(vda, si);
// CHECK: extractelement <2 x double>
// CHECK-LE: extractelement <2 x double>
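  /* vec_insert */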
  res_vsll = vec_insert(sll, vsll, si);
// CHECK: insertelement <2 x i64>
// CHECK-LE: insertelement <2 x i64>

  res_vbll = vec_insert(ull, vbll, si);
// CHECK: insertelement <2 x i64>
// CHECK-LE: insertelement <2 x i64>

  res_vull = vec_insert(ull, vull, si);
// CHECK: insertelement <2 x i64>
// CHECK-LE: insertelement <2 x i64>

  res_vd = vec_insert(d, vda, si);
// CHECK: insertelement <2 x double>
// CHECK-LE: insertelement <2 x double>
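  /* vec_cntlz */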
  res_vsc = vec_cntlz(vsc);
// CHECK: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false)
// CHECK-LE: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false)
// CHECK-PPC: error: call to undeclared function 'vec_cntlz'

  res_vuc = vec_cntlz(vuc);
// CHECK: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false)
// CHECK-LE: call <16 x i8> @llvm.ctlz.v16i8(<16 x i8> %{{.+}}, i1 false)

  res_vss = vec_cntlz(vss);
// CHECK: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.+}}, i1 false)
// CHECK-LE: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.+}}, i1 false)

  res_vus = vec_cntlz(vus);
// CHECK: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.+}}, i1 false)
// CHECK-LE: call <8 x i16> @llvm.ctlz.v8i16(<8 x i16> %{{.+}}, i1 false)

  res_vsi = vec_cntlz(vsi);
// CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.+}}, i1 false)
// CHECK-LE: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.+}}, i1 false)

  res_vui = vec_cntlz(vui);
// CHECK: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.+}}, i1 false)
// CHECK-LE: call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %{{.+}}, i1 false)

  res_vsll = vec_cntlz(vsll);
// CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.+}}, i1 false)
// CHECK-LE: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.+}}, i1 false)

  res_vull = vec_cntlz(vull);
// CHECK: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.+}}, i1 false)
// CHECK-LE: call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %{{.+}}, i1 false)
/* ----------------------- predicates --------------------------- */
  res_i = vec_all_eq(vda, vda);
// CHECK: @llvm.ppc.vsx.xvcmpeqdp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p

  res_i = vec_all_eq(vfa, vfa);
// CHECK: @llvm.ppc.vsx.xvcmpeqsp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpeqsp.p

  res_i = vec_all_ne(vda, vda);
// CHECK: @llvm.ppc.vsx.xvcmpeqdp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p

  res_i = vec_all_ne(vfa, vfa);
// CHECK: @llvm.ppc.vsx.xvcmpeqsp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpeqsp.p

  res_i = vec_all_nge(vda, vda);
// CHECK: @llvm.ppc.vsx.xvcmpgedp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p

  res_i = vec_all_ngt(vda, vda);
// CHECK: @llvm.ppc.vsx.xvcmpgtdp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p

  res_i = vec_any_eq(vda, vda);
// CHECK: @llvm.ppc.vsx.xvcmpeqdp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p

  res_i = vec_any_eq(vfa, vfa);
// CHECK: @llvm.ppc.vsx.xvcmpeqsp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpeqsp.p

  res_i = vec_any_ne(vda, vda);
// CHECK: @llvm.ppc.vsx.xvcmpeqdp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p

  res_i = vec_any_ne(vfa, vfa);
// CHECK: @llvm.ppc.vsx.xvcmpeqsp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpeqsp.p

  res_i = vec_all_ge(vda, vda);
// CHECK: @llvm.ppc.vsx.xvcmpgedp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p

  res_i = vec_all_ge(vfa, vfa);
// CHECK: @llvm.ppc.vsx.xvcmpgesp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgesp.p

  res_i = vec_all_gt(vda, vda);
// CHECK: @llvm.ppc.vsx.xvcmpgtdp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p

  res_i = vec_all_gt(vfa, vfa);
// CHECK: @llvm.ppc.vsx.xvcmpgtsp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgtsp.p

  res_i = vec_all_le(vda, vda);
// CHECK: @llvm.ppc.vsx.xvcmpgedp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p

  res_i = vec_all_le(vfa, vfa);
// CHECK: @llvm.ppc.vsx.xvcmpgesp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgesp.p

  res_i = vec_all_lt(vda, vda);
// CHECK: @llvm.ppc.vsx.xvcmpgtdp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p

  res_i = vec_all_lt(vfa, vfa);
// CHECK: @llvm.ppc.vsx.xvcmpgtsp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgtsp.p

  res_i = vec_all_nan(vda);
// CHECK: @llvm.ppc.vsx.xvcmpeqdp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpeqdp.p

  res_i = vec_all_nan(vfa);
// CHECK: @llvm.ppc.vsx.xvcmpeqsp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpeqsp.p

  res_i = vec_any_ge(vda, vda);
// CHECK: @llvm.ppc.vsx.xvcmpgedp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p

  res_i = vec_any_ge(vfa, vfa);
// CHECK: @llvm.ppc.vsx.xvcmpgesp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgesp.p

  res_i = vec_any_gt(vda, vda);
// CHECK: @llvm.ppc.vsx.xvcmpgtdp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p

  res_i = vec_any_le(vda, vda);
// CHECK: @llvm.ppc.vsx.xvcmpgedp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgedp.p

  res_i = vec_any_le(vfa, vfa);
// CHECK: @llvm.ppc.vsx.xvcmpgesp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgesp.p

  res_i = vec_any_lt(vda, vda);
// CHECK: @llvm.ppc.vsx.xvcmpgtdp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgtdp.p

  res_i = vec_any_lt(vfa, vfa);
// CHECK: @llvm.ppc.vsx.xvcmpgtsp.p
// CHECK-LE: @llvm.ppc.vsx.xvcmpgtsp.p
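  /* vec_max */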
  res_vsll = vec_max(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vmaxsd
// CHECK-LE: @llvm.ppc.altivec.vmaxsd
// CHECK-PPC: error: call to 'vec_max' is ambiguous

  res_vsll = vec_max(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vmaxsd
// CHECK-LE: @llvm.ppc.altivec.vmaxsd
// CHECK-PPC: error: call to 'vec_max' is ambiguous

  res_vsll = vec_max(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vmaxsd
// CHECK-LE: @llvm.ppc.altivec.vmaxsd
// CHECK-PPC: error: call to 'vec_max' is ambiguous

  res_vull = vec_max(vull, vull);
// CHECK: @llvm.ppc.altivec.vmaxud
// CHECK-LE: @llvm.ppc.altivec.vmaxud
// CHECK-PPC: error: call to 'vec_max' is ambiguous

  res_vull = vec_max(vbll, vull);
// CHECK: @llvm.ppc.altivec.vmaxud
// CHECK-LE: @llvm.ppc.altivec.vmaxud
// CHECK-PPC: error: call to 'vec_max' is ambiguous

  res_vull = vec_max(vull, vbll);
// CHECK: @llvm.ppc.altivec.vmaxud
// CHECK-LE: @llvm.ppc.altivec.vmaxud
// CHECK-PPC: error: call to 'vec_max' is ambiguous
  res_vbll = vec_mergeh(vbll, vbll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm

  res_vbll = vec_mergel(vbll, vbll);
// CHECK: @llvm.ppc.altivec.vperm
// CHECK-LE: @llvm.ppc.altivec.vperm
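  /* vec_min */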
  res_vsll = vec_min(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vminsd
// CHECK-LE: @llvm.ppc.altivec.vminsd
// CHECK-PPC: error: call to 'vec_min' is ambiguous

  res_vsll = vec_min(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vminsd
// CHECK-LE: @llvm.ppc.altivec.vminsd
// CHECK-PPC: error: call to 'vec_min' is ambiguous

  res_vsll = vec_min(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vminsd
// CHECK-LE: @llvm.ppc.altivec.vminsd
// CHECK-PPC: error: call to 'vec_min' is ambiguous

  res_vull = vec_min(vull, vull);
// CHECK: @llvm.ppc.altivec.vminud
// CHECK-LE: @llvm.ppc.altivec.vminud
// CHECK-PPC: error: call to 'vec_min' is ambiguous

  res_vull = vec_min(vbll, vull);
// CHECK: @llvm.ppc.altivec.vminud
// CHECK-LE: @llvm.ppc.altivec.vminud
// CHECK-PPC: error: call to 'vec_min' is ambiguous

  res_vull = vec_min(vull, vbll);
// CHECK: @llvm.ppc.altivec.vminud
// CHECK-LE: @llvm.ppc.altivec.vminud
// CHECK-PPC: error: call to 'vec_min' is ambiguous
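  /* vec_mule, vec_mulo */
  // Note: the even/odd multiply intrinsics swap between endiannesses
  // (vmulesw on big endian corresponds to vmulosw on little endian), which
  // is why the BE and LE lines below name different intrinsics.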
  res_vsll = vec_mule(vsi, vsi);
// CHECK: @llvm.ppc.altivec.vmulesw
// CHECK-LE: @llvm.ppc.altivec.vmulosw
// CHECK-PPC: error: call to 'vec_mule' is ambiguous

  res_vull = vec_mule(vui, vui);
// CHECK: @llvm.ppc.altivec.vmuleuw
// CHECK-LE: @llvm.ppc.altivec.vmulouw
// CHECK-PPC: error: call to 'vec_mule' is ambiguous

  res_vsll = vec_mulo(vsi, vsi);
// CHECK: @llvm.ppc.altivec.vmulosw
// CHECK-LE: @llvm.ppc.altivec.vmulesw
// CHECK-PPC: error: call to 'vec_mulo' is ambiguous

  res_vull = vec_mulo(vui, vui);
// CHECK: @llvm.ppc.altivec.vmulouw
// CHECK-LE: @llvm.ppc.altivec.vmuleuw
// CHECK-PPC: error: call to 'vec_mulo' is ambiguous

  res_vsi = vec_packs(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vpksdss
// CHECK-LE: @llvm.ppc.altivec.vpksdss
// CHECK-PPC: error: call to 'vec_packs' is ambiguous

  res_vui = vec_packs(vull, vull);
// CHECK: @llvm.ppc.altivec.vpkudus
// CHECK-LE: @llvm.ppc.altivec.vpkudus
// CHECK-PPC: error: call to 'vec_packs' is ambiguous

  res_vui = vec_packsu(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vpksdus
// CHECK-LE: @llvm.ppc.altivec.vpksdus
// CHECK-PPC: error: call to 'vec_packsu' is ambiguous

  res_vui = vec_packsu(vull, vull);
// CHECK: @llvm.ppc.altivec.vpkudus
// CHECK-LE: @llvm.ppc.altivec.vpkudus
// CHECK-PPC: error: call to 'vec_packsu' is ambiguous
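  /* vec_rl, vec_sl */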
  res_vsll = vec_rl(vsll, vull);
// CHECK: @llvm.ppc.altivec.vrld
// CHECK-LE: @llvm.ppc.altivec.vrld

  res_vull = vec_rl(vull, vull);
// CHECK: @llvm.ppc.altivec.vrld
// CHECK-LE: @llvm.ppc.altivec.vrld

  res_vsll = vec_sl(vsll, vull);
// CHECK: shl <2 x i64>
// CHECK-LE: shl <2 x i64>

  res_vull = vec_sl(vull, vull);
// CHECK: shl <2 x i64>
// CHECK-LE: shl <2 x i64>
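  /* vec_sr */
  // The IR below masks the shift amount with a urem by the element width
  // (64) before the lshr, matching the modulo semantics of the hardware
  // shift instructions.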
  res_vsll = vec_sr(vsll, vull);
// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <2 x i64> {{[0-9a-zA-Z%.]+}}, <i64 64, i64 64>
// CHECK: lshr <2 x i64> {{[0-9a-zA-Z%.]+}}, [[UREM]]
// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <2 x i64> {{[0-9a-zA-Z%.]+}}, <i64 64, i64 64>
// CHECK-LE: lshr <2 x i64> {{[0-9a-zA-Z%.]+}}, [[UREM]]

  res_vull = vec_sr(vull, vull);
// CHECK: [[UREM:[0-9a-zA-Z%.]+]] = urem <2 x i64> {{[0-9a-zA-Z%.]+}}, <i64 64, i64 64>
// CHECK: lshr <2 x i64> {{[0-9a-zA-Z%.]+}}, [[UREM]]
// CHECK-LE: [[UREM:[0-9a-zA-Z%.]+]] = urem <2 x i64> {{[0-9a-zA-Z%.]+}}, <i64 64, i64 64>
// CHECK-LE: lshr <2 x i64> {{[0-9a-zA-Z%.]+}}, [[UREM]]

  res_vsll = vec_sra(vsll, vull);
// CHECK: ashr <2 x i64>
// CHECK-LE: ashr <2 x i64>

  res_vull = vec_sra(vull, vull);
// CHECK: ashr <2 x i64>
// CHECK-LE: ashr <2 x i64>
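  /* vec_splats */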
  res_vsll = vec_splats(sll);
// CHECK: insertelement <2 x i64>
// CHECK-LE: insertelement <2 x i64>

  res_vull = vec_splats(ull);
// CHECK: insertelement <2 x i64>
// CHECK-LE: insertelement <2 x i64>

  res_vsx = vec_splats(sx);
// CHECK: insertelement <1 x i128>
// CHECK-LE: insertelement <1 x i128>

  res_vux = vec_splats(ux);
// CHECK: insertelement <1 x i128>
// CHECK-LE: insertelement <1 x i128>

  res_vd = vec_splats(d);
// CHECK: insertelement <2 x double>
// CHECK-LE: insertelement <2 x double>

  res_vsll = vec_unpackh(vsi);
// CHECK: llvm.ppc.altivec.vupkhsw
// CHECK-LE: llvm.ppc.altivec.vupklsw
// CHECK-PPC: error: call to 'vec_unpackh' is ambiguous

  res_vbll = vec_unpackh(vbi);
// CHECK: llvm.ppc.altivec.vupkhsw
// CHECK-LE: llvm.ppc.altivec.vupklsw
// CHECK-PPC: error: call to 'vec_unpackh' is ambiguous

  res_vsll = vec_unpackl(vsi);
// CHECK: llvm.ppc.altivec.vupklsw
// CHECK-LE: llvm.ppc.altivec.vupkhsw
// CHECK-PPC: error: call to 'vec_unpackl' is ambiguous

  res_vbll = vec_unpackl(vbi);
// CHECK: llvm.ppc.altivec.vupklsw
// CHECK-LE: llvm.ppc.altivec.vupkhsw
// CHECK-PPC: error: call to 'vec_unpackl' is ambiguous

  res_vsi = vec_vpksdss(vsll, vsll);
// CHECK: llvm.ppc.altivec.vpksdss
// CHECK-LE: llvm.ppc.altivec.vpksdss
// CHECK-PPC: error: call to undeclared function 'vec_vpksdss'

  res_vui = vec_vpksdus(vsll, vsll);
// CHECK: llvm.ppc.altivec.vpksdus
// CHECK-LE: llvm.ppc.altivec.vpksdus
// CHECK-PPC: error: call to undeclared function 'vec_vpksdus'

  res_vsi = vec_vpkudum(vsll, vsll);
// CHECK-PPC: error: call to undeclared function 'vec_vpkudum'

  res_vui = vec_vpkudum(vull, vull);

  res_vui = vec_vpkudus(vull, vull);
// CHECK: llvm.ppc.altivec.vpkudus
// CHECK-LE: llvm.ppc.altivec.vpkudus
// CHECK-PPC: error: call to undeclared function 'vec_vpkudus'

  res_vsll = vec_vupkhsw(vsi);
// CHECK: llvm.ppc.altivec.vupkhsw
// CHECK-LE: llvm.ppc.altivec.vupklsw
// CHECK-PPC: error: call to undeclared function 'vec_vupkhsw'

  res_vbll = vec_vupkhsw(vbi);
// CHECK: llvm.ppc.altivec.vupkhsw
// CHECK-LE: llvm.ppc.altivec.vupklsw

  res_vsll = vec_vupklsw(vsi);
// CHECK: llvm.ppc.altivec.vupklsw
// CHECK-LE: llvm.ppc.altivec.vupkhsw
// CHECK-PPC: error: call to undeclared function 'vec_vupklsw'

  res_vbll = vec_vupklsw(vbi);
// CHECK: llvm.ppc.altivec.vupklsw
// CHECK-LE: llvm.ppc.altivec.vupkhsw

  res_vsll = vec_max(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vmaxsd
// CHECK-LE: @llvm.ppc.altivec.vmaxsd

  res_vsll = vec_max(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vmaxsd
// CHECK-LE: @llvm.ppc.altivec.vmaxsd

  res_vsll = vec_max(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vmaxsd
// CHECK-LE: @llvm.ppc.altivec.vmaxsd

  res_vull = vec_max(vull, vull);
// CHECK: @llvm.ppc.altivec.vmaxud
// CHECK-LE: @llvm.ppc.altivec.vmaxud

  res_vull = vec_max(vbll, vull);
// CHECK: @llvm.ppc.altivec.vmaxud
// CHECK-LE: @llvm.ppc.altivec.vmaxud

  res_vsll = vec_min(vsll, vsll);
// CHECK: @llvm.ppc.altivec.vminsd
// CHECK-LE: @llvm.ppc.altivec.vminsd

  res_vsll = vec_min(vbll, vsll);
// CHECK: @llvm.ppc.altivec.vminsd
// CHECK-LE: @llvm.ppc.altivec.vminsd

  res_vsll = vec_min(vsll, vbll);
// CHECK: @llvm.ppc.altivec.vminsd
// CHECK-LE: @llvm.ppc.altivec.vminsd

  res_vull = vec_min(vull, vull);
// CHECK: @llvm.ppc.altivec.vminud
// CHECK-LE: @llvm.ppc.altivec.vminud

  res_vull = vec_min(vbll, vull);
// CHECK: @llvm.ppc.altivec.vminud
// CHECK-LE: @llvm.ppc.altivec.vminud
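  /* vec_nand */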
  res_vsc = vec_nand(vsc, vsc);
// CHECK: [[T1:%.+]] = and <16 x i8>
// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-LE: [[T1:%.+]] = and <16 x i8>
// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-PPC: error: call to undeclared function 'vec_nand'

  res_vbc = vec_nand(vbc, vbc);
// CHECK: [[T1:%.+]] = and <16 x i8>
// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-LE: [[T1:%.+]] = and <16 x i8>
// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>

  res_vuc = vec_nand(vuc, vuc);
// CHECK: [[T1:%.+]] = and <16 x i8>
// CHECK: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-LE: [[T1:%.+]] = and <16 x i8>
// CHECK-LE: xor <16 x i8> [[T1]], <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>

  res_vss = vec_nand(vss, vss);
// CHECK: [[T1:%.+]] = and <8 x i16>
// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-LE: [[T1:%.+]] = and <8 x i16>
// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>

  res_vbs = vec_nand(vbs, vbs);
// CHECK: [[T1:%.+]] = and <8 x i16>
// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-LE: [[T1:%.+]] = and <8 x i16>
// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>

  res_vus = vec_nand(vus, vus);
// CHECK: [[T1:%.+]] = and <8 x i16>
// CHECK: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-LE: [[T1:%.+]] = and <8 x i16>
// CHECK-LE: xor <8 x i16> [[T1]], <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>

  res_vsi = vec_nand(vsi, vsi);
// CHECK: [[T1:%.+]] = and <4 x i32>
// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-LE: [[T1:%.+]] = and <4 x i32>
// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>

  res_vbi = vec_nand(vbi, vbi);
// CHECK: [[T1:%.+]] = and <4 x i32>
// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-LE: [[T1:%.+]] = and <4 x i32>
// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>

  res_vui = vec_nand(vui, vui);
// CHECK: [[T1:%.+]] = and <4 x i32>
// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-LE: [[T1:%.+]] = and <4 x i32>
// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>

  res_vf = vec_nand(vfa, vfa);
// CHECK: [[T1:%.+]] = and <4 x i32>
// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-LE: [[T1:%.+]] = and <4 x i32>
// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>

  res_vsll = vec_nand(vsll, vsll);
// CHECK: [[T1:%.+]] = and <2 x i64>
// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
// CHECK-LE: [[T1:%.+]] = and <2 x i64>
// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>

  res_vbll = vec_nand(vbll, vbll);
// CHECK: [[T1:%.+]] = and <2 x i64>
// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
// CHECK-LE: [[T1:%.+]] = and <2 x i64>
// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>

  res_vull = vec_nand(vull, vull);
// CHECK: [[T1:%.+]] = and <2 x i64>
// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
// CHECK-LE: [[T1:%.+]] = and <2 x i64>
// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>

  res_vd = vec_nand(vda, vda);
// CHECK: [[T1:%.+]] = and <2 x i64>
// CHECK: xor <2 x i64> [[T1]], <i64 -1, i64 -1>
// CHECK-LE: [[T1:%.+]] = and <2 x i64>
// CHECK-LE: xor <2 x i64> [[T1]], <i64 -1, i64 -1>

  res_vf = vec_nand(vfa, vfa);
// CHECK: [[T1:%.+]] = and <4 x i32>
// CHECK: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-LE: [[T1:%.+]] = and <4 x i32>
// CHECK-LE: xor <4 x i32> [[T1]], <i32 -1, i32 -1, i32 -1, i32 -1>
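  /* vec_orc */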
  res_vsc = vec_orc(vsc, vsc);
// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK: or <16 x i8> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]
// CHECK-PPC: error: call to undeclared function 'vec_orc'

  res_vsc = vec_orc(vsc, vbc);
// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK: or <16 x i8> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]

  res_vsc = vec_orc(vbc, vsc);
// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK: or <16 x i8> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]

  res_vuc = vec_orc(vuc, vuc);
// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK: or <16 x i8> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]

  res_vuc = vec_orc(vuc, vbc);
// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK: or <16 x i8> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]

  res_vuc = vec_orc(vbc, vuc);
// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK: or <16 x i8> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]

  res_vbc = vec_orc(vbc, vbc);
// CHECK: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK: or <16 x i8> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <16 x i8> {{%.+}}, <i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1, i8 -1>
// CHECK-LE: or <16 x i8> {{%.+}}, [[T1]]

  res_vss = vec_orc(vss, vss);
// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK: or <8 x i16> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]

  res_vss = vec_orc(vss, vbs);
// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK: or <8 x i16> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]

  res_vss = vec_orc(vbs, vss);
// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK: or <8 x i16> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]

  res_vus = vec_orc(vus, vus);
// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK: or <8 x i16> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]

  res_vus = vec_orc(vus, vbs);
// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK: or <8 x i16> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]

  res_vus = vec_orc(vbs, vus);
// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK: or <8 x i16> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]

  res_vbs = vec_orc(vbs, vbs);
// CHECK: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK: or <8 x i16> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <8 x i16> {{%.+}}, <i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
// CHECK-LE: or <8 x i16> {{%.+}}, [[T1]]

  res_vsi = vec_orc(vsi, vsi);
// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK: or <4 x i32> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]

  res_vsi = vec_orc(vsi, vbi);
// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK: or <4 x i32> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]

  res_vsi = vec_orc(vbi, vsi);
// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK: or <4 x i32> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]

  res_vui = vec_orc(vui, vui);
// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK: or <4 x i32> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]

  res_vui = vec_orc(vui, vbi);
// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK: or <4 x i32> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]

  res_vui = vec_orc(vbi, vui);
// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK: or <4 x i32> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]

  res_vbi = vec_orc(vbi, vbi);
// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK: or <4 x i32> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]

  res_vf = vec_orc(vbi, vfa);
// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK: or <4 x i32> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]

  res_vf = vec_orc(vfa, vbi);
// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK: or <4 x i32> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]

  res_vf = vec_orc(vfa, vfb);
// CHECK: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK: or <4 x i32> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <4 x i32> {{%.+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK-LE: or <4 x i32> {{%.+}}, [[T1]]

  res_vsll = vec_orc(vsll, vsll);
// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK: or <2 x i64> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]

  res_vsll = vec_orc(vsll, vbll);
// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK: or <2 x i64> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]

  res_vsll = vec_orc(vbll, vsll);
// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK: or <2 x i64> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]

  res_vull = vec_orc(vull, vull);
// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK: or <2 x i64> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]

  res_vull = vec_orc(vull, vbll);
// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK: or <2 x i64> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]

  res_vull = vec_orc(vbll, vull);
// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK: or <2 x i64> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]

  res_vbll = vec_orc(vbll, vbll);
// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK: or <2 x i64> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]

  res_vd = vec_orc(vbll, vda);
// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK: or <2 x i64> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]

  res_vd = vec_orc(vda, vbll);
// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK: or <2 x i64> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]

  res_vd = vec_orc(vda, vdb);
// CHECK: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK: or <2 x i64> {{%.+}}, [[T1]]
// CHECK-LE: [[T1:%.+]] = xor <2 x i64> {{%.+}}, <i64 -1, i64 -1>
// CHECK-LE: or <2 x i64> {{%.+}}, [[T1]]
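  /* vec_sub */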
  res_vsll = vec_sub(vsll, vsll);
// CHECK: sub <2 x i64>
// CHECK-LE: sub <2 x i64>

  res_vull = vec_sub(vull, vull);
// CHECK: sub <2 x i64>
// CHECK-LE: sub <2 x i64>

  res_vd = vec_sub(vda, vda);
// CHECK: fsub <2 x double>
// CHECK-LE: fsub <2 x double>

  res_vsx = vec_sub(vsx, vsx);
// CHECK: sub <1 x i128>
// CHECK-LE: sub <1 x i128>

  res_vux = vec_sub(vux, vux);
// CHECK: sub <1 x i128>
// CHECK-LE: sub <1 x i128>

  res_vsll = vec_vbpermq(vsc, vsc);
// CHECK: llvm.ppc.altivec.vbpermq
// CHECK-LE: llvm.ppc.altivec.vbpermq

  res_vsll = vec_vbpermq(vuc, vuc);
// CHECK: llvm.ppc.altivec.vbpermq
// CHECK-LE: llvm.ppc.altivec.vbpermq
// CHECK-PPC: error: call to undeclared function 'vec_vbpermq'

  res_vsc = vec_vgbbd(vsc);
// CHECK: llvm.ppc.altivec.vgbbd
// CHECK-LE: llvm.ppc.altivec.vgbbd

  res_vuc = vec_vgbbd(vuc);
// CHECK: llvm.ppc.altivec.vgbbd
// CHECK-LE: llvm.ppc.altivec.vgbbd
// CHECK-PPC: error: call to undeclared function 'vec_vgbbd'

  res_vuc = vec_gb(vuc);
// CHECK: llvm.ppc.altivec.vgbbd
// CHECK-LE: llvm.ppc.altivec.vgbbd
// CHECK-PPC: error: call to undeclared function 'vec_gb'

  res_vsll = vec_gbb(vsll);
// CHECK: llvm.ppc.altivec.vgbbd
// CHECK-LE: llvm.ppc.altivec.vgbbd

  res_vull = vec_gbb(vull);
// CHECK: llvm.ppc.altivec.vgbbd
// CHECK-LE: llvm.ppc.altivec.vgbbd

  res_vull = vec_bperm(vux, vuc);
// CHECK: llvm.ppc.altivec.vbpermq
// CHECK-LE: llvm.ppc.altivec.vbpermq

  res_vuc = vec_bperm(vuc, vuc);
// CHECK: llvm.ppc.altivec.vbpermq
// CHECK-LE: llvm.ppc.altivec.vbpermq

  res_vsll = vec_neg(vsll);
// CHECK: sub <2 x i64> zeroinitializer, {{%[0-9]+}}
// CHECK-LE: sub <2 x i64> zeroinitializer, {{%[0-9]+}}
// CHECK-PPC: call to 'vec_neg' is ambiguous
}
vector signed int test_vec_addec_signed (vector signed int a, vector signed int b, vector signed int c) {
  return vec_addec(a, b, c);
// CHECK-LABEL: @test_vec_addec_signed
// CHECK: icmp slt i32 {{%[0-9]+}}, 4
// CHECK: extractelement
// CHECK: extractelement
// CHECK: extractelement
// CHECK: and i32 {{%[0-9]+}}, 1
// CHECK: trunc i64 {{%[0-9]+}} to i32
// CHECK: trunc i64 {{%[0-9]+}} to i32
// CHECK: add nsw i32
// CHECK: ret <4 x i32>
}
vector unsigned int test_vec_addec_unsigned (vector unsigned int a, vector unsigned int b, vector unsigned int c) {
  return vec_addec(a, b, c);
// CHECK-LABEL: @test_vec_addec_unsigned
// CHECK: icmp slt i32 {{%[0-9]+}}, 4
// CHECK: extractelement
// CHECK: extractelement
// CHECK: extractelement
// CHECK: trunc i64 {{%[0-9]+}} to i32
// CHECK: trunc i64 {{%[0-9]+}} to i32
// CHECK: add nsw i32
// CHECK: ret <4 x i32>
}
vector signed int test_vec_subec_signed (vector signed int a, vector signed int b, vector signed int c) {
  return vec_subec(a, b, c);
// CHECK-LABEL: @test_vec_subec_signed
// CHECK: xor <4 x i32> {{%[0-9]+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK: ret <4 x i32>
}
vector unsigned int test_vec_subec_unsigned (vector unsigned int a, vector unsigned int b, vector unsigned int c) {
  return vec_subec(a, b, c);
// CHECK-LABEL: @test_vec_subec_unsigned
// CHECK: xor <4 x i32> {{%[0-9]+}}, <i32 -1, i32 -1, i32 -1, i32 -1>
// CHECK: ret <4 x i32>
}
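/* BCD (binary-coded decimal) builtins. The predicate forms below all lower
   to llvm.ppc.bcdsub.p with an immediate field code selecting the condition,
   as the patterns show: 0 = eq, 2 = lt, 3 = ge, 4 = gt, 5 = le,
   6 = overflow/invalid. */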
int test_bcd_invalid(vector unsigned char a) {
// CHECK-LABEL: test_bcd_invalid
// CHECK: call i32 @llvm.ppc.bcdsub.p(i32 6, <16 x i8>
// CHECK-LE-LABEL: test_bcd_invalid
// CHECK-LE: call i32 @llvm.ppc.bcdsub.p(i32 6, <16 x i8>
  return __bcd_invalid(a);
}

vector unsigned char test_bcd_add(vector unsigned char a, vector unsigned char b,
                                  long ps) {
// CHECK-LABEL: test_bcd_add
// CHECK: call <16 x i8> @llvm.ppc.bcdadd(<16 x i8>
// CHECK-LE-LABEL: test_bcd_add
// CHECK-LE: call <16 x i8> @llvm.ppc.bcdadd(<16 x i8>
  return __bcdadd(a, b, 1);
}

int test_bcd_add_ofl(vector unsigned char a, vector unsigned char b, long ps) {
// CHECK-LABEL: test_bcd_add_ofl
// CHECK: call i32 @llvm.ppc.bcdadd.p(i32 6, <16 x i8>
// CHECK-LE-LABEL: test_bcd_add_ofl
// CHECK-LE: call i32 @llvm.ppc.bcdadd.p(i32 6, <16 x i8>
  return __bcdadd_ofl(a, b);
}

vector unsigned char test_bcd_sub(vector unsigned char a, vector unsigned char b,
                                  long ps) {
// CHECK-LABEL: test_bcd_sub
// CHECK: call <16 x i8> @llvm.ppc.bcdsub(<16 x i8>
// CHECK-LE-LABEL: test_bcd_sub
// CHECK-LE: call <16 x i8> @llvm.ppc.bcdsub(<16 x i8>
  return __bcdsub(a, b, 0);
}

int test_bcd_sub_ofl(vector unsigned char a, vector unsigned char b, long ps) {
// CHECK-LABEL: test_bcd_sub_ofl
// CHECK: call i32 @llvm.ppc.bcdsub.p(i32 6, <16 x i8>
// CHECK-LE-LABEL: test_bcd_sub_ofl
// CHECK-LE: call i32 @llvm.ppc.bcdsub.p(i32 6, <16 x i8>
  return __bcdsub_ofl(a, b);
}

int test_bcd_cmplt(vector unsigned char a, vector unsigned char b) {
// CHECK-LABEL: test_bcd_cmplt
// CHECK: call i32 @llvm.ppc.bcdsub.p(i32 2, <16 x i8>
// CHECK-LE-LABEL: test_bcd_cmplt
// CHECK-LE: call i32 @llvm.ppc.bcdsub.p(i32 2, <16 x i8>
  return __bcdcmplt(a, b);
}

int test_bcd_cmpgt(vector unsigned char a, vector unsigned char b) {
// CHECK-LABEL: test_bcd_cmpgt
// CHECK: call i32 @llvm.ppc.bcdsub.p(i32 4, <16 x i8>
// CHECK-LE-LABEL: test_bcd_cmpgt
// CHECK-LE: call i32 @llvm.ppc.bcdsub.p(i32 4, <16 x i8>
  return __bcdcmpgt(a, b);
}

int test_bcd_cmpeq(vector unsigned char a, vector unsigned char b) {
// CHECK-LABEL: test_bcd_cmpeq
// CHECK: call i32 @llvm.ppc.bcdsub.p(i32 0, <16 x i8>
// CHECK-LE-LABEL: test_bcd_cmpeq
// CHECK-LE: call i32 @llvm.ppc.bcdsub.p(i32 0, <16 x i8>
  return __bcdcmpeq(a, b);
}

int test_bcd_cmpge(vector unsigned char a, vector unsigned char b) {
// CHECK-LABEL: test_bcd_cmpge
// CHECK: call i32 @llvm.ppc.bcdsub.p(i32 3, <16 x i8>
// CHECK-LE-LABEL: test_bcd_cmpge
// CHECK-LE: call i32 @llvm.ppc.bcdsub.p(i32 3, <16 x i8>
  return __bcdcmpge(a, b);
}

int test_bcd_cmple(vector unsigned char a, vector unsigned char b) {
// CHECK-LABEL: test_bcd_cmple
// CHECK: call i32 @llvm.ppc.bcdsub.p(i32 5, <16 x i8>
// CHECK-LE-LABEL: test_bcd_cmple
// CHECK-LE: call i32 @llvm.ppc.bcdsub.p(i32 5, <16 x i8>
  return __bcdcmple(a, b);
}