clang/test/CodeGen/PowerPC/builtins-ppc-p10vector.c
// REQUIRES: powerpc-registered-target
// RUN: %clang_cc1 -flax-vector-conversions=none -target-feature +vsx \
// RUN:   -target-cpu pwr10 -triple powerpc64-unknown-unknown -emit-llvm %s \
// RUN:   -o - | FileCheck %s -check-prefixes=CHECK-BE,CHECK
// RUN: %clang_cc1 -flax-vector-conversions=none -target-feature +vsx \
// RUN:   -target-cpu pwr10 -triple powerpc64le-unknown-unknown -emit-llvm %s \
// RUN:   -o - | FileCheck %s -check-prefixes=CHECK-LE,CHECK
// RUN: %clang_cc1 -flax-vector-conversions=none -target-feature +vsx \
// RUN:   -target-cpu pwr10 -triple powerpc64-ibm-aix-xcoff -emit-llvm %s \
// RUN:   -o - | FileCheck %s -check-prefixes=CHECK-BE,CHECK

#include <altivec.h>

vector signed __int128 vi128a;
vector signed char vsca, vscb;
vector unsigned char vuca, vucb, vucc;
vector signed short vssa, vssb;
vector unsigned short vusa, vusb, vusc;
vector signed int vsia, vsib;
vector unsigned int vuia, vuib, vuic;
vector signed long long vslla, vsllb;
vector unsigned long long vulla, vullb, vullc;
vector signed __int128 vsi128a, vsi128b, vsi128c;
vector unsigned __int128 vui128a, vui128b, vui128c;
vector bool __int128 vbi128a, vbi128b;
vector float vfa, vfb;
vector double vda, vdb;
float fa;
double da;
signed int sia;
signed int *iap;
unsigned int uia, uib, *uiap;
signed char *cap;
unsigned char uca;
const unsigned char *ucap;
const signed short *sap;
unsigned short usa;
const unsigned short *usap;
const signed long long *llap;
signed long long llb;
unsigned long long ulla;
const unsigned long long *ullap;

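// Element-wise multiply. vec_mul returns the low-order half of each
// product; ISA 3.1 adds the 64-bit forms, and Clang lowers them to a plain
// IR mul rather than a target intrinsic.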
vector signed long long test_vec_mul_sll(void) {
  // CHECK: mul <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_mul(vslla, vsllb);
}

vector unsigned long long test_vec_mul_ull(void) {
  // CHECK: mul <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_mul(vulla, vullb);
}

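// Element-wise divide. ISA 3.1 adds hardware divide for word, doubleword
// and quadword elements, so vec_div lowers to generic sdiv/udiv.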
vector signed int test_vec_div_si(void) {
  // CHECK: sdiv <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_div(vsia, vsib);
}

vector unsigned int test_vec_div_ui(void) {
  // CHECK: udiv <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_div(vuia, vuib);
}

vector signed long long test_vec_div_sll(void) {
  // CHECK: sdiv <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_div(vslla, vsllb);
}

vector unsigned long long test_vec_div_ull(void) {
  // CHECK: udiv <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_div(vulla, vullb);
}

vector unsigned __int128 test_vec_div_u128(void) {
  // CHECK: udiv <1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_div(vui128a, vui128b);
}

vector signed __int128 test_vec_div_s128(void) {
  // CHECK: sdiv <1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_div(vsi128a, vsi128b);
}

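// vec_dive is the extended divide (vdivesw and friends): per ISA 3.1 it
// computes roughly (a << element-width) / b, which has no generic IR
// equivalent, so it stays a target intrinsic.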
vector signed int test_vec_dive_si(void) {
  // CHECK: @llvm.ppc.altivec.vdivesw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}})
  // CHECK-NEXT: ret <4 x i32>
  return vec_dive(vsia, vsib);
}

vector unsigned int test_vec_dive_ui(void) {
  // CHECK: @llvm.ppc.altivec.vdiveuw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}})
  // CHECK-NEXT: ret <4 x i32>
  return vec_dive(vuia, vuib);
}

vector signed long long test_vec_dive_sll(void) {
  // CHECK: @llvm.ppc.altivec.vdivesd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}})
  // CHECK-NEXT: ret <2 x i64>
  return vec_dive(vslla, vsllb);
}

vector unsigned long long test_vec_dive_ull(void) {
  // CHECK: @llvm.ppc.altivec.vdiveud(<2 x i64> %{{.+}}, <2 x i64> %{{.+}})
  // CHECK-NEXT: ret <2 x i64>
  return vec_dive(vulla, vullb);
}

vector unsigned __int128 test_vec_dive_u128(void) {
  // CHECK: @llvm.ppc.altivec.vdiveuq(<1 x i128> %{{.+}}, <1 x i128> %{{.+}})
  // CHECK-NEXT: ret <1 x i128>
  return vec_dive(vui128a, vui128b);
}

vector signed __int128 test_vec_dive_s128(void) {
  // CHECK: @llvm.ppc.altivec.vdivesq(<1 x i128> %{{.+}}, <1 x i128> %{{.+}})
  // CHECK-NEXT: ret <1 x i128>
  return vec_dive(vsi128a, vsi128b);
}

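// vec_mulh returns the high-order half of each element-wise product
// (vmulhsw/vmulhuw/vmulhsd/vmulhud).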
vector signed int test_vec_mulh_si(void) {
  // CHECK: @llvm.ppc.altivec.vmulhsw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}})
  // CHECK-NEXT: ret <4 x i32>
  return vec_mulh(vsia, vsib);
}

vector unsigned int test_vec_mulh_ui(void) {
  // CHECK: @llvm.ppc.altivec.vmulhuw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}})
  // CHECK-NEXT: ret <4 x i32>
  return vec_mulh(vuia, vuib);
}

vector signed long long test_vec_mulh_sll(void) {
  // CHECK: @llvm.ppc.altivec.vmulhsd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}})
  // CHECK-NEXT: ret <2 x i64>
  return vec_mulh(vslla, vsllb);
}

vector unsigned long long test_vec_mulh_ull(void) {
  // CHECK: @llvm.ppc.altivec.vmulhud(<2 x i64> %{{.+}}, <2 x i64> %{{.+}})
  // CHECK-NEXT: ret <2 x i64>
  return vec_mulh(vulla, vullb);
}

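// vec_mod is the element-wise remainder; like vec_div it lowers to plain
// IR srem/urem now that the hardware has vector modulo instructions.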
vector signed int test_vec_mod_si(void) {
  // CHECK: srem <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_mod(vsia, vsib);
}

vector unsigned int test_vec_mod_ui(void) {
  // CHECK: urem <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_mod(vuia, vuib);
}

vector signed long long test_vec_mod_sll(void) {
  // CHECK: srem <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_mod(vslla, vsllb);
}

vector unsigned long long test_vec_mod_ull(void) {
  // CHECK: urem <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_mod(vulla, vullb);
}

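// bfloat16 conversions are exposed as raw builtins rather than vec_* APIs:
// xvcvspbf16 truncates single precision to bfloat16, and xvcvbf16spn
// widens bfloat16 back to single precision (the trailing "n" is
// non-signaling).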
vector unsigned char test_xvcvspbf16(vector unsigned char vc) {
  // CHECK-LABEL: @test_xvcvspbf16(
  // CHECK: [[TMP0:%.*]] = call <16 x i8> @llvm.ppc.vsx.xvcvspbf16(<16 x i8> [[VC:%.*]])
  return __builtin_vsx_xvcvspbf16(vc);
}

vector unsigned char test_xvcvbf16spn(vector unsigned char vc) {
  // CHECK-LABEL: @test_xvcvbf16spn(
  // CHECK: [[TMP0:%.*]] = call <16 x i8> @llvm.ppc.vsx.xvcvbf16spn(<16 x i8> [[VC:%.*]])
  return __builtin_vsx_xvcvbf16spn(vc);
}

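// Parallel bit deposit/extract per doubleword: vec_pdep scatters the
// low-order bits of each element to the bit positions set in the mask,
// and vec_pext gathers the masked bits down to the low-order end.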
vector unsigned long long test_vpdepd(void) {
  // CHECK: @llvm.ppc.altivec.vpdepd(<2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_pdep(vulla, vullb);
}

vector unsigned long long test_vpextd(void) {
  // CHECK: @llvm.ppc.altivec.vpextd(<2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_pext(vulla, vullb);
}

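// String isolate: vec_stril zeroes every byte/halfword element after the
// first zero element, and vec_strir works from the other end of the
// vector. The _p forms return whether a zero element was present, which
// makes them usable as branch conditions (the *_2 tests below exercise
// that in a loop). The left/right intrinsic choice flips between BE and
// LE so the semantics follow element order.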
vector unsigned char test_vec_stril_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_stril(vuca);
}

vector signed char test_vec_stril_sc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_stril(vsca);
}

vector unsigned short test_vec_stril_us(void) {
  // CHECK-BE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_stril(vusa);
}

vector signed short test_vec_stril_ss(void) {
  // CHECK-BE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_stril(vssa);
}

int test_vec_stril_p_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
  // CHECK-BE-NEXT: ret i32
  // CHECK-LE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
  // CHECK-LE-NEXT: ret i32
  return vec_stril_p(vuca);
}

int test_vec_stril_p_sc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
  // CHECK-BE-NEXT: ret i32
  // CHECK-LE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
  // CHECK-LE-NEXT: ret i32
  return vec_stril_p(vsca);
}

int test_vec_stril_p_us(void) {
  // CHECK-BE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
  // CHECK-BE-NEXT: ret i32
  // CHECK-LE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
  // CHECK-LE-NEXT: ret i32
  return vec_stril_p(vusa);
}

int test_vec_stril_p_ss(void) {
  // CHECK-BE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
  // CHECK-BE-NEXT: ret i32
  // CHECK-LE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
  // CHECK-LE-NEXT: ret i32
  return vec_stril_p(vssa);
}

vector unsigned char test_vec_stril_p_uc_2(vector unsigned char *ptr, int len) {
  // CHECK-BE: icmp slt i32
  // CHECK-BE: br i1
  // CHECK-BE: for.body:
  // CHECK-BE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
  // CHECK-BE: if.then:
  // CHECK-BE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
  // CHECK-BE: ret <16 x i8>
  // CHECK-LE: icmp slt i32
  // CHECK-LE: br i1
  // CHECK-LE: for.body:
  // CHECK-LE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
  // CHECK-LE: if.then:
  // CHECK-LE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
  // CHECK-LE: ret <16 x i8>
  for (int i = 0; i < len; i++) {
    if (vec_stril_p(*(ptr + i))) {
      return vec_stril(*(ptr + i));
    }
  }
  return vec_stril(*(ptr));
}

vector signed char test_vec_stril_p_sc_2(vector signed char *ptr, int len) {
  // CHECK-BE: icmp slt i32
  // CHECK-BE: br i1
  // CHECK-BE: for.body:
  // CHECK-BE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
  // CHECK-BE: if.then:
  // CHECK-BE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
  // CHECK-BE: ret <16 x i8>
  // CHECK-LE: icmp slt i32
  // CHECK-LE: br i1
  // CHECK-LE: for.body:
  // CHECK-LE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
  // CHECK-LE: if.then:
  // CHECK-LE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
  // CHECK-LE: ret <16 x i8>
  for (int i = 0; i < len; i++) {
    if (vec_stril_p(*(ptr + i))) {
      return vec_stril(*(ptr + i));
    }
  }
  return vec_stril(*(ptr));
}

vector unsigned short test_vec_stril_p_us_2(vector unsigned short *ptr, int len) {
  // CHECK-BE: icmp slt i32
  // CHECK-BE: br i1
  // CHECK-BE: for.body:
  // CHECK-BE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
  // CHECK-BE: if.then:
  // CHECK-BE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
  // CHECK-BE: ret <8 x i16>
  // CHECK-LE: icmp slt i32
  // CHECK-LE: br i1
  // CHECK-LE: for.body:
  // CHECK-LE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
  // CHECK-LE: if.then:
  // CHECK-LE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
  // CHECK-LE: ret <8 x i16>
  for (int i = 0; i < len; i++) {
    if (vec_stril_p(*(ptr + i))) {
      return vec_stril(*(ptr + i));
    }
  }
  return vec_stril(*(ptr));
}

vector signed short test_vec_stril_p_ss_2(vector signed short *ptr, int len) {
  // CHECK-BE: icmp slt i32
  // CHECK-BE: br i1
  // CHECK-BE: for.body:
  // CHECK-BE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
  // CHECK-BE: if.then:
  // CHECK-BE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
  // CHECK-BE: ret <8 x i16>
  // CHECK-LE: icmp slt i32
  // CHECK-LE: br i1
  // CHECK-LE: for.body:
  // CHECK-LE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
  // CHECK-LE: if.then:
  // CHECK-LE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
  // CHECK-LE: ret <8 x i16>
  for (int i = 0; i < len; i++) {
    if (vec_stril_p(*(ptr + i))) {
      return vec_stril(*(ptr + i));
    }
  }
  return vec_stril(*(ptr));
}

vector unsigned char test_vec_strir_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_strir(vuca);
}

vector signed char test_vec_strir_sc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_strir(vsca);
}

vector unsigned short test_vec_strir_us(void) {
  // CHECK-BE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_strir(vusa);
}

vector signed short test_vec_strir_ss(void) {
  // CHECK-BE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_strir(vssa);
}

int test_vec_strir_p_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
  // CHECK-BE-NEXT: ret i32
  // CHECK-LE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
  // CHECK-LE-NEXT: ret i32
  return vec_strir_p(vuca);
}

int test_vec_strir_p_sc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
  // CHECK-BE-NEXT: ret i32
  // CHECK-LE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
  // CHECK-LE-NEXT: ret i32
  return vec_strir_p(vsca);
}

int test_vec_strir_p_us(void) {
  // CHECK-BE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
  // CHECK-BE-NEXT: ret i32
  // CHECK-LE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
  // CHECK-LE-NEXT: ret i32
  return vec_strir_p(vusa);
}

int test_vec_strir_p_ss(void) {
  // CHECK-BE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
  // CHECK-BE-NEXT: ret i32
  // CHECK-LE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
  // CHECK-LE-NEXT: ret i32
  return vec_strir_p(vssa);
}

vector unsigned char test_vec_strir_p_uc_2(vector unsigned char *ptr, int len) {
  // CHECK-BE: icmp slt i32
  // CHECK-BE: br i1
  // CHECK-BE: for.body:
  // CHECK-BE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
  // CHECK-BE: if.then:
  // CHECK-BE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
  // CHECK-BE: ret <16 x i8>
  // CHECK-LE: icmp slt i32
  // CHECK-LE: br i1
  // CHECK-LE: for.body:
  // CHECK-LE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
  // CHECK-LE: if.then:
  // CHECK-LE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
  // CHECK-LE: ret <16 x i8>
  for (int i = 0; i < len; i++) {
    if (vec_strir_p(*(ptr + i))) {
      return vec_strir(*(ptr + i));
    }
  }
  return vec_strir(*(ptr));
}

vector signed char test_vec_strir_p_sc_2(vector signed char *ptr, int len) {
  // CHECK-BE: icmp slt i32
  // CHECK-BE: br i1
  // CHECK-BE: for.body:
  // CHECK-BE: @llvm.ppc.altivec.vstribr.p(i32 0, <16 x i8> %{{.+}})
  // CHECK-BE: if.then:
  // CHECK-BE: @llvm.ppc.altivec.vstribr(<16 x i8> %{{.+}})
  // CHECK-BE: ret <16 x i8>
  // CHECK-LE: icmp slt i32
  // CHECK-LE: br i1
  // CHECK-LE: for.body:
  // CHECK-LE: @llvm.ppc.altivec.vstribl.p(i32 0, <16 x i8> %{{.+}})
  // CHECK-LE: if.then:
  // CHECK-LE: @llvm.ppc.altivec.vstribl(<16 x i8> %{{.+}})
  // CHECK-LE: ret <16 x i8>
  for (int i = 0; i < len; i++) {
    if (vec_strir_p(*(ptr + i))) {
      return vec_strir(*(ptr + i));
    }
  }
  return vec_strir(*(ptr));
}

vector unsigned short test_vec_strir_p_us_2(vector unsigned short *ptr, int len) {
  // CHECK-BE: icmp slt i32
  // CHECK-BE: br i1
  // CHECK-BE: for.body:
  // CHECK-BE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
  // CHECK-BE: if.then:
  // CHECK-BE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
  // CHECK-BE: ret <8 x i16>
  // CHECK-LE: icmp slt i32
  // CHECK-LE: br i1
  // CHECK-LE: for.body:
  // CHECK-LE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
  // CHECK-LE: if.then:
  // CHECK-LE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
  // CHECK-LE: ret <8 x i16>
  for (int i = 0; i < len; i++) {
    if (vec_strir_p(*(ptr + i))) {
      return vec_strir(*(ptr + i));
    }
  }
  return vec_strir(*(ptr));
}

vector signed short test_vec_strir_p_ss_2(vector signed short *ptr, int len) {
  // CHECK-BE: icmp slt i32
  // CHECK-BE: br i1
  // CHECK-BE: for.body:
  // CHECK-BE: @llvm.ppc.altivec.vstrihr.p(i32 0, <8 x i16> %{{.+}})
  // CHECK-BE: if.then:
  // CHECK-BE: @llvm.ppc.altivec.vstrihr(<8 x i16> %{{.+}})
  // CHECK-BE: ret <8 x i16>
  // CHECK-LE: icmp slt i32
  // CHECK-LE: br i1
  // CHECK-LE: for.body:
  // CHECK-LE: @llvm.ppc.altivec.vstrihl.p(i32 0, <8 x i16> %{{.+}})
  // CHECK-LE: if.then:
  // CHECK-LE: @llvm.ppc.altivec.vstrihl(<8 x i16> %{{.+}})
  // CHECK-LE: ret <8 x i16>
  for (int i = 0; i < len; i++) {
    if (vec_strir_p(*(ptr + i))) {
      return vec_strir(*(ptr + i));
    }
  }
  return vec_strir(*(ptr));
}

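// vec_extractm gathers the most-significant bit of each element into a
// bit mask returned in a GPR (vextractbm/hm/wm/dm/qm).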
unsigned int test_vec_extractm_uc(void) {
  // CHECK: @llvm.ppc.altivec.vextractbm(<16 x i8> %{{.+}})
  // CHECK-NEXT: ret i32
  return vec_extractm(vuca);
}

unsigned int test_vec_extractm_us(void) {
  // CHECK: @llvm.ppc.altivec.vextracthm(<8 x i16> %{{.+}})
  // CHECK-NEXT: ret i32
  return vec_extractm(vusa);
}

unsigned int test_vec_extractm_ui(void) {
  // CHECK: @llvm.ppc.altivec.vextractwm(<4 x i32> %{{.+}})
  // CHECK-NEXT: ret i32
  return vec_extractm(vuia);
}

unsigned int test_vec_extractm_ull(void) {
  // CHECK: @llvm.ppc.altivec.vextractdm(<2 x i64> %{{.+}})
  // CHECK-NEXT: ret i32
  return vec_extractm(vulla);
}

unsigned int test_vec_extractm_u128(void) {
  // CHECK: @llvm.ppc.altivec.vextractqm(<1 x i128> %{{.+}})
  // CHECK-NEXT: ret i32
  return vec_extractm(vui128a);
}

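// vec_cfuge is the doubleword centrifuge: within each doubleword, the
// bits selected by the mask are gathered to one end and the remaining
// bits to the other.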
vector unsigned long long test_vcfuged(void) {
  // CHECK: @llvm.ppc.altivec.vcfuged(<2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_cfuge(vulla, vullb);
}

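// vec_expandm broadcasts the most-significant bit of each element across
// the whole element, producing all-ones or all-zeros elements.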
vector unsigned char test_vec_expandm_uc(void) {
  // CHECK: @llvm.ppc.altivec.vexpandbm(<16 x i8> %{{.+}})
  // CHECK-NEXT: ret <16 x i8>
  return vec_expandm(vuca);
}

vector unsigned short test_vec_expandm_us(void) {
  // CHECK: @llvm.ppc.altivec.vexpandhm(<8 x i16> %{{.+}})
  // CHECK-NEXT: ret <8 x i16>
  return vec_expandm(vusa);
}

vector unsigned int test_vec_expandm_ui(void) {
  // CHECK: @llvm.ppc.altivec.vexpandwm(<4 x i32> %{{.+}})
  // CHECK-NEXT: ret <4 x i32>
  return vec_expandm(vuia);
}

vector unsigned long long test_vec_expandm_ull(void) {
  // CHECK: @llvm.ppc.altivec.vexpanddm(<2 x i64> %{{.+}})
  // CHECK-NEXT: ret <2 x i64>
  return vec_expandm(vulla);
}

vector unsigned __int128 test_vec_expandm_u128(void) {
  // CHECK: @llvm.ppc.altivec.vexpandqm(<1 x i128> %{{.+}})
  // CHECK-NEXT: ret <1 x i128>
  return vec_expandm(vui128a);
}

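// vec_cntm counts the elements whose most-significant bit matches the
// immediate second argument, returning the count as a 64-bit scalar.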
unsigned long long test_vec_cntm_uc(void) {
  // CHECK: @llvm.ppc.altivec.vcntmbb(<16 x i8> %{{.+}}, i32
  // CHECK-NEXT: ret i64
  return vec_cntm(vuca, 1);
}

unsigned long long test_vec_cntm_us(void) {
  // CHECK: @llvm.ppc.altivec.vcntmbh(<8 x i16> %{{.+}}, i32
  // CHECK-NEXT: ret i64
  return vec_cntm(vusa, 0);
}

unsigned long long test_vec_cntm_ui(void) {
  // CHECK: @llvm.ppc.altivec.vcntmbw(<4 x i32> %{{.+}}, i32
  // CHECK-NEXT: ret i64
  return vec_cntm(vuia, 1);
}

unsigned long long test_vec_cntm_ull(void) {
  // CHECK: @llvm.ppc.altivec.vcntmbd(<2 x i64> %{{.+}}, i32
  // CHECK-NEXT: ret i64
  return vec_cntm(vulla, 0);
}

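// The vec_gen*m family (mtvsrbm/hm/wm/dm/qm) expands a 64-bit scalar mask
// into a vector, one mask bit per element, each becoming all-ones or
// all-zeros. The immediate variants below also verify that the constant
// argument is materialized as the expected i64 value.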
vector unsigned char test_vec_genbm(void) {
  // CHECK: @llvm.ppc.altivec.mtvsrbm(i64 %{{.+}})
  // CHECK-NEXT: ret <16 x i8>
  return vec_genbm(ulla);
}

vector unsigned char test_vec_genbm_imm(void) {
  // CHECK: store i64 1
  // CHECK: @llvm.ppc.altivec.mtvsrbm(i64 %{{.+}})
  // CHECK-NEXT: ret <16 x i8>
  return vec_genbm(1);
}

vector unsigned char test_vec_genbm_imm2(void) {
  // CHECK: store i64 255
  // CHECK: @llvm.ppc.altivec.mtvsrbm(i64 %{{.+}})
  // CHECK-NEXT: ret <16 x i8>
  return vec_genbm(255);
}

vector unsigned char test_vec_genbm_imm3(void) {
  // CHECK: store i64 65535
  // CHECK: @llvm.ppc.altivec.mtvsrbm(i64 %{{.+}})
  // CHECK-NEXT: ret <16 x i8>
  return vec_genbm(65535);
}

vector unsigned char test_vec_genbm_imm4(void) {
  // CHECK: store i64 65536
  // CHECK: @llvm.ppc.altivec.mtvsrbm(i64 %{{.+}})
  // CHECK-NEXT: ret <16 x i8>
  return vec_genbm(65536);
}

vector unsigned char test_vec_genbm_imm5(void) {
  // CHECK: store i64 65546
  // CHECK: @llvm.ppc.altivec.mtvsrbm(i64 %{{.+}})
  // CHECK-NEXT: ret <16 x i8>
  return vec_genbm(65546);
}

vector unsigned short test_vec_genhm(void) {
  // CHECK: @llvm.ppc.altivec.mtvsrhm(i64 %{{.+}})
  // CHECK-NEXT: ret <8 x i16>
  return vec_genhm(ulla);
}

vector unsigned int test_vec_genwm(void) {
  // CHECK: @llvm.ppc.altivec.mtvsrwm(i64 %{{.+}})
  // CHECK-NEXT: ret <4 x i32>
  return vec_genwm(ulla);
}

vector unsigned long long test_vec_gendm(void) {
  // CHECK: @llvm.ppc.altivec.mtvsrdm(i64 %{{.+}})
  // CHECK-NEXT: ret <2 x i64>
  return vec_gendm(ulla);
}

vector unsigned __int128 test_vec_genqm(void) {
  // CHECK: @llvm.ppc.altivec.mtvsrqm(i64 %{{.+}})
  // CHECK-NEXT: ret <1 x i128>
  return vec_genqm(ulla);
}

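// vec_gnb gathers every Nth bit of the 128-bit source into a 64-bit
// scalar; N is an immediate in the range 2 through 7.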
unsigned long long test_vgnb_1(void) {
  // CHECK: @llvm.ppc.altivec.vgnb(<1 x i128> %{{.+}}, i32 2)
  // CHECK-NEXT: ret i64
  return vec_gnb(vui128a, 2);
}

unsigned long long test_vgnb_2(void) {
  // CHECK: @llvm.ppc.altivec.vgnb(<1 x i128> %{{.+}}, i32 7)
  // CHECK-NEXT: ret i64
  return vec_gnb(vui128a, 7);
}

unsigned long long test_vgnb_3(void) {
  // CHECK: @llvm.ppc.altivec.vgnb(<1 x i128> %{{.+}}, i32 5)
  // CHECK-NEXT: ret i64
  return vec_gnb(vui128a, 5);
}

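// vec_ternarylogic evaluates an arbitrary three-input boolean function:
// the 8-bit immediate is a truth table applied bitwise to the three
// operands. xxeval always operates on the raw <2 x i64> bits, whatever
// the element type of the operands.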
vector unsigned char test_xxeval_uc(void) {
  // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 0)
  // CHECK: ret <16 x i8>
  return vec_ternarylogic(vuca, vucb, vucc, 0);
}

vector unsigned short test_xxeval_us(void) {
  // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 255)
  // CHECK: ret <8 x i16>
  return vec_ternarylogic(vusa, vusb, vusc, 255);
}

vector unsigned int test_xxeval_ui(void) {
  // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 150)
  // CHECK: ret <4 x i32>
  return vec_ternarylogic(vuia, vuib, vuic, 150);
}

vector unsigned long long test_xxeval_ull(void) {
  // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 1)
  // CHECK: ret <2 x i64>
  return vec_ternarylogic(vulla, vullb, vullc, 1);
}

vector unsigned __int128 test_xxeval_ui128(void) {
  // CHECK: @llvm.ppc.vsx.xxeval(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32 246)
  // CHECK: ret <1 x i128>
  return vec_ternarylogic(vui128a, vui128b, vui128c, 246);
}

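// vec_genpcvm (xxgenpcv*m) builds a permute control vector from a mask
// vector, for use with vperm-style shuffles; the immediate selects the
// generation mode.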
vector unsigned char test_xxgenpcvbm(void) {
  // CHECK: @llvm.ppc.vsx.xxgenpcvbm(<16 x i8> %{{.+}}, i32
  // CHECK-NEXT: ret <16 x i8>
  return vec_genpcvm(vuca, 0);
}

vector unsigned short test_xxgenpcvhm(void) {
  // CHECK: @llvm.ppc.vsx.xxgenpcvhm(<8 x i16> %{{.+}}, i32
  // CHECK-NEXT: ret <8 x i16>
  return vec_genpcvm(vusa, 0);
}

vector unsigned int test_xxgenpcvwm(void) {
  // CHECK: @llvm.ppc.vsx.xxgenpcvwm(<4 x i32> %{{.+}}, i32
  // CHECK-NEXT: ret <4 x i32>
  return vec_genpcvm(vuia, 0);
}

vector unsigned long long test_xxgenpcvdm(void) {
  // CHECK: @llvm.ppc.vsx.xxgenpcvdm(<2 x i64> %{{.+}}, i32
  // CHECK-NEXT: ret <2 x i64>
  return vec_genpcvm(vulla, 0);
}

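// vec_clr_first / vec_clr_last zero a run of bytes at one end of the
// vector, with the count passed in a GPR. As with the string-isolate ops,
// the left/right instruction choice (vclrlb/vclrrb) flips between BE and
// LE so that "first" and "last" follow element order.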
vector signed char test_vec_clr_first_sc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vclrlb(<16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vclrrb(<16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_clr_first(vsca, uia);
}

vector unsigned char test_vec_clr_first_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vclrlb(<16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vclrrb(<16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_clr_first(vuca, uia);
}

vector signed char test_vec_clr_last_sc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vclrrb(<16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vclrlb(<16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_clr_last(vsca, uia);
}

vector unsigned char test_vec_clr_last_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vclrrb(<16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vclrlb(<16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_clr_last(vuca, uia);
}

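// vec_cntlzm / vec_cnttzm count leading/trailing zeros of each doubleword
// under a bit mask (vclzdm/vctzdm): only the bit positions selected by
// the mask participate in the count.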
vector unsigned long long test_vclzdm(void) {
  // CHECK: @llvm.ppc.altivec.vclzdm(<2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_cntlzm(vulla, vullb);
}

vector unsigned long long test_vctzdm(void) {
  // CHECK: @llvm.ppc.altivec.vctzdm(<2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_cnttzm(vulla, vullb);
}

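// vec_sldb / vec_srdb shift the 32-byte concatenation of two vectors left
// or right by an immediate number of bits and return one vector of the
// result. Every element type funnels through the <16 x i8> intrinsic,
// hence the bitcasts in the wider-element tests. The shift amount is
// taken modulo 8, which is why the srdb tests passing 8, 9 and 10 expect
// immediates 0, 1 and 2 in the IR.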
vector signed char test_vec_sldb_sc(void) {
  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 0
  // CHECK-NEXT: ret <16 x i8>
  return vec_sldb(vsca, vscb, 0);
}

vector unsigned char test_vec_sldb_uc(void) {
  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 1
  // CHECK-NEXT: ret <16 x i8>
  return vec_sldb(vuca, vucb, 1);
}

vector signed short test_vec_sldb_ss(void) {
  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 2
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_sldb(vssa, vssb, 2);
}

vector unsigned short test_vec_sldb_us(void) {
  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 3
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_sldb(vusa, vusb, 3);
}

vector signed int test_vec_sldb_si(void) {
  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 4
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_sldb(vsia, vsib, 4);
}

vector unsigned int test_vec_sldb_ui(void) {
  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 5
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_sldb(vuia, vuib, 5);
}

vector signed long long test_vec_sldb_sll(void) {
  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 6
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_sldb(vslla, vsllb, 6);
}

vector unsigned long long test_vec_sldb_ull(void) {
  // CHECK: @llvm.ppc.altivec.vsldbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 7
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_sldb(vulla, vullb, 7);
}

vector signed char test_vec_srdb_sc(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 0
  // CHECK-NEXT: ret <16 x i8>
  return vec_srdb(vsca, vscb, 8);
}

vector unsigned char test_vec_srdb_uc(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 1
  // CHECK-NEXT: ret <16 x i8>
  return vec_srdb(vuca, vucb, 9);
}

vector signed short test_vec_srdb_ss(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 2
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_srdb(vssa, vssb, 10);
}

vector unsigned short test_vec_srdb_us(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 3
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_srdb(vusa, vusb, 3);
}

vector signed int test_vec_srdb_si(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 4
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_srdb(vsia, vsib, 4);
}

vector unsigned int test_vec_srdb_ui(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 5
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_srdb(vuia, vuib, 5);
}

vector signed long long test_vec_srdb_sll(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 6
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_srdb(vslla, vsllb, 6);
}

vector unsigned long long test_vec_srdb_ull(void) {
  // CHECK: @llvm.ppc.altivec.vsrdbi(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32 7
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_srdb(vulla, vullb, 7);
}

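// vec_permx (xxpermx) is a permute with an extra 3-bit immediate; per ISA
// 3.1 the immediate is matched against the high bits of each permute
// control byte to decide whether the result byte comes from the source
// pair or is zeroed.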
vector signed char test_vec_permx_sc(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: ret <16 x i8>
  return vec_permx(vsca, vscb, vucc, 0);
}

vector unsigned char test_vec_permx_uc(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: ret <16 x i8>
  return vec_permx(vuca, vucb, vucc, 1);
}

vector signed short test_vec_permx_ss(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_permx(vssa, vssb, vucc, 2);
}

vector unsigned short test_vec_permx_us(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_permx(vusa, vusb, vucc, 3);
}

vector signed int test_vec_permx_si(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_permx(vsia, vsib, vucc, 4);
}

vector unsigned int test_vec_permx_ui(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_permx(vuia, vuib, vucc, 5);
}

vector signed long long test_vec_permx_sll(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_permx(vslla, vsllb, vucc, 6);
}

vector unsigned long long test_vec_permx_ull(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_permx(vulla, vullb, vucc, 7);
}

vector float test_vec_permx_f(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <4 x float>
  // CHECK-NEXT: ret <4 x float>
  return vec_permx(vfa, vfb, vucc, 0);
}

vector double test_vec_permx_d(void) {
  // CHECK: @llvm.ppc.vsx.xxpermx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-NEXT: bitcast <16 x i8> %{{.*}} to <2 x double>
  // CHECK-NEXT: ret <2 x double>
  return vec_permx(vda, vdb, vucc, 1);
}

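// vec_blendv selects between two vectors element by element, steered by
// the most-significant bit of each element of the third operand
// (xxblendvb/h/w/d); float and double reuse the word and doubleword forms
// through bitcasts.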
vector signed char test_vec_blend_sc(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvb(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8>
  // CHECK-NEXT: ret <16 x i8>
  return vec_blendv(vsca, vscb, vucc);
}

vector unsigned char test_vec_blend_uc(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvb(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, <16 x i8>
  // CHECK-NEXT: ret <16 x i8>
  return vec_blendv(vuca, vucb, vucc);
}

vector signed short test_vec_blend_ss(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvh(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_blendv(vssa, vssb, vusc);
}

vector unsigned short test_vec_blend_us(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvh(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, <8 x i16>
  // CHECK-NEXT: ret <8 x i16>
  return vec_blendv(vusa, vusb, vusc);
}

vector signed int test_vec_blend_si(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_blendv(vsia, vsib, vuic);
}

vector unsigned int test_vec_blend_ui(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, <4 x i32>
  // CHECK-NEXT: ret <4 x i32>
  return vec_blendv(vuia, vuib, vuic);
}

vector signed long long test_vec_blend_sll(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_blendv(vslla, vsllb, vullc);
}

vector unsigned long long test_vec_blend_ull(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64>
  // CHECK-NEXT: ret <2 x i64>
  return vec_blendv(vulla, vullb, vullc);
}

vector float test_vec_blend_f(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvw(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, <4 x i32>
  // CHECK-NEXT: bitcast <4 x i32> %{{.*}} to <4 x float>
  // CHECK-NEXT: ret <4 x float>
  return vec_blendv(vfa, vfb, vuic);
}

vector double test_vec_blend_d(void) {
  // CHECK: @llvm.ppc.vsx.xxblendvd(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, <2 x i64>
  // CHECK-NEXT: bitcast <2 x i64> %{{.*}} to <2 x double>
  // CHECK-NEXT: ret <2 x double>
  return vec_blendv(vda, vdb, vullc);
}

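// vec_replace_elt inserts a scalar into the element selected by an
// immediate index, and vec_replace_unaligned does the same at an
// arbitrary byte offset, returning vector unsigned char. Both lower to
// vinsw/vinsd, whose byte offset is endian-sensitive, so the BE and LE
// runs expect different immediates and carry full-body checks.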
// CHECK-BE-LABEL: @test_vec_replace_elt_si(
// CHECK-BE-NEXT: entry:
// CHECK-BE-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr @vsia, align 16
// CHECK-BE-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-BE-NEXT: [[TMP2:%.*]] = load i32, ptr @sia, align 4
// CHECK-BE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-BE-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.ppc.altivec.vinsw(<4 x i32> [[TMP3]], i32 [[TMP2]], i32 0)
// CHECK-BE-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK-BE-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x i32>
// CHECK-BE-NEXT: ret <4 x i32> [[TMP6]]
//
// CHECK-LE-LABEL: @test_vec_replace_elt_si(
// CHECK-LE-NEXT: entry:
// CHECK-LE-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr @vsia, align 16
// CHECK-LE-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-LE-NEXT: [[TMP2:%.*]] = load i32, ptr @sia, align 4
// CHECK-LE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-LE-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.ppc.altivec.vinsw(<4 x i32> [[TMP3]], i32 [[TMP2]], i32 12)
// CHECK-LE-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK-LE-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x i32>
// CHECK-LE-NEXT: ret <4 x i32> [[TMP6]]
//
vector signed int test_vec_replace_elt_si(void) {
  return vec_replace_elt(vsia, sia, 0);
}

// CHECK-BE-LABEL: @test_vec_replace_elt_ui(
// CHECK-BE-NEXT: entry:
// CHECK-BE-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr @vuia, align 16
// CHECK-BE-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-BE-NEXT: [[TMP2:%.*]] = load i32, ptr @uia, align 4
// CHECK-BE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-BE-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.ppc.altivec.vinsw(<4 x i32> [[TMP3]], i32 [[TMP2]], i32 4)
// CHECK-BE-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK-BE-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x i32>
// CHECK-BE-NEXT: ret <4 x i32> [[TMP6]]
//
// CHECK-LE-LABEL: @test_vec_replace_elt_ui(
// CHECK-LE-NEXT: entry:
// CHECK-LE-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr @vuia, align 16
// CHECK-LE-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-LE-NEXT: [[TMP2:%.*]] = load i32, ptr @uia, align 4
// CHECK-LE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-LE-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.ppc.altivec.vinsw(<4 x i32> [[TMP3]], i32 [[TMP2]], i32 8)
// CHECK-LE-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK-LE-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x i32>
// CHECK-LE-NEXT: ret <4 x i32> [[TMP6]]
//
vector unsigned int test_vec_replace_elt_ui(void) {
  return vec_replace_elt(vuia, uia, 1);
}

// CHECK-BE-LABEL: @test_vec_replace_elt_f(
// CHECK-BE-NEXT: entry:
// CHECK-BE-NEXT: [[TMP0:%.*]] = load <4 x float>, ptr @vfa, align 16
// CHECK-BE-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[TMP0]] to <16 x i8>
// CHECK-BE-NEXT: [[TMP2:%.*]] = load float, ptr @fa, align 4
// CHECK-BE-NEXT: [[CONV:%.*]] = fptoui float [[TMP2]] to i32
// CHECK-BE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-BE-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.ppc.altivec.vinsw(<4 x i32> [[TMP3]], i32 [[CONV]], i32 8)
// CHECK-BE-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK-BE-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x float>
// CHECK-BE-NEXT: ret <4 x float> [[TMP6]]
//
// CHECK-LE-LABEL: @test_vec_replace_elt_f(
// CHECK-LE-NEXT: entry:
// CHECK-LE-NEXT: [[TMP0:%.*]] = load <4 x float>, ptr @vfa, align 16
// CHECK-LE-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[TMP0]] to <16 x i8>
// CHECK-LE-NEXT: [[TMP2:%.*]] = load float, ptr @fa, align 4
// CHECK-LE-NEXT: [[CONV:%.*]] = fptoui float [[TMP2]] to i32
// CHECK-LE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-LE-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.ppc.altivec.vinsw(<4 x i32> [[TMP3]], i32 [[CONV]], i32 4)
// CHECK-LE-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK-LE-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP5]] to <4 x float>
// CHECK-LE-NEXT: ret <4 x float> [[TMP6]]
//
vector float test_vec_replace_elt_f(void) {
  return vec_replace_elt(vfa, fa, 2);
}

// CHECK-BE-LABEL: @test_vec_replace_elt_sll(
// CHECK-BE-NEXT: entry:
// CHECK-BE-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @vslla, align 16
// CHECK-BE-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-BE-NEXT: [[TMP2:%.*]] = load i64, ptr @llb, align 8
// CHECK-BE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK-BE-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.vinsd(<2 x i64> [[TMP3]], i64 [[TMP2]], i32 0)
// CHECK-BE-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8>
// CHECK-BE-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP5]] to <2 x i64>
// CHECK-BE-NEXT: ret <2 x i64> [[TMP6]]
//
// CHECK-LE-LABEL: @test_vec_replace_elt_sll(
// CHECK-LE-NEXT: entry:
// CHECK-LE-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @vslla, align 16
// CHECK-LE-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-LE-NEXT: [[TMP2:%.*]] = load i64, ptr @llb, align 8
// CHECK-LE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK-LE-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.vinsd(<2 x i64> [[TMP3]], i64 [[TMP2]], i32 8)
// CHECK-LE-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8>
// CHECK-LE-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP5]] to <2 x i64>
// CHECK-LE-NEXT: ret <2 x i64> [[TMP6]]
//
vector signed long long test_vec_replace_elt_sll(void) {
  return vec_replace_elt(vslla, llb, 0);
}

// CHECK-BE-LABEL: @test_vec_replace_elt_ull(
// CHECK-BE-NEXT: entry:
// CHECK-BE-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @vulla, align 16
// CHECK-BE-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-BE-NEXT: [[TMP2:%.*]] = load i64, ptr @ulla, align 8
// CHECK-BE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK-BE-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.vinsd(<2 x i64> [[TMP3]], i64 [[TMP2]], i32 0)
// CHECK-BE-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8>
// CHECK-BE-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP5]] to <2 x i64>
// CHECK-BE-NEXT: ret <2 x i64> [[TMP6]]
//
// CHECK-LE-LABEL: @test_vec_replace_elt_ull(
// CHECK-LE-NEXT: entry:
// CHECK-LE-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @vulla, align 16
// CHECK-LE-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-LE-NEXT: [[TMP2:%.*]] = load i64, ptr @ulla, align 8
// CHECK-LE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK-LE-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.vinsd(<2 x i64> [[TMP3]], i64 [[TMP2]], i32 8)
// CHECK-LE-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8>
// CHECK-LE-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP5]] to <2 x i64>
// CHECK-LE-NEXT: ret <2 x i64> [[TMP6]]
//
vector unsigned long long test_vec_replace_elt_ull(void) {
  return vec_replace_elt(vulla, ulla, 0);
}

// CHECK-BE-LABEL: @test_vec_replace_elt_d(
// CHECK-BE-NEXT: entry:
// CHECK-BE-NEXT: [[TMP0:%.*]] = load <2 x double>, ptr @vda, align 16
// CHECK-BE-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[TMP0]] to <16 x i8>
// CHECK-BE-NEXT: [[TMP2:%.*]] = load double, ptr @da, align 8
// CHECK-BE-NEXT: [[CONV:%.*]] = fptoui double [[TMP2]] to i64
// CHECK-BE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK-BE-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.vinsd(<2 x i64> [[TMP3]], i64 [[CONV]], i32 8)
// CHECK-BE-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8>
// CHECK-BE-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP5]] to <2 x double>
// CHECK-BE-NEXT: ret <2 x double> [[TMP6]]
//
// CHECK-LE-LABEL: @test_vec_replace_elt_d(
// CHECK-LE-NEXT: entry:
// CHECK-LE-NEXT: [[TMP0:%.*]] = load <2 x double>, ptr @vda, align 16
// CHECK-LE-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[TMP0]] to <16 x i8>
// CHECK-LE-NEXT: [[TMP2:%.*]] = load double, ptr @da, align 8
// CHECK-LE-NEXT: [[CONV:%.*]] = fptoui double [[TMP2]] to i64
// CHECK-LE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK-LE-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.vinsd(<2 x i64> [[TMP3]], i64 [[CONV]], i32 0)
// CHECK-LE-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8>
// CHECK-LE-NEXT: [[TMP6:%.*]] = bitcast <16 x i8> [[TMP5]] to <2 x double>
// CHECK-LE-NEXT: ret <2 x double> [[TMP6]]
//
vector double test_vec_replace_elt_d(void) {
  return vec_replace_elt(vda, da, 1);
}

// CHECK-BE-LABEL: @test_vec_replace_unaligned_si(
// CHECK-BE-NEXT: entry:
// CHECK-BE-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr @vsia, align 16
// CHECK-BE-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-BE-NEXT: [[TMP2:%.*]] = load i32, ptr @sia, align 4
// CHECK-BE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-BE-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.ppc.altivec.vinsw(<4 x i32> [[TMP3]], i32 [[TMP2]], i32 6)
// CHECK-BE-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK-BE-NEXT: ret <16 x i8> [[TMP5]]
//
// CHECK-LE-LABEL: @test_vec_replace_unaligned_si(
// CHECK-LE-NEXT: entry:
// CHECK-LE-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr @vsia, align 16
// CHECK-LE-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-LE-NEXT: [[TMP2:%.*]] = load i32, ptr @sia, align 4
// CHECK-LE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-LE-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.ppc.altivec.vinsw(<4 x i32> [[TMP3]], i32 [[TMP2]], i32 6)
// CHECK-LE-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK-LE-NEXT: ret <16 x i8> [[TMP5]]
//
vector unsigned char test_vec_replace_unaligned_si(void) {
  return vec_replace_unaligned(vsia, sia, 6);
}

// CHECK-BE-LABEL: @test_vec_replace_unaligned_ui(
// CHECK-BE-NEXT: entry:
// CHECK-BE-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr @vuia, align 16
// CHECK-BE-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-BE-NEXT: [[TMP2:%.*]] = load i32, ptr @uia, align 4
// CHECK-BE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-BE-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.ppc.altivec.vinsw(<4 x i32> [[TMP3]], i32 [[TMP2]], i32 8)
// CHECK-BE-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK-BE-NEXT: ret <16 x i8> [[TMP5]]
//
// CHECK-LE-LABEL: @test_vec_replace_unaligned_ui(
// CHECK-LE-NEXT: entry:
// CHECK-LE-NEXT: [[TMP0:%.*]] = load <4 x i32>, ptr @vuia, align 16
// CHECK-LE-NEXT: [[TMP1:%.*]] = bitcast <4 x i32> [[TMP0]] to <16 x i8>
// CHECK-LE-NEXT: [[TMP2:%.*]] = load i32, ptr @uia, align 4
// CHECK-LE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-LE-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.ppc.altivec.vinsw(<4 x i32> [[TMP3]], i32 [[TMP2]], i32 8)
// CHECK-LE-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK-LE-NEXT: ret <16 x i8> [[TMP5]]
//
vector unsigned char test_vec_replace_unaligned_ui(void) {
  return vec_replace_unaligned(vuia, uia, 8);
}

// CHECK-BE-LABEL: @test_vec_replace_unaligned_f(
// CHECK-BE-NEXT: entry:
// CHECK-BE-NEXT: [[TMP0:%.*]] = load <4 x float>, ptr @vfa, align 16
// CHECK-BE-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[TMP0]] to <16 x i8>
// CHECK-BE-NEXT: [[TMP2:%.*]] = load float, ptr @fa, align 4
// CHECK-BE-NEXT: [[CONV:%.*]] = fptoui float [[TMP2]] to i32
// CHECK-BE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-BE-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.ppc.altivec.vinsw(<4 x i32> [[TMP3]], i32 [[CONV]], i32 12)
// CHECK-BE-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK-BE-NEXT: ret <16 x i8> [[TMP5]]
//
// CHECK-LE-LABEL: @test_vec_replace_unaligned_f(
// CHECK-LE-NEXT: entry:
// CHECK-LE-NEXT: [[TMP0:%.*]] = load <4 x float>, ptr @vfa, align 16
// CHECK-LE-NEXT: [[TMP1:%.*]] = bitcast <4 x float> [[TMP0]] to <16 x i8>
// CHECK-LE-NEXT: [[TMP2:%.*]] = load float, ptr @fa, align 4
// CHECK-LE-NEXT: [[CONV:%.*]] = fptoui float [[TMP2]] to i32
// CHECK-LE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <4 x i32>
// CHECK-LE-NEXT: [[TMP4:%.*]] = call <4 x i32> @llvm.ppc.altivec.vinsw(<4 x i32> [[TMP3]], i32 [[CONV]], i32 12)
// CHECK-LE-NEXT: [[TMP5:%.*]] = bitcast <4 x i32> [[TMP4]] to <16 x i8>
// CHECK-LE-NEXT: ret <16 x i8> [[TMP5]]
//
vector unsigned char test_vec_replace_unaligned_f(void) {
  return vec_replace_unaligned(vfa, fa, 12);
}

// CHECK-BE-LABEL: @test_vec_replace_unaligned_sll(
// CHECK-BE-NEXT: entry:
// CHECK-BE-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @vslla, align 16
// CHECK-BE-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-BE-NEXT: [[TMP2:%.*]] = load i64, ptr @llb, align 8
// CHECK-BE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK-BE-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.vinsd(<2 x i64> [[TMP3]], i64 [[TMP2]], i32 6)
// CHECK-BE-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8>
// CHECK-BE-NEXT: ret <16 x i8> [[TMP5]]
//
// CHECK-LE-LABEL: @test_vec_replace_unaligned_sll(
// CHECK-LE-NEXT: entry:
// CHECK-LE-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @vslla, align 16
// CHECK-LE-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-LE-NEXT: [[TMP2:%.*]] = load i64, ptr @llb, align 8
// CHECK-LE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK-LE-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.vinsd(<2 x i64> [[TMP3]], i64 [[TMP2]], i32 6)
// CHECK-LE-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8>
// CHECK-LE-NEXT: ret <16 x i8> [[TMP5]]
//
vector unsigned char test_vec_replace_unaligned_sll(void) {
  return vec_replace_unaligned(vslla, llb, 6);
}

// CHECK-BE-LABEL: @test_vec_replace_unaligned_ull(
// CHECK-BE-NEXT: entry:
// CHECK-BE-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @vulla, align 16
// CHECK-BE-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-BE-NEXT: [[TMP2:%.*]] = load i64, ptr @ulla, align 8
// CHECK-BE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK-BE-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.vinsd(<2 x i64> [[TMP3]], i64 [[TMP2]], i32 7)
// CHECK-BE-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8>
// CHECK-BE-NEXT: ret <16 x i8> [[TMP5]]
//
// CHECK-LE-LABEL: @test_vec_replace_unaligned_ull(
// CHECK-LE-NEXT: entry:
// CHECK-LE-NEXT: [[TMP0:%.*]] = load <2 x i64>, ptr @vulla, align 16
// CHECK-LE-NEXT: [[TMP1:%.*]] = bitcast <2 x i64> [[TMP0]] to <16 x i8>
// CHECK-LE-NEXT: [[TMP2:%.*]] = load i64, ptr @ulla, align 8
// CHECK-LE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK-LE-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.vinsd(<2 x i64> [[TMP3]], i64 [[TMP2]], i32 7)
// CHECK-LE-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8>
// CHECK-LE-NEXT: ret <16 x i8> [[TMP5]]
//
vector unsigned char test_vec_replace_unaligned_ull(void) {
  return vec_replace_unaligned(vulla, ulla, 7);
}

// CHECK-BE-LABEL: @test_vec_replace_unaligned_d(
// CHECK-BE-NEXT: entry:
// CHECK-BE-NEXT: [[TMP0:%.*]] = load <2 x double>, ptr @vda, align 16
// CHECK-BE-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[TMP0]] to <16 x i8>
// CHECK-BE-NEXT: [[TMP2:%.*]] = load double, ptr @da, align 8
// CHECK-BE-NEXT: [[CONV:%.*]] = fptoui double [[TMP2]] to i64
// CHECK-BE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK-BE-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.vinsd(<2 x i64> [[TMP3]], i64 [[CONV]], i32 8)
// CHECK-BE-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8>
// CHECK-BE-NEXT: ret <16 x i8> [[TMP5]]
//
// CHECK-LE-LABEL: @test_vec_replace_unaligned_d(
// CHECK-LE-NEXT: entry:
// CHECK-LE-NEXT: [[TMP0:%.*]] = load <2 x double>, ptr @vda, align 16
// CHECK-LE-NEXT: [[TMP1:%.*]] = bitcast <2 x double> [[TMP0]] to <16 x i8>
// CHECK-LE-NEXT: [[TMP2:%.*]] = load double, ptr @da, align 8
// CHECK-LE-NEXT: [[CONV:%.*]] = fptoui double [[TMP2]] to i64
// CHECK-LE-NEXT: [[TMP3:%.*]] = bitcast <16 x i8> [[TMP1]] to <2 x i64>
// CHECK-LE-NEXT: [[TMP4:%.*]] = call <2 x i64> @llvm.ppc.altivec.vinsd(<2 x i64> [[TMP3]], i64 [[CONV]], i32 8)
// CHECK-LE-NEXT: [[TMP5:%.*]] = bitcast <2 x i64> [[TMP4]] to <16 x i8>
// CHECK-LE-NEXT: ret <16 x i8> [[TMP5]]
//
vector unsigned char test_vec_replace_unaligned_d(void) {
  return vec_replace_unaligned(vda, da, 8);
}

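// vec_insertl / vec_inserth insert a scalar element (or, in the *v
// variants, an element from another vector) at a variable byte index held
// in a GPR, counting from the low or high end; the left/right intrinsics
// swap between BE and LE.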
vector unsigned char test_vec_insertl_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsblx(<16 x i8> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsbrx(<16 x i8> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_insertl(uca, vuca, uia);
}

vector unsigned short test_vec_insertl_us(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinshlx(<8 x i16> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vinshrx(<8 x i16> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_insertl(usa, vusa, uia);
}

vector unsigned int test_vec_insertl_ui(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinswlx(<4 x i32> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinswrx(<4 x i32> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_insertl(uib, vuia, uia);
}

vector unsigned long long test_vec_insertl_ul(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsdlx(<2 x i64> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-BE-NEXT: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vinsdrx(<2 x i64> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_insertl(ulla, vulla, uia);
}

vector unsigned char test_vec_insertl_ucv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsbvlx(<16 x i8> %{{.+}}, i32 %{{.+}}, <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsbvrx(<16 x i8> %{{.+}}, i32 %{{.+}}, <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_insertl(vuca, vucb, uia);
}

vector unsigned short test_vec_insertl_usv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinshvlx(<8 x i16> %{{.+}}, i32 %{{.+}}, <8 x i16>
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vinshvrx(<8 x i16> %{{.+}}, i32 %{{.+}}, <8 x i16>
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_insertl(vusa, vusb, uia);
}

vector unsigned int test_vec_insertl_uiv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinswvlx(<4 x i32> %{{.+}}, i32 %{{.+}}, <4 x i32>
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinswvrx(<4 x i32> %{{.+}}, i32 %{{.+}}, <4 x i32>
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_insertl(vuia, vuib, uia);
}

vector unsigned char test_vec_inserth_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsbrx(<16 x i8> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsblx(<16 x i8> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_inserth(uca, vuca, uia);
}

vector unsigned short test_vec_inserth_us(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinshrx(<8 x i16> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vinshlx(<8 x i16> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_inserth(usa, vusa, uia);
}

vector unsigned int test_vec_inserth_ui(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinswrx(<4 x i32> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinswlx(<4 x i32> %{{.+}}, i32 %{{.+}}, i32
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_inserth(uib, vuia, uia);
}

vector unsigned long long test_vec_inserth_ul(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsdrx(<2 x i64> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-BE-NEXT: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vinsdlx(<2 x i64> %{{.+}}, i64 %{{.+}}, i64
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_inserth(ulla, vulla, uia);
}

vector unsigned char test_vec_inserth_ucv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinsbvrx(<16 x i8> %{{.+}}, i32 %{{.+}}, <16 x i8>
  // CHECK-BE-NEXT: ret <16 x i8>
  // CHECK-LE: @llvm.ppc.altivec.vinsbvlx(<16 x i8> %{{.+}}, i32 %{{.+}}, <16 x i8>
  // CHECK-LE-NEXT: ret <16 x i8>
  return vec_inserth(vuca, vucb, uia);
}

vector unsigned short test_vec_inserth_usv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinshvrx(<8 x i16> %{{.+}}, i32 %{{.+}}, <8 x i16>
  // CHECK-BE-NEXT: ret <8 x i16>
  // CHECK-LE: @llvm.ppc.altivec.vinshvlx(<8 x i16> %{{.+}}, i32 %{{.+}}, <8 x i16>
  // CHECK-LE-NEXT: ret <8 x i16>
  return vec_inserth(vusa, vusb, uia);
}

vector unsigned int test_vec_inserth_uiv(void) {
  // CHECK-BE: @llvm.ppc.altivec.vinswvrx(<4 x i32> %{{.+}}, i32 %{{.+}}, <4 x i32>
  // CHECK-BE-NEXT: ret <4 x i32>
  // CHECK-LE: @llvm.ppc.altivec.vinswvlx(<4 x i32> %{{.+}}, i32 %{{.+}}, <4 x i32>
  // CHECK-LE-NEXT: ret <4 x i32>
  return vec_inserth(vuia, vuib, uia);
}

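// vec_extractl / vec_extracth extract an element selected by a variable
// index from a pair of vectors into a doubleword of the result
// (vextdu*vlx/vextdu*vrx). On BE the result is additionally run through
// vperm to position the doubleword, which is why the BE checks are
// longer.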
vector unsigned long long test_vec_extractl_uc(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextdubvlx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextdubvrx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extractl(vuca, vucb, uia);
}

vector unsigned long long test_vec_extractl_us(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextduhvlx(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextduhvrx(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extractl(vusa, vusb, uia);
}

vector unsigned long long test_vec_extractl_ui(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextduwvlx(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextduwvrx(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extractl(vuia, vuib, uia);
}

vector unsigned long long test_vec_extractl_ul(void) {
  // CHECK-BE: @llvm.ppc.altivec.vextddvlx(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32
  // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
  // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
  // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
  // CHECK-BE: ret <2 x i64>
  // CHECK-LE: @llvm.ppc.altivec.vextddvrx(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32
  // CHECK-LE-NEXT: ret <2 x i64>
  return vec_extractl(vulla, vullb, uia);
}

1489 vector unsigned long long test_vec_extracth_uc(void) {
1490 // CHECK-BE: @llvm.ppc.altivec.vextdubvrx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
1491 // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
1492 // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
1493 // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
1494 // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
1495 // CHECK-BE: ret <2 x i64>
1496 // CHECK-LE: @llvm.ppc.altivec.vextdubvlx(<16 x i8> %{{.+}}, <16 x i8> %{{.+}}, i32
1497 // CHECK-LE-NEXT: ret <2 x i64>
1498 return vec_extracth(vuca, vucb, uia);
1501 vector unsigned long long test_vec_extracth_us(void) {
1502 // CHECK-BE: @llvm.ppc.altivec.vextduhvrx(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, i32
1503 // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
1504 // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
1505 // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
1506 // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
1507 // CHECK-BE: ret <2 x i64>
1508 // CHECK-LE: @llvm.ppc.altivec.vextduhvlx(<8 x i16> %{{.+}}, <8 x i16> %{{.+}}, i32
1509 // CHECK-LE-NEXT: ret <2 x i64>
1510 return vec_extracth(vusa, vusb, uia);
1513 vector unsigned long long test_vec_extracth_ui(void) {
1514 // CHECK-BE: @llvm.ppc.altivec.vextduwvrx(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, i32
1515 // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
1516 // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
1517 // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
1518 // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
1519 // CHECK-BE: ret <2 x i64>
1520 // CHECK-LE: @llvm.ppc.altivec.vextduwvlx(<4 x i32> %{{.+}}, <4 x i32> %{{.+}}, i32
1521 // CHECK-LE-NEXT: ret <2 x i64>
1522 return vec_extracth(vuia, vuib, uia);
1525 vector unsigned long long test_vec_extracth_ul(void) {
1526 // CHECK-BE: @llvm.ppc.altivec.vextddvrx(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32
1527 // CHECK-BE: [[T1:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
1528 // CHECK-BE: [[T2:%.+]] = bitcast <2 x i64> %{{.*}} to <4 x i32>
1529 // CHECK-BE: [[T3:%.+]] = call <4 x i32> @llvm.ppc.altivec.vperm(<4 x i32> [[T1]], <4 x i32> [[T2]], <16 x i8> {{.+}})
1530 // CHECK-BE: [[T4:%.+]] = bitcast <4 x i32> [[T3]] to <2 x i64>
1531 // CHECK-BE: ret <2 x i64>
1532 // CHECK-LE: @llvm.ppc.altivec.vextddvlx(<2 x i64> %{{.+}}, <2 x i64> %{{.+}}, i32
1533 // CHECK-LE-NEXT: ret <2 x i64>
1534 return vec_extracth(vulla, vullb, uia);
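
// Note: for vec_extractl/vec_extracth, the LE run checks a single
// vextd*vrx/vextd*vlx call, while the BE run additionally checks the bitcasts
// and the vperm call used to reposition the extracted doubleword in the
// result vector.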

vector signed int test_vec_vec_splati_si(void) {
  // CHECK: ret <4 x i32> <i32 -17, i32 -17, i32 -17, i32 -17>
  return vec_splati(-17);
}

vector unsigned int test_vec_vec_splati_ui(void) {
  // CHECK: ret <4 x i32> <i32 16, i32 16, i32 16, i32 16>
  return vec_splati(16U);
}

vector float test_vec_vec_splati_f(void) {
  // CHECK: ret <4 x float> <float 1.000000e+00, float 1.000000e+00, float 1.000000e+00, float 1.000000e+00>
  return vec_splati(1.0f);
}

vector double test_vec_vec_splatid(void) {
  // CHECK-BE: [[T1:%.+]] = fpext float %{{.+}} to double
  // CHECK-BE-NEXT: [[T2:%.+]] = insertelement <2 x double> poison, double [[T1]], i64 0
  // CHECK-BE-NEXT: [[T3:%.+]] = shufflevector <2 x double> [[T2]], <2 x double> poison, <2 x i32> zeroinitializer
  // CHECK-BE-NEXT: ret <2 x double> [[T3]]
  // CHECK-LE: [[T1:%.+]] = fpext float %{{.+}} to double
  // CHECK-LE-NEXT: [[T2:%.+]] = insertelement <2 x double> poison, double [[T1]], i64 0
  // CHECK-LE-NEXT: [[T3:%.+]] = shufflevector <2 x double> [[T2]], <2 x double> poison, <2 x i32> zeroinitializer
  // CHECK-LE-NEXT: ret <2 x double> [[T3]]
  return vec_splatid(1.0);
}

vector signed int test_vec_vec_splati_ins_si(void) {
  // CHECK-BE: [[T0:%.+]] = and i32 %{{.+}}, 1
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 %{{.+}}
  // CHECK-BE: [[T1:%.+]] = add i32 2, %{{.+}}
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK-BE: ret <4 x i32>
  // CHECK-LE: [[T0:%.+]] = and i32 %{{.+}}, 1
  // CHECK-LE: [[T1:%.+]] = sub i32 1, %{{.+}}
  // CHECK-LE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK-LE: [[T2:%.+]] = sub i32 3, %{{.+}}
  // CHECK-LE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T2]]
  // CHECK-LE: ret <4 x i32>
  return vec_splati_ins(vsia, 0, -17);
}

vector unsigned int test_vec_vec_splati_ins_ui(void) {
  // CHECK-BE: [[T0:%.+]] = and i32 %{{.+}}, 1
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 %{{.+}}
  // CHECK-BE: [[T1:%.+]] = add i32 2, %{{.+}}
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK-BE: ret <4 x i32>
  // CHECK-LE: [[T0:%.+]] = and i32 %{{.+}}, 1
  // CHECK-LE: [[T1:%.+]] = sub i32 1, %{{.+}}
  // CHECK-LE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK-LE: [[T2:%.+]] = sub i32 3, %{{.+}}
  // CHECK-LE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T2]]
  // CHECK-LE: ret <4 x i32>
  return vec_splati_ins(vuia, 1, 16U);
}

vector float test_vec_vec_splati_ins_f(void) {
  // CHECK-BE: [[T0:%.+]] = and i32 %{{.+}}, 1
  // CHECK-BE: insertelement <4 x float> %{{.+}}, float %{{.+}}, i32 %{{.+}}
  // CHECK-BE: [[T1:%.+]] = add i32 2, %{{.+}}
  // CHECK-BE: insertelement <4 x float> %{{.+}}, float %{{.+}}, i32 [[T1]]
  // CHECK-BE: ret <4 x float>
  // CHECK-LE: [[T0:%.+]] = and i32 %{{.+}}, 1
  // CHECK-LE: [[T1:%.+]] = sub i32 1, %{{.+}}
  // CHECK-LE: insertelement <4 x float> %{{.+}}, float %{{.+}}, i32 [[T1]]
  // CHECK-LE: [[T2:%.+]] = sub i32 3, %{{.+}}
  // CHECK-LE: insertelement <4 x float> %{{.+}}, float %{{.+}}, i32 [[T2]]
  // CHECK-LE: ret <4 x float>
  return vec_splati_ins(vfa, 0, 1.0f);
}

// In this test case, the second argument of vec_splati_ins is outside the
// expected range [0,1]. A mask of 0x01 is applied to obtain an in-range value
// for the second argument.
vector signed int test_vec_vec_splati_ins_range(void) {
  // CHECK-BE: [[T0:%.+]] = and i32 %{{.+}}, 1
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 %{{.+}}
  // CHECK-BE: [[T1:%.+]] = add i32 2, %{{.+}}
  // CHECK-BE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK-BE: ret <4 x i32>
  // CHECK-LE: [[T0:%.+]] = and i32 %{{.+}}, 1
  // CHECK-LE: [[T1:%.+]] = sub i32 1, %{{.+}}
  // CHECK-LE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T1]]
  // CHECK-LE: [[T2:%.+]] = sub i32 3, %{{.+}}
  // CHECK-LE: insertelement <4 x i32> %{{.+}}, i32 %{{.+}}, i32 [[T2]]
  // CHECK-LE: ret <4 x i32>
  return vec_splati_ins(vsia, 2, -17);
}
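
// Illustrative sketch (not checked by FileCheck): because the index operand is
// masked with 0x01 and 2 & 0x01 == 0, the call above is expected to behave
// like the in-range form:
//   vector signed int equiv = vec_splati_ins(vsia, 0, -17);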

void test_vec_xst_trunc_sc(vector signed __int128 __a, signed long long __b,
                           signed char *__c) {
  // CHECK: store i8 %{{.+}}, ptr %{{.+}}, align 1
  vec_xst_trunc(__a, __b, __c);
}

void test_vec_xst_trunc_uc(vector unsigned __int128 __a, signed long long __b,
                           unsigned char *__c) {
  // CHECK: store i8 %{{.+}}, ptr %{{.+}}, align 1
  vec_xst_trunc(__a, __b, __c);
}

void test_vec_xst_trunc_ss(vector signed __int128 __a, signed long long __b,
                           signed short *__c) {
  // CHECK: store i16 %{{.+}}, ptr %{{.+}}, align 2
  vec_xst_trunc(__a, __b, __c);
}

void test_vec_xst_trunc_us(vector unsigned __int128 __a, signed long long __b,
                           unsigned short *__c) {
  // CHECK: store i16 %{{.+}}, ptr %{{.+}}, align 2
  vec_xst_trunc(__a, __b, __c);
}

void test_vec_xst_trunc_si(vector signed __int128 __a, signed long long __b,
                           signed int *__c) {
  // CHECK: store i32 %{{.+}}, ptr %{{.+}}, align 4
  vec_xst_trunc(__a, __b, __c);
}

void test_vec_xst_trunc_ui(vector unsigned __int128 __a, signed long long __b,
                           unsigned int *__c) {
  // CHECK: store i32 %{{.+}}, ptr %{{.+}}, align 4
  vec_xst_trunc(__a, __b, __c);
}

void test_vec_xst_trunc_sll(vector signed __int128 __a, signed long long __b,
                            signed long long *__c) {
  // CHECK: store i64 %{{.+}}, ptr %{{.+}}, align 8
  vec_xst_trunc(__a, __b, __c);
}

void test_vec_xst_trunc_ull(vector unsigned __int128 __a, signed long long __b,
                            unsigned long long *__c) {
  // CHECK: store i64 %{{.+}}, ptr %{{.+}}, align 8
  vec_xst_trunc(__a, __b, __c);
}
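
// The vec_xst_trunc tests above only check the scalar store: the 128-bit
// element is truncated to the pointee type and stored at a byte displacement
// __b from __c. A hedged usage sketch (the buffer name is illustrative only):
//   signed char buf[16];
//   vec_xst_trunc(vsi128a, 0, buf); // stores the low byte of the element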

vector unsigned __int128 test_vec_slq_unsigned(void) {
  // CHECK-LABEL: test_vec_slq_unsigned
  // CHECK: shl <1 x i128> %{{.+}}, %{{.+}}
  // CHECK: ret <1 x i128> %{{.+}}
  return vec_sl(vui128a, vui128b);
}

vector signed __int128 test_vec_slq_signed(void) {
  // CHECK-LABEL: test_vec_slq_signed
  // CHECK: shl <1 x i128> %{{.+}}, %{{.+}}
  // CHECK: ret <1 x i128>
  return vec_sl(vi128a, vui128a);
}

vector unsigned __int128 test_vec_srq_unsigned(void) {
  // CHECK-LABEL: test_vec_srq_unsigned
  // CHECK: lshr <1 x i128> %{{.+}}, %{{.+}}
  // CHECK: ret <1 x i128>
  return vec_sr(vui128a, vui128b);
}

vector signed __int128 test_vec_srq_signed(void) {
  // CHECK-LABEL: test_vec_srq_signed
  // CHECK: lshr <1 x i128> %{{.+}}, %{{.+}}
  // CHECK: ret <1 x i128>
  return vec_sr(vi128a, vui128a);
}

vector unsigned __int128 test_vec_sraq_unsigned(void) {
  // CHECK-LABEL: test_vec_sraq_unsigned
  // CHECK: ashr <1 x i128> %{{.+}}, %{{.+}}
  // CHECK: ret <1 x i128>
  return vec_sra(vui128a, vui128b);
}

vector signed __int128 test_vec_sraq_signed(void) {
  // CHECK-LABEL: test_vec_sraq_signed
  // CHECK: ashr <1 x i128> %{{.+}}, %{{.+}}
  // CHECK: ret <1 x i128>
  return vec_sra(vi128a, vui128a);
}
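
// The quadword shifts above lower to plain IR shl/lshr/ashr on <1 x i128>
// rather than to target intrinsics; note that vec_sra checks for an
// arithmetic shift (ashr) even when the first operand is unsigned.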

int test_vec_test_lsbb_all_ones(void) {
  // CHECK: @llvm.ppc.vsx.xvtlsbb(<16 x i8> %{{.+}}, i32 1
  // CHECK-NEXT: ret i32
  return vec_test_lsbb_all_ones(vuca);
}

int test_vec_test_lsbb_all_zeros(void) {
  // CHECK: @llvm.ppc.vsx.xvtlsbb(<16 x i8> %{{.+}}, i32 0
  // CHECK-NEXT: ret i32
  return vec_test_lsbb_all_zeros(vuca);
}
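
// In the xvtlsbb calls above, the trailing i32 argument distinguishes the
// all-ones test (i32 1) from the all-zeros test (i32 0), matching the two
// builtins being checked.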

vector unsigned __int128 test_vec_mule_u128(void) {
  // CHECK-BE: @llvm.ppc.altivec.vmuleud(<2 x i64>
  // CHECK-BE-NEXT: ret <1 x i128>
  // CHECK-LE: @llvm.ppc.altivec.vmuloud(<2 x i64>
  // CHECK-LE-NEXT: ret <1 x i128>
  return vec_mule(vulla, vullb);
}

vector signed __int128 test_vec_mule_s128(void) {
  // CHECK-BE: @llvm.ppc.altivec.vmulesd(<2 x i64>
  // CHECK-BE-NEXT: ret <1 x i128>
  // CHECK-LE: @llvm.ppc.altivec.vmulosd(<2 x i64>
  // CHECK-LE-NEXT: ret <1 x i128>
  return vec_mule(vslla, vsllb);
}

vector unsigned __int128 test_vec_mulo_u128(void) {
  // CHECK-BE: @llvm.ppc.altivec.vmuloud(<2 x i64>
  // CHECK-BE-NEXT: ret <1 x i128>
  // CHECK-LE: @llvm.ppc.altivec.vmuleud(<2 x i64>
  // CHECK-LE-NEXT: ret <1 x i128>
  return vec_mulo(vulla, vullb);
}

vector signed __int128 test_vec_mulo_s128(void) {
  // CHECK-BE: @llvm.ppc.altivec.vmulosd(<2 x i64>
  // CHECK-BE-NEXT: ret <1 x i128>
  // CHECK-LE: @llvm.ppc.altivec.vmulesd(<2 x i64>
  // CHECK-LE-NEXT: ret <1 x i128>
  return vec_mulo(vslla, vsllb);
}
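
// vec_mule and vec_mulo map to opposite even/odd doubleword-multiply
// intrinsics (vmule*d vs. vmulo*d) on the two endiannesses, as the mirrored
// BE/LE checks above show.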

vector unsigned __int128 test_vec_msumc_u128(void) {
  // CHECK: @llvm.ppc.altivec.vmsumcud(<2 x i64>
  // CHECK-NEXT: ret <1 x i128>
  return vec_msumc(vulla, vullb, vui128a);
}

vector signed __int128 test_vec_xl_sext_i8(void) {
  // CHECK: load i8
  // CHECK: sext i8
  // CHECK: ret <1 x i128>
  return vec_xl_sext(llb, cap);
}

vector signed __int128 test_vec_xl_sext_i16(void) {
  // CHECK: load i16
  // CHECK: sext i16
  // CHECK: ret <1 x i128>
  return vec_xl_sext(llb, sap);
}

vector signed __int128 test_vec_xl_sext_i32(void) {
  // CHECK: load i32
  // CHECK: sext i32
  // CHECK: ret <1 x i128>
  return vec_xl_sext(llb, iap);
}

vector signed __int128 test_vec_xl_sext_i64(void) {
  // CHECK: load i64
  // CHECK: sext i64
  // CHECK: ret <1 x i128>
  return vec_xl_sext(llb, llap);
}

vector unsigned __int128 test_vec_xl_zext_i8(void) {
  // CHECK: load i8
  // CHECK: zext i8
  // CHECK: ret <1 x i128>
  return vec_xl_zext(llb, ucap);
}

vector unsigned __int128 test_vec_xl_zext_i16(void) {
  // CHECK: load i16
  // CHECK: zext i16
  // CHECK: ret <1 x i128>
  return vec_xl_zext(llb, usap);
}

vector unsigned __int128 test_vec_xl_zext_i32(void) {
  // CHECK: load i32
  // CHECK: zext i32
  // CHECK: ret <1 x i128>
  return vec_xl_zext(llb, uiap);
}

vector unsigned __int128 test_vec_xl_zext_i64(void) {
  // CHECK: load i64
  // CHECK: zext i64
  // CHECK: ret <1 x i128>
  return vec_xl_zext(llb, ullap);
}
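
// vec_xl_sext/vec_xl_zext load a single scalar and sign- or zero-extend it
// into the <1 x i128> result, so only the load, the extension and the return
// are checked.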

vector signed __int128 test_vec_signextq_s128(void) {
  // CHECK: @llvm.ppc.altivec.vextsd2q(<2 x i64>
  // CHECK-NEXT: ret <1 x i128>
  return vec_signextq(vslla);
}

vector unsigned __int128 test_vec_mod_u128(void) {
  // CHECK: urem <1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_mod(vui128a, vui128b);
}

vector signed __int128 test_vec_mod_s128(void) {
  // CHECK: srem <1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_mod(vsi128a, vsi128b);
}

vector bool __int128 test_vec_cmpeq_s128(void) {
  // CHECK-LABEL: @test_vec_cmpeq_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpequq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpeq(vsi128a, vsi128b);
}

vector bool __int128 test_vec_cmpeq_u128(void) {
  // CHECK-LABEL: @test_vec_cmpeq_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpequq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpeq(vui128a, vui128b);
}

vector bool __int128 test_vec_cmpeq_bool_int128(void) {
  // CHECK-LABEL: @test_vec_cmpeq_bool_int128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpequq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpeq(vbi128a, vbi128b);
}

vector bool __int128 test_vec_cmpne_s128(void) {
  // CHECK-LABEL: @test_vec_cmpne_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpequq(<1 x i128>
  // CHECK-NEXT: %not.i = xor <1 x i128> %4, <i128 -1>
  // CHECK-NEXT: ret <1 x i128> %not.i
  return vec_cmpne(vsi128a, vsi128b);
}

vector bool __int128 test_vec_cmpne_u128(void) {
  // CHECK-LABEL: @test_vec_cmpne_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpequq(<1 x i128>
  // CHECK-NEXT: xor <1 x i128> %4, <i128 -1>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpne(vui128a, vui128b);
}

vector bool __int128 test_vec_cmpne_bool_int128(void) {
  // CHECK-LABEL: @test_vec_cmpne_bool_int128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpequq(<1 x i128>
  // CHECK-NEXT: xor <1 x i128> %4, <i128 -1>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpne(vbi128a, vbi128b);
}

vector bool __int128 test_vec_cmpgt_s128(void) {
  // CHECK-LABEL: @test_vec_cmpgt_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtsq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpgt(vsi128a, vsi128b);
}

vector bool __int128 test_vec_cmpgt_u128(void) {
  // CHECK-LABEL: @test_vec_cmpgt_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtuq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpgt(vui128a, vui128b);
}

vector bool __int128 test_vec_cmplt_s128(void) {
  // CHECK-LABEL: @test_vec_cmplt_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtsq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmplt(vsi128a, vsi128b);
}

vector bool __int128 test_vec_cmplt_u128(void) {
  // CHECK-LABEL: @test_vec_cmplt_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtuq(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmplt(vui128a, vui128b);
}

vector bool __int128 test_vec_cmpge_s128(void) {
  // CHECK-LABEL: @test_vec_cmpge_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtsq(<1 x i128>
  // CHECK-NEXT: xor <1 x i128> %6, <i128 -1>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpge(vsi128a, vsi128b);
}

vector bool __int128 test_vec_cmpge_u128(void) {
  // CHECK-LABEL: @test_vec_cmpge_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtuq(<1 x i128>
  // CHECK-NEXT: xor <1 x i128> %6, <i128 -1>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmpge(vui128a, vui128b);
}

vector bool __int128 test_vec_cmple_s128(void) {
  // CHECK-LABEL: @test_vec_cmple_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtsq(<1 x i128>
  // CHECK-NEXT: xor <1 x i128> %8, <i128 -1>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmple(vsi128a, vsi128b);
}

vector bool __int128 test_vec_cmple_u128(void) {
  // CHECK-LABEL: @test_vec_cmple_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vcmpgtuq(<1 x i128>
  // CHECK-NEXT: xor <1 x i128> %8, <i128 -1>
  // CHECK-NEXT: ret <1 x i128>
  return vec_cmple(vui128a, vui128b);
}
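
// There are no dedicated quadword intrinsics for vec_cmpne, vec_cmpge and
// vec_cmple: they are emitted as the complement (xor with all-ones) of
// vcmpequq, vcmpgtsq or vcmpgtuq, which is what the xor checks above match.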

int test_vec_any_eq_u128(void) {
  // CHECK-LABEL: @test_vec_any_eq_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_eq(vui128a, vui128b);
}

int test_vec_any_eq_s128(void) {
  // CHECK-LABEL: @test_vec_any_eq_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_eq(vsi128a, vsi128b);
}

int test_vec_any_eq_bool_int128(void) {
  // CHECK-LABEL: @test_vec_any_eq_bool_int128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_eq(vbi128a, vbi128b);
}

int test_vec_any_ne_s128(void) {
  // CHECK-LABEL: @test_vec_any_ne_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_ne(vsi128a, vsi128b);
}

int test_vec_any_ne_u128(void) {
  // CHECK-LABEL: @test_vec_any_ne_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_ne(vui128a, vui128b);
}

int test_vec_any_ne_bool_int128(void) {
  // CHECK-LABEL: @test_vec_any_ne_bool_int128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_ne(vbi128a, vbi128b);
}

int test_vec_any_lt_s128(void) {
  // CHECK-LABEL: @test_vec_any_lt_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_lt(vsi128a, vsi128b);
}

int test_vec_any_lt_u128(void) {
  // CHECK-LABEL: @test_vec_any_lt_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_lt(vui128a, vui128b);
}

int test_vec_any_gt_s128(void) {
  // CHECK-LABEL: @test_vec_any_gt_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_gt(vsi128a, vsi128b);
}

int test_vec_any_gt_u128(void) {
  // CHECK-LABEL: @test_vec_any_gt_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 1, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_gt(vui128a, vui128b);
}

int test_vec_any_le_s128(void) {
  // CHECK-LABEL: @test_vec_any_le_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_le(vsi128a, vsi128b);
}

int test_vec_any_le_u128(void) {
  // CHECK-LABEL: @test_vec_any_le_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_le(vui128a, vui128b);
}

int test_vec_any_ge_s128(void) {
  // CHECK-LABEL: @test_vec_any_ge_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_ge(vsi128a, vsi128b);
}

int test_vec_any_ge_u128(void) {
  // CHECK-LABEL: @test_vec_any_ge_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 3, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_any_ge(vui128a, vui128b);
}

int test_vec_all_eq_s128(void) {
  // CHECK-LABEL: @test_vec_all_eq_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_eq(vsi128a, vsi128b);
}

int test_vec_all_eq_u128(void) {
  // CHECK-LABEL: @test_vec_all_eq_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_eq(vui128a, vui128b);
}

int test_vec_all_eq_bool_int128(void) {
  // CHECK-LABEL: @test_vec_all_eq_bool_int128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_eq(vbi128a, vbi128b);
}

int test_vec_all_ne_s128(void) {
  // CHECK-LABEL: @test_vec_all_ne_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_ne(vsi128a, vsi128b);
}

int test_vec_all_ne_u128(void) {
  // CHECK-LABEL: @test_vec_all_ne_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_ne(vui128a, vui128b);
}

int test_vec_all_ne_bool_int128(void) {
  // CHECK-LABEL: @test_vec_all_ne_bool_int128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpequq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_ne(vbi128a, vbi128b);
}

int test_vec_all_lt_s128(void) {
  // CHECK-LABEL: @test_vec_all_lt_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_lt(vsi128a, vsi128b);
}

int test_vec_all_lt_u128(void) {
  // CHECK-LABEL: @test_vec_all_lt_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_lt(vui128a, vui128b);
}

int test_vec_all_gt_s128(void) {
  // CHECK-LABEL: @test_vec_all_gt_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_gt(vsi128a, vsi128b);
}

int test_vec_all_gt_u128(void) {
  // CHECK-LABEL: @test_vec_all_gt_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 2, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_gt(vui128a, vui128b);
}

int test_vec_all_le_s128(void) {
  // CHECK-LABEL: @test_vec_all_le_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_le(vsi128a, vsi128b);
}

int test_vec_all_le_u128(void) {
  // CHECK-LABEL: @test_vec_all_le_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_le(vui128a, vui128b);
}

int test_vec_all_ge_s128(void) {
  // CHECK-LABEL: @test_vec_all_ge_s128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtsq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_ge(vsi128a, vsi128b);
}

int test_vec_all_ge_u128(void) {
  // CHECK-LABEL: @test_vec_all_ge_u128(
  // CHECK: call i32 @llvm.ppc.altivec.vcmpgtuq.p(i32 0, <1 x i128> %2, <1 x i128> %3)
  // CHECK-NEXT: ret i32
  return vec_all_ge(vui128a, vui128b);
}
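
// In the .p predicate calls above, the leading i32 argument selects how the
// comparison result is tested: judging from these checks, 1 requests "any
// element satisfies the comparison", 2 "all elements satisfy it", and 3/0 the
// corresponding negated forms used for the any_/all_ variants of ne, le and ge.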

vector signed __int128 test_vec_rl_s128(void) {
  // CHECK-LABEL: @test_vec_rl_s128(
  // CHECK: sub <1 x i128>
  // CHECK-NEXT: lshr <1 x i128>
  // CHECK-NEXT: or <1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_rl(vsi128a, vui128b);
}

vector unsigned __int128 test_vec_rl_u128(void) {
  // CHECK-LABEL: @test_vec_rl_u128(
  // CHECK: sub <1 x i128>
  // CHECK: lshr <1 x i128>
  // CHECK: or <1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_rl(vui128a, vui128b);
}

vector signed __int128 test_vec_rlnm_s128(void) {
  // CHECK-LABEL: @test_vec_rlnm_s128(
  // CHECK-LE: %shuffle.i = shufflevector <16 x i8> %7, <16 x i8> %8, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 16, i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  // CHECK-BE: %shuffle.i = shufflevector <16 x i8> %7, <16 x i8> %8, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 31, i32 30, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vrlqnm(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_rlnm(vsi128a, vsi128b, vsi128c);
}

vector unsigned __int128 test_vec_rlnm_u128(void) {
  // CHECK-LABEL: @test_vec_rlnm_u128(
  // CHECK-LE: %shuffle.i = shufflevector <16 x i8> %7, <16 x i8> %8, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 16, i32 0, i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  // CHECK-BE: %shuffle.i = shufflevector <16 x i8> %7, <16 x i8> %8, <16 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 31, i32 30, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vrlqnm(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_rlnm(vui128a, vui128b, vui128c);
}

vector signed __int128 test_vec_rlmi_s128(void) {
  // CHECK-LABEL: @test_vec_rlmi_s128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vrlqmi(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_rlmi(vsi128a, vsi128b, vsi128c);
}

vector unsigned __int128 test_vec_rlmi_u128(void) {
  // CHECK-LABEL: @test_vec_rlmi_u128(
  // CHECK: call <1 x i128> @llvm.ppc.altivec.vrlqmi(<1 x i128>
  // CHECK-NEXT: ret <1 x i128>
  return vec_rlmi(vui128a, vui128b, vui128c);
}