; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-apple-darwin -mattr=+avx | FileCheck %s --check-prefix=CHECK --check-prefix=X64
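
; and_masks: two fcmp olt results are ANDed as <8 x i1> and zero-extended to
; <8 x i32>. On AVX this should lower to two vcmpltps, a vandps combining the
; two masks, and a final vandps with a constant-pool vector of 1s (LCPI0_0)
; that implements the zext of the all-ones/all-zeros compare lanes.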
define void @and_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
; X32-LABEL: and_masks:
; X32:       ## BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movl {{[0-9]+}}(%esp), %edx
; X32-NEXT:    vmovups (%edx), %ymm0
; X32-NEXT:    vmovups (%ecx), %ymm1
; X32-NEXT:    vcmpltps %ymm0, %ymm1, %ymm1
; X32-NEXT:    vmovups (%eax), %ymm2
; X32-NEXT:    vcmpltps %ymm0, %ymm2, %ymm0
; X32-NEXT:    vandps %ymm1, %ymm0, %ymm0
; X32-NEXT:    vandps LCPI0_0, %ymm0, %ymm0
; X32-NEXT:    vmovaps %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: and_masks:
; X64:       ## BB#0:
; X64-NEXT:    vmovups (%rdi), %ymm0
; X64-NEXT:    vmovups (%rsi), %ymm1
; X64-NEXT:    vcmpltps %ymm0, %ymm1, %ymm1
; X64-NEXT:    vmovups (%rdx), %ymm2
; X64-NEXT:    vcmpltps %ymm0, %ymm2, %ymm0
; X64-NEXT:    vandps %ymm1, %ymm0, %ymm0
; X64-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vmovaps %ymm0, (%rax)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %v0 = load <8 x float>, <8 x float>* %a, align 16
  %v1 = load <8 x float>, <8 x float>* %b, align 16
  %m0 = fcmp olt <8 x float> %v1, %v0
  %v2 = load <8 x float>, <8 x float>* %c, align 16
  %m1 = fcmp olt <8 x float> %v2, %v0
  %mand = and <8 x i1> %m1, %m0
  %r = zext <8 x i1> %mand to <8 x i32>
  store <8 x i32> %r, <8 x i32>* undef, align 32
  ret void
}
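
; neg_masks: the xor with all-ones is a NOT of the <8 x i1> mask, which should
; fold into the compare itself, flipping olt into a single vcmpnltps instead of
; emitting a separate inversion; the zext again becomes a vandps with a
; constant-pool vector of 1s (LCPI1_0).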
define void @neg_masks(<8 x float>* %a, <8 x float>* %b, <8 x float>* %c) nounwind uwtable noinline ssp {
; X32-LABEL: neg_masks:
; X32:       ## BB#0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    vmovups (%ecx), %ymm0
; X32-NEXT:    vcmpnltps (%eax), %ymm0, %ymm0
; X32-NEXT:    vandps LCPI1_0, %ymm0, %ymm0
; X32-NEXT:    vmovaps %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: neg_masks:
; X64:       ## BB#0:
; X64-NEXT:    vmovups (%rsi), %ymm0
; X64-NEXT:    vcmpnltps (%rdi), %ymm0, %ymm0
; X64-NEXT:    vandps {{.*}}(%rip), %ymm0, %ymm0
; X64-NEXT:    vmovaps %ymm0, (%rax)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %v0 = load <8 x float>, <8 x float>* %a, align 16
  %v1 = load <8 x float>, <8 x float>* %b, align 16
  %m0 = fcmp olt <8 x float> %v1, %v0
  %mand = xor <8 x i1> %m0, <i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1, i1 1>
  %r = zext <8 x i1> %mand to <8 x i32>
  store <8 x i32> %r, <8 x i32>* undef, align 32
  ret void
}