; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx512vp2intersect -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,X86
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vp2intersect -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,X64

; Test with more than four live mask pairs
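; Each vp2intersectd writes its two result masks to an even/odd pair of the
; eight mask registers k0-k7, so at most four pairs can be kept in registers;
; five live pairs plus a call that clobbers the mask registers force every
; pair to be spilled to the stack and reloaded.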

define void @test(<16 x i32> %a0, <16 x i32> %b0, <16 x i32> %a1, <16 x i32> %b1, <16 x i32> %a2, <16 x i32> %b2, <16 x i32> %a3, <16 x i32> %b3, <16 x i32> %a4, <16 x i32> %b4, i16* nocapture %m0, i16* nocapture %m1) nounwind {
; X86-LABEL: test:
; X86: # %bb.0: # %entry
; X86-NEXT: pushl %ebp
; X86-NEXT: movl %esp, %ebp
; X86-NEXT: pushl %edi
; X86-NEXT: pushl %esi
; X86-NEXT: andl $-64, %esp
; X86-NEXT: subl $64, %esp
; X86-NEXT: movl 456(%ebp), %esi
; X86-NEXT: vmovaps 328(%ebp), %zmm3
; X86-NEXT: vmovaps 200(%ebp), %zmm4
; X86-NEXT: vmovaps 72(%ebp), %zmm5
; X86-NEXT: vp2intersectd %zmm1, %zmm0, %k0
; X86-NEXT: kmovw %k0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: kmovw %k1, {{[0-9]+}}(%esp)
; X86-NEXT: vp2intersectd 8(%ebp), %zmm2, %k0
; X86-NEXT: kmovw %k0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: kmovw %k1, {{[0-9]+}}(%esp)
; X86-NEXT: vp2intersectd 136(%ebp), %zmm5, %k0
; X86-NEXT: kmovw %k0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: kmovw %k1, {{[0-9]+}}(%esp)
; X86-NEXT: vp2intersectd 264(%ebp), %zmm4, %k0
; X86-NEXT: kmovw %k0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: kmovw %k1, {{[0-9]+}}(%esp)
; X86-NEXT: vp2intersectd 392(%ebp), %zmm3, %k0
; X86-NEXT: kmovw %k0, {{[-0-9]+}}(%e{{[sb]}}p) # 4-byte Folded Spill
; X86-NEXT: kmovw %k1, {{[0-9]+}}(%esp)
; X86-NEXT: vzeroupper
; X86-NEXT: calll dummy
; X86-NEXT: kmovw {{[-0-9]+}}(%e{{[sb]}}p), %k0 # 4-byte Folded Reload
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: kmovw %k0, %eax
; X86-NEXT: kmovw {{[-0-9]+}}(%e{{[sb]}}p), %k0 # 4-byte Folded Reload
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: kmovw %k0, %ecx
; X86-NEXT: kmovw {{[-0-9]+}}(%e{{[sb]}}p), %k0 # 4-byte Folded Reload
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: kmovw %k0, %edx
; X86-NEXT: kmovw {{[-0-9]+}}(%e{{[sb]}}p), %k0 # 4-byte Folded Reload
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k1
; X86-NEXT: kmovw %k0, %edi
; X86-NEXT: addl %edi, %eax
; X86-NEXT: kmovw {{[-0-9]+}}(%e{{[sb]}}p), %k2 # 4-byte Folded Reload
; X86-NEXT: kmovw {{[0-9]+}}(%esp), %k3
; X86-NEXT: kmovw %k2, %edi
; X86-NEXT: addl %ecx, %edx
; X86-NEXT: kmovw %k1, %ecx
; X86-NEXT: addl %edi, %ecx
; X86-NEXT: addl %eax, %ecx
; X86-NEXT: addl %edx, %ecx
; X86-NEXT: movw %cx, (%esi)
; X86-NEXT: leal -8(%ebp), %esp
; X86-NEXT: popl %esi
; X86-NEXT: popl %edi
; X86-NEXT: popl %ebp
; X86-NEXT: retl
;
; X64-LABEL: test:
; X64: # %bb.0: # %entry
; X64-NEXT: pushq %rbp
; X64-NEXT: movq %rsp, %rbp
; X64-NEXT: pushq %r14
; X64-NEXT: pushq %rbx
; X64-NEXT: andq $-64, %rsp
; X64-NEXT: subq $64, %rsp
; X64-NEXT: movq %rdi, %r14
; X64-NEXT: vmovaps 16(%rbp), %zmm8
; X64-NEXT: vp2intersectd %zmm1, %zmm0, %k0
; X64-NEXT: kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; X64-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
; X64-NEXT: vp2intersectd %zmm3, %zmm2, %k0
; X64-NEXT: kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; X64-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
; X64-NEXT: vp2intersectd %zmm5, %zmm4, %k0
; X64-NEXT: kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; X64-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
; X64-NEXT: vp2intersectd %zmm7, %zmm6, %k0
; X64-NEXT: kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; X64-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
; X64-NEXT: vp2intersectd 80(%rbp), %zmm8, %k0
; X64-NEXT: kmovw %k0, {{[-0-9]+}}(%r{{[sb]}}p) # 4-byte Folded Spill
; X64-NEXT: kmovw %k1, {{[0-9]+}}(%rsp)
; X64-NEXT: vzeroupper
; X64-NEXT: callq dummy
; X64-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 4-byte Folded Reload
; X64-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
; X64-NEXT: kmovw %k0, %eax
; X64-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 4-byte Folded Reload
; X64-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
; X64-NEXT: kmovw %k0, %ecx
; X64-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 4-byte Folded Reload
; X64-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
; X64-NEXT: kmovw %k0, %edx
; X64-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 4-byte Folded Reload
; X64-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
; X64-NEXT: kmovw %k0, %esi
; X64-NEXT: kmovw {{[-0-9]+}}(%r{{[sb]}}p), %k0 # 4-byte Folded Reload
; X64-NEXT: kmovw {{[0-9]+}}(%rsp), %k1
; X64-NEXT: kmovw %k0, %edi
; X64-NEXT: kmovw %k1, %ebx
; X64-NEXT: addl %edi, %eax
; X64-NEXT: addl %ecx, %edx
; X64-NEXT: leal (%rbx,%rsi), %ecx
; X64-NEXT: addl %eax, %ecx
; X64-NEXT: addl %edx, %ecx
; X64-NEXT: movw %cx, (%r14)
; X64-NEXT: leaq -16(%rbp), %rsp
; X64-NEXT: popq %rbx
; X64-NEXT: popq %r14
; X64-NEXT: popq %rbp
; X64-NEXT: retq
entry:
  %0 = call { <16 x i1>, <16 x i1> } @llvm.x86.avx512.vp2intersect.d.512(<16 x i32> %a0, <16 x i32> %b0)
  %1 = call { <16 x i1>, <16 x i1> } @llvm.x86.avx512.vp2intersect.d.512(<16 x i32> %a1, <16 x i32> %b1)
  %2 = call { <16 x i1>, <16 x i1> } @llvm.x86.avx512.vp2intersect.d.512(<16 x i32> %a2, <16 x i32> %b2)
  %3 = call { <16 x i1>, <16 x i1> } @llvm.x86.avx512.vp2intersect.d.512(<16 x i32> %a3, <16 x i32> %b3)
  %4 = call { <16 x i1>, <16 x i1> } @llvm.x86.avx512.vp2intersect.d.512(<16 x i32> %a4, <16 x i32> %b4)

  %5 = extractvalue { <16 x i1>, <16 x i1> } %0, 0
  %6 = extractvalue { <16 x i1>, <16 x i1> } %1, 0
  %7 = extractvalue { <16 x i1>, <16 x i1> } %2, 0
  %8 = extractvalue { <16 x i1>, <16 x i1> } %3, 0
  %9 = extractvalue { <16 x i1>, <16 x i1> } %4, 0
  %10 = extractvalue { <16 x i1>, <16 x i1> } %0, 1
  %11 = extractvalue { <16 x i1>, <16 x i1> } %1, 1
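
  ; The call clobbers the AVX-512 mask registers, so every mask pair that is
  ; still live here has to be spilled to the stack and reloaded afterwards.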
  call void @dummy()

  %12 = bitcast <16 x i1> %5 to i16
  %13 = bitcast <16 x i1> %6 to i16
  %14 = bitcast <16 x i1> %7 to i16
  %15 = bitcast <16 x i1> %8 to i16
  %16 = bitcast <16 x i1> %9 to i16
  %17 = bitcast <16 x i1> %10 to i16
  %18 = bitcast <16 x i1> %11 to i16

  %19 = add i16 %12, %13
  %20 = add i16 %14, %15
  %21 = add i16 %16, %17
  %22 = add i16 %19, %21
  %23 = add i16 %22, %20

  store i16 %23, i16* %m0, align 16
  ret void
}

declare { <16 x i1>, <16 x i1> } @llvm.x86.avx512.vp2intersect.d.512(<16 x i32>, <16 x i32>)
declare void @dummy()