1 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=SSE2
3 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
4 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
5 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512f | FileCheck %s --check-prefixes=AVX,AVX512,AVX512F
6 ; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512bw | FileCheck %s --check-prefixes=AVX,AVX512,AVX512BW
8 @a = global [1024 x i8] zeroinitializer, align 16
9 @b = global [1024 x i8] zeroinitializer, align 16
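; Each function below vectorizes this scalar sum-of-absolute-differences loop
; (processing 16, 32, or 64 bytes of @a and @b per iteration, with the
; accumulator kept as a vector of i32 and reduced to a scalar at the end):
;
;   int sum = 0;
;   for (int i = 0; i < 1024; i++)
;     sum += abs((int)a[i] - (int)b[i]);
;
; The checks record the lowering chosen for each subtarget; psadbw is emitted
; where the absolute-difference reduction is recognized.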
11 define i32 @sad_16i8() nounwind {
12 ; SSE2-LABEL: sad_16i8:
13 ; SSE2: # %bb.0: # %entry
14 ; SSE2-NEXT: pxor %xmm0, %xmm0
15 ; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
16 ; SSE2-NEXT: pxor %xmm1, %xmm1
17 ; SSE2-NEXT: .p2align 4, 0x90
18 ; SSE2-NEXT: .LBB0_1: # %vector.body
19 ; SSE2-NEXT: # =>This Inner Loop Header: Depth=1
20 ; SSE2-NEXT: movdqu a+1024(%rax), %xmm2
21 ; SSE2-NEXT: movdqu b+1024(%rax), %xmm3
22 ; SSE2-NEXT: psadbw %xmm2, %xmm3
23 ; SSE2-NEXT: paddd %xmm3, %xmm1
24 ; SSE2-NEXT: addq $4, %rax
25 ; SSE2-NEXT: jne .LBB0_1
26 ; SSE2-NEXT: # %bb.2: # %middle.block
27 ; SSE2-NEXT: paddd %xmm0, %xmm1
28 ; SSE2-NEXT: paddd %xmm0, %xmm0
29 ; SSE2-NEXT: paddd %xmm1, %xmm0
30 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
31 ; SSE2-NEXT: paddd %xmm0, %xmm1
32 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
33 ; SSE2-NEXT: paddd %xmm1, %xmm0
34 ; SSE2-NEXT: movd %xmm0, %eax
37 ; AVX1-LABEL: sad_16i8:
38 ; AVX1: # %bb.0: # %entry
39 ; AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0
40 ; AVX1-NEXT: movq $-1024, %rax # imm = 0xFC00
41 ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
42 ; AVX1-NEXT: .p2align 4, 0x90
43 ; AVX1-NEXT: .LBB0_1: # %vector.body
44 ; AVX1-NEXT: # =>This Inner Loop Header: Depth=1
45 ; AVX1-NEXT: vmovdqu a+1024(%rax), %xmm2
46 ; AVX1-NEXT: vpsadbw b+1024(%rax), %xmm2, %xmm2
47 ; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm2
48 ; AVX1-NEXT: vblendps {{.*#+}} ymm1 = ymm2[0,1,2,3],ymm1[4,5,6,7]
49 ; AVX1-NEXT: addq $4, %rax
50 ; AVX1-NEXT: jne .LBB0_1
51 ; AVX1-NEXT: # %bb.2: # %middle.block
52 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm2
53 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
54 ; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
55 ; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
56 ; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
57 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
58 ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
59 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
60 ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
61 ; AVX1-NEXT: vmovd %xmm0, %eax
62 ; AVX1-NEXT: vzeroupper
65 ; AVX2-LABEL: sad_16i8:
66 ; AVX2: # %bb.0: # %entry
67 ; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
68 ; AVX2-NEXT: movq $-1024, %rax # imm = 0xFC00
69 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
70 ; AVX2-NEXT: .p2align 4, 0x90
71 ; AVX2-NEXT: .LBB0_1: # %vector.body
72 ; AVX2-NEXT: # =>This Inner Loop Header: Depth=1
73 ; AVX2-NEXT: vmovdqu a+1024(%rax), %xmm2
74 ; AVX2-NEXT: vpsadbw b+1024(%rax), %xmm2, %xmm2
75 ; AVX2-NEXT: vpaddd %ymm1, %ymm2, %ymm1
76 ; AVX2-NEXT: addq $4, %rax
77 ; AVX2-NEXT: jne .LBB0_1
78 ; AVX2-NEXT: # %bb.2: # %middle.block
79 ; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm0
80 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
81 ; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
82 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
83 ; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
84 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
85 ; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
86 ; AVX2-NEXT: vmovd %xmm0, %eax
87 ; AVX2-NEXT: vzeroupper
90 ; AVX512-LABEL: sad_16i8:
91 ; AVX512: # %bb.0: # %entry
92 ; AVX512-NEXT: vpxor %xmm0, %xmm0, %xmm0
93 ; AVX512-NEXT: movq $-1024, %rax # imm = 0xFC00
94 ; AVX512-NEXT: .p2align 4, 0x90
95 ; AVX512-NEXT: .LBB0_1: # %vector.body
96 ; AVX512-NEXT: # =>This Inner Loop Header: Depth=1
97 ; AVX512-NEXT: vmovdqu a+1024(%rax), %xmm1
98 ; AVX512-NEXT: vpsadbw b+1024(%rax), %xmm1, %xmm1
99 ; AVX512-NEXT: vpaddd %zmm0, %zmm1, %zmm0
100 ; AVX512-NEXT: addq $4, %rax
101 ; AVX512-NEXT: jne .LBB0_1
102 ; AVX512-NEXT: # %bb.2: # %middle.block
103 ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
104 ; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
105 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
106 ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
107 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
108 ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
109 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
110 ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
111 ; AVX512-NEXT: vmovd %xmm0, %eax
112 ; AVX512-NEXT: vzeroupper
115 br label %vector.body
118 %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
119 %vec.phi = phi <16 x i32> [ zeroinitializer, %entry ], [ %10, %vector.body ]
120 %0 = getelementptr inbounds [1024 x i8], [1024 x i8]* @a, i64 0, i64 %index
121 %1 = bitcast i8* %0 to <16 x i8>*
122 %wide.load = load <16 x i8>, <16 x i8>* %1, align 4
123 %2 = zext <16 x i8> %wide.load to <16 x i32>
124 %3 = getelementptr inbounds [1024 x i8], [1024 x i8]* @b, i64 0, i64 %index
125 %4 = bitcast i8* %3 to <16 x i8>*
126 %wide.load1 = load <16 x i8>, <16 x i8>* %4, align 4
127 %5 = zext <16 x i8> %wide.load1 to <16 x i32>
128 %6 = sub nsw <16 x i32> %2, %5
129 %7 = icmp sgt <16 x i32> %6, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
130 %8 = sub nsw <16 x i32> zeroinitializer, %6
131 %9 = select <16 x i1> %7, <16 x i32> %6, <16 x i32> %8
132 %10 = add nsw <16 x i32> %9, %vec.phi
133 %index.next = add i64 %index, 4
134 %11 = icmp eq i64 %index.next, 1024
135 br i1 %11, label %middle.block, label %vector.body
138 %.lcssa = phi <16 x i32> [ %10, %vector.body ]
139 %rdx.shuf = shufflevector <16 x i32> %.lcssa, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
140 %bin.rdx = add <16 x i32> %.lcssa, %rdx.shuf
141 %rdx.shuf2 = shufflevector <16 x i32> %bin.rdx, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
142 %bin.rdx2 = add <16 x i32> %bin.rdx, %rdx.shuf2
143 %rdx.shuf3 = shufflevector <16 x i32> %bin.rdx2, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
144 %bin.rdx3 = add <16 x i32> %bin.rdx2, %rdx.shuf3
145 %rdx.shuf4 = shufflevector <16 x i32> %bin.rdx3, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
146 %bin.rdx4 = add <16 x i32> %bin.rdx3, %rdx.shuf4
147 %12 = extractelement <16 x i32> %bin.rdx4, i32 0
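; Same pattern with a <32 x i8> load from each array per iteration.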
151 define i32 @sad_32i8() nounwind {
152 ; SSE2-LABEL: sad_32i8:
153 ; SSE2: # %bb.0: # %entry
154 ; SSE2-NEXT: pxor %xmm12, %xmm12
155 ; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
156 ; SSE2-NEXT: pxor %xmm0, %xmm0
157 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
158 ; SSE2-NEXT: pxor %xmm0, %xmm0
159 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
160 ; SSE2-NEXT: pxor %xmm6, %xmm6
161 ; SSE2-NEXT: pxor %xmm13, %xmm13
162 ; SSE2-NEXT: pxor %xmm0, %xmm0
163 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
164 ; SSE2-NEXT: pxor %xmm15, %xmm15
165 ; SSE2-NEXT: pxor %xmm0, %xmm0
166 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
167 ; SSE2-NEXT: pxor %xmm14, %xmm14
168 ; SSE2-NEXT: .p2align 4, 0x90
169 ; SSE2-NEXT: .LBB1_1: # %vector.body
170 ; SSE2-NEXT: # =>This Inner Loop Header: Depth=1
171 ; SSE2-NEXT: movdqa a+1040(%rax), %xmm8
172 ; SSE2-NEXT: movdqa a+1024(%rax), %xmm3
173 ; SSE2-NEXT: movdqa %xmm3, %xmm4
174 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm12[0],xmm4[1],xmm12[1],xmm4[2],xmm12[2],xmm4[3],xmm12[3],xmm4[4],xmm12[4],xmm4[5],xmm12[5],xmm4[6],xmm12[6],xmm4[7],xmm12[7]
175 ; SSE2-NEXT: movdqa %xmm4, %xmm7
176 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm12[0],xmm7[1],xmm12[1],xmm7[2],xmm12[2],xmm7[3],xmm12[3]
177 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm12[4],xmm4[5],xmm12[5],xmm4[6],xmm12[6],xmm4[7],xmm12[7]
178 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm12[8],xmm3[9],xmm12[9],xmm3[10],xmm12[10],xmm3[11],xmm12[11],xmm3[12],xmm12[12],xmm3[13],xmm12[13],xmm3[14],xmm12[14],xmm3[15],xmm12[15]
179 ; SSE2-NEXT: movdqa %xmm3, %xmm1
180 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm12[0],xmm1[1],xmm12[1],xmm1[2],xmm12[2],xmm1[3],xmm12[3]
181 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm12[4],xmm3[5],xmm12[5],xmm3[6],xmm12[6],xmm3[7],xmm12[7]
182 ; SSE2-NEXT: movdqa %xmm8, %xmm0
183 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3],xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
184 ; SSE2-NEXT: movdqa %xmm0, %xmm5
185 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm12[0],xmm5[1],xmm12[1],xmm5[2],xmm12[2],xmm5[3],xmm12[3]
186 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm12[4],xmm0[5],xmm12[5],xmm0[6],xmm12[6],xmm0[7],xmm12[7]
187 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm8 = xmm8[8],xmm12[8],xmm8[9],xmm12[9],xmm8[10],xmm12[10],xmm8[11],xmm12[11],xmm8[12],xmm12[12],xmm8[13],xmm12[13],xmm8[14],xmm12[14],xmm8[15],xmm12[15]
188 ; SSE2-NEXT: movdqa b+1024(%rax), %xmm11
189 ; SSE2-NEXT: movdqa %xmm11, %xmm10
190 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm12[0],xmm10[1],xmm12[1],xmm10[2],xmm12[2],xmm10[3],xmm12[3],xmm10[4],xmm12[4],xmm10[5],xmm12[5],xmm10[6],xmm12[6],xmm10[7],xmm12[7]
191 ; SSE2-NEXT: movdqa %xmm10, %xmm2
192 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3]
193 ; SSE2-NEXT: psubd %xmm2, %xmm7
194 ; SSE2-NEXT: movdqa b+1040(%rax), %xmm9
195 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm12[4],xmm10[5],xmm12[5],xmm10[6],xmm12[6],xmm10[7],xmm12[7]
196 ; SSE2-NEXT: psubd %xmm10, %xmm4
197 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm12[8],xmm11[9],xmm12[9],xmm11[10],xmm12[10],xmm11[11],xmm12[11],xmm11[12],xmm12[12],xmm11[13],xmm12[13],xmm11[14],xmm12[14],xmm11[15],xmm12[15]
198 ; SSE2-NEXT: movdqa %xmm11, %xmm2
199 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3]
200 ; SSE2-NEXT: psubd %xmm2, %xmm1
201 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm12[4],xmm11[5],xmm12[5],xmm11[6],xmm12[6],xmm11[7],xmm12[7]
202 ; SSE2-NEXT: psubd %xmm11, %xmm3
203 ; SSE2-NEXT: movdqa %xmm6, %xmm10
204 ; SSE2-NEXT: movdqa %xmm9, %xmm6
205 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3],xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
206 ; SSE2-NEXT: movdqa %xmm6, %xmm2
207 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3]
208 ; SSE2-NEXT: psubd %xmm2, %xmm5
209 ; SSE2-NEXT: movdqa %xmm8, %xmm2
210 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3]
211 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm6 = xmm6[4],xmm12[4],xmm6[5],xmm12[5],xmm6[6],xmm12[6],xmm6[7],xmm12[7]
212 ; SSE2-NEXT: psubd %xmm6, %xmm0
213 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm9 = xmm9[8],xmm12[8],xmm9[9],xmm12[9],xmm9[10],xmm12[10],xmm9[11],xmm12[11],xmm9[12],xmm12[12],xmm9[13],xmm12[13],xmm9[14],xmm12[14],xmm9[15],xmm12[15]
214 ; SSE2-NEXT: movdqa %xmm9, %xmm6
215 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm12[0],xmm6[1],xmm12[1],xmm6[2],xmm12[2],xmm6[3],xmm12[3]
216 ; SSE2-NEXT: psubd %xmm6, %xmm2
217 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm12[4],xmm8[5],xmm12[5],xmm8[6],xmm12[6],xmm8[7],xmm12[7]
218 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm12[4],xmm9[5],xmm12[5],xmm9[6],xmm12[6],xmm9[7],xmm12[7]
219 ; SSE2-NEXT: psubd %xmm9, %xmm8
220 ; SSE2-NEXT: movdqa %xmm7, %xmm6
221 ; SSE2-NEXT: psrad $31, %xmm6
222 ; SSE2-NEXT: paddd %xmm6, %xmm7
223 ; SSE2-NEXT: pxor %xmm6, %xmm7
224 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
225 ; SSE2-NEXT: paddd %xmm7, %xmm6
226 ; SSE2-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
227 ; SSE2-NEXT: movdqa %xmm4, %xmm6
228 ; SSE2-NEXT: psrad $31, %xmm6
229 ; SSE2-NEXT: paddd %xmm6, %xmm4
230 ; SSE2-NEXT: pxor %xmm6, %xmm4
231 ; SSE2-NEXT: movdqa %xmm10, %xmm6
232 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
233 ; SSE2-NEXT: paddd %xmm4, %xmm7
234 ; SSE2-NEXT: movdqa %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
235 ; SSE2-NEXT: movdqa %xmm1, %xmm4
236 ; SSE2-NEXT: psrad $31, %xmm4
237 ; SSE2-NEXT: paddd %xmm4, %xmm1
238 ; SSE2-NEXT: pxor %xmm4, %xmm1
239 ; SSE2-NEXT: paddd %xmm1, %xmm6
240 ; SSE2-NEXT: movdqa %xmm3, %xmm1
241 ; SSE2-NEXT: psrad $31, %xmm1
242 ; SSE2-NEXT: paddd %xmm1, %xmm3
243 ; SSE2-NEXT: pxor %xmm1, %xmm3
244 ; SSE2-NEXT: paddd %xmm3, %xmm13
245 ; SSE2-NEXT: movdqa %xmm5, %xmm1
246 ; SSE2-NEXT: psrad $31, %xmm1
247 ; SSE2-NEXT: paddd %xmm1, %xmm5
248 ; SSE2-NEXT: pxor %xmm1, %xmm5
249 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
250 ; SSE2-NEXT: paddd %xmm5, %xmm1
251 ; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
252 ; SSE2-NEXT: movdqa %xmm0, %xmm1
253 ; SSE2-NEXT: psrad $31, %xmm1
254 ; SSE2-NEXT: paddd %xmm1, %xmm0
255 ; SSE2-NEXT: pxor %xmm1, %xmm0
256 ; SSE2-NEXT: paddd %xmm0, %xmm15
257 ; SSE2-NEXT: movdqa %xmm2, %xmm0
258 ; SSE2-NEXT: psrad $31, %xmm0
259 ; SSE2-NEXT: paddd %xmm0, %xmm2
260 ; SSE2-NEXT: pxor %xmm0, %xmm2
261 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
262 ; SSE2-NEXT: paddd %xmm2, %xmm0
263 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
264 ; SSE2-NEXT: movdqa %xmm8, %xmm0
265 ; SSE2-NEXT: psrad $31, %xmm0
266 ; SSE2-NEXT: paddd %xmm0, %xmm8
267 ; SSE2-NEXT: pxor %xmm0, %xmm8
268 ; SSE2-NEXT: paddd %xmm8, %xmm14
269 ; SSE2-NEXT: addq $4, %rax
270 ; SSE2-NEXT: jne .LBB1_1
271 ; SSE2-NEXT: # %bb.2: # %middle.block
272 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
273 ; SSE2-NEXT: paddd %xmm15, %xmm0
274 ; SSE2-NEXT: paddd %xmm14, %xmm13
275 ; SSE2-NEXT: paddd %xmm0, %xmm13
276 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
277 ; SSE2-NEXT: paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
278 ; SSE2-NEXT: paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
279 ; SSE2-NEXT: paddd %xmm13, %xmm6
280 ; SSE2-NEXT: paddd %xmm0, %xmm6
281 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm6[2,3,0,1]
282 ; SSE2-NEXT: paddd %xmm6, %xmm0
283 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
284 ; SSE2-NEXT: paddd %xmm0, %xmm1
285 ; SSE2-NEXT: movd %xmm1, %eax
288 ; AVX1-LABEL: sad_32i8:
289 ; AVX1: # %bb.0: # %entry
290 ; AVX1-NEXT: vpxor %xmm0, %xmm0, %xmm0
291 ; AVX1-NEXT: movq $-1024, %rax # imm = 0xFC00
292 ; AVX1-NEXT: vpxor %xmm1, %xmm1, %xmm1
293 ; AVX1-NEXT: .p2align 4, 0x90
294 ; AVX1-NEXT: .LBB1_1: # %vector.body
295 ; AVX1-NEXT: # =>This Inner Loop Header: Depth=1
296 ; AVX1-NEXT: vmovdqa a+1040(%rax), %xmm2
297 ; AVX1-NEXT: vpsadbw b+1040(%rax), %xmm2, %xmm2
298 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
299 ; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
300 ; AVX1-NEXT: vmovdqa a+1024(%rax), %xmm3
301 ; AVX1-NEXT: vpsadbw b+1024(%rax), %xmm3, %xmm3
302 ; AVX1-NEXT: vpaddd %xmm1, %xmm3, %xmm1
303 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
304 ; AVX1-NEXT: addq $4, %rax
305 ; AVX1-NEXT: jne .LBB1_1
306 ; AVX1-NEXT: # %bb.2: # %middle.block
307 ; AVX1-NEXT: vpaddd %xmm0, %xmm0, %xmm2
308 ; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm3
309 ; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
310 ; AVX1-NEXT: vpaddd %xmm4, %xmm4, %xmm5
311 ; AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4
312 ; AVX1-NEXT: vpaddd %xmm4, %xmm3, %xmm3
313 ; AVX1-NEXT: vpaddd %xmm2, %xmm0, %xmm0
314 ; AVX1-NEXT: vpaddd %xmm3, %xmm0, %xmm0
315 ; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
316 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
317 ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
318 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
319 ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
320 ; AVX1-NEXT: vmovd %xmm0, %eax
321 ; AVX1-NEXT: vzeroupper
324 ; AVX2-LABEL: sad_32i8:
325 ; AVX2: # %bb.0: # %entry
326 ; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
327 ; AVX2-NEXT: movq $-1024, %rax # imm = 0xFC00
328 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
329 ; AVX2-NEXT: .p2align 4, 0x90
330 ; AVX2-NEXT: .LBB1_1: # %vector.body
331 ; AVX2-NEXT: # =>This Inner Loop Header: Depth=1
332 ; AVX2-NEXT: vmovdqa a+1024(%rax), %ymm2
333 ; AVX2-NEXT: vpsadbw b+1024(%rax), %ymm2, %ymm2
334 ; AVX2-NEXT: vpaddd %ymm1, %ymm2, %ymm1
335 ; AVX2-NEXT: addq $4, %rax
336 ; AVX2-NEXT: jne .LBB1_1
337 ; AVX2-NEXT: # %bb.2: # %middle.block
338 ; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm1
339 ; AVX2-NEXT: vpaddd %ymm0, %ymm0, %ymm0
340 ; AVX2-NEXT: vpaddd %ymm0, %ymm1, %ymm0
341 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
342 ; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
343 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
344 ; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
345 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
346 ; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
347 ; AVX2-NEXT: vmovd %xmm0, %eax
348 ; AVX2-NEXT: vzeroupper
351 ; AVX512-LABEL: sad_32i8:
352 ; AVX512: # %bb.0: # %entry
353 ; AVX512-NEXT: vpxor %xmm0, %xmm0, %xmm0
354 ; AVX512-NEXT: movq $-1024, %rax # imm = 0xFC00
355 ; AVX512-NEXT: vpxor %xmm1, %xmm1, %xmm1
356 ; AVX512-NEXT: .p2align 4, 0x90
357 ; AVX512-NEXT: .LBB1_1: # %vector.body
358 ; AVX512-NEXT: # =>This Inner Loop Header: Depth=1
359 ; AVX512-NEXT: vmovdqa a+1024(%rax), %ymm2
360 ; AVX512-NEXT: vpsadbw b+1024(%rax), %ymm2, %ymm2
361 ; AVX512-NEXT: vpaddd %zmm1, %zmm2, %zmm1
362 ; AVX512-NEXT: addq $4, %rax
363 ; AVX512-NEXT: jne .LBB1_1
364 ; AVX512-NEXT: # %bb.2: # %middle.block
365 ; AVX512-NEXT: vpaddd %zmm0, %zmm1, %zmm0
366 ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
367 ; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
368 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
369 ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
370 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
371 ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
372 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
373 ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
374 ; AVX512-NEXT: vmovd %xmm0, %eax
375 ; AVX512-NEXT: vzeroupper
378 br label %vector.body
381 %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
382 %vec.phi = phi <32 x i32> [ zeroinitializer, %entry ], [ %10, %vector.body ]
383 %0 = getelementptr inbounds [1024 x i8], [1024 x i8]* @a, i64 0, i64 %index
384 %1 = bitcast i8* %0 to <32 x i8>*
385 %wide.load = load <32 x i8>, <32 x i8>* %1, align 32
386 %2 = zext <32 x i8> %wide.load to <32 x i32>
387 %3 = getelementptr inbounds [1024 x i8], [1024 x i8]* @b, i64 0, i64 %index
388 %4 = bitcast i8* %3 to <32 x i8>*
389 %wide.load1 = load <32 x i8>, <32 x i8>* %4, align 32
390 %5 = zext <32 x i8> %wide.load1 to <32 x i32>
391 %6 = sub nsw <32 x i32> %2, %5
392 %7 = icmp sgt <32 x i32> %6, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
393 %8 = sub nsw <32 x i32> zeroinitializer, %6
394 %9 = select <32 x i1> %7, <32 x i32> %6, <32 x i32> %8
395 %10 = add nsw <32 x i32> %9, %vec.phi
396 %index.next = add i64 %index, 4
397 %11 = icmp eq i64 %index.next, 1024
398 br i1 %11, label %middle.block, label %vector.body
401 %.lcssa = phi <32 x i32> [ %10, %vector.body ]
402 %rdx.shuf = shufflevector <32 x i32> %.lcssa, <32 x i32> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
403 %bin.rdx = add <32 x i32> %.lcssa, %rdx.shuf
404 %rdx.shuf2 = shufflevector <32 x i32> %bin.rdx, <32 x i32> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
405 %bin.rdx2 = add <32 x i32> %bin.rdx, %rdx.shuf2
406 %rdx.shuf3 = shufflevector <32 x i32> %bin.rdx2, <32 x i32> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
407 %bin.rdx3 = add <32 x i32> %bin.rdx2, %rdx.shuf3
408 %rdx.shuf4 = shufflevector <32 x i32> %bin.rdx3, <32 x i32> undef, <32 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
409 %bin.rdx4 = add <32 x i32> %bin.rdx3, %rdx.shuf4
410 %rdx.shuf5 = shufflevector <32 x i32> %bin.rdx4, <32 x i32> undef, <32 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
411 %bin.rdx5 = add <32 x i32> %bin.rdx4, %rdx.shuf5
412 %12 = extractelement <32 x i32> %bin.rdx5, i32 0
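; Same pattern processing 64 bytes from each array per iteration.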
416 define i32 @sad_avx64i8() nounwind {
417 ; SSE2-LABEL: sad_avx64i8:
418 ; SSE2: # %bb.0: # %entry
419 ; SSE2-NEXT: subq $200, %rsp
420 ; SSE2-NEXT: pxor %xmm14, %xmm14
421 ; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
422 ; SSE2-NEXT: pxor %xmm0, %xmm0
423 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
424 ; SSE2-NEXT: pxor %xmm0, %xmm0
425 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
426 ; SSE2-NEXT: pxor %xmm0, %xmm0
427 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
428 ; SSE2-NEXT: pxor %xmm0, %xmm0
429 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
430 ; SSE2-NEXT: pxor %xmm0, %xmm0
431 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
432 ; SSE2-NEXT: pxor %xmm0, %xmm0
433 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
434 ; SSE2-NEXT: pxor %xmm0, %xmm0
435 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
436 ; SSE2-NEXT: pxor %xmm0, %xmm0
437 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
438 ; SSE2-NEXT: pxor %xmm0, %xmm0
439 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
440 ; SSE2-NEXT: pxor %xmm0, %xmm0
441 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
442 ; SSE2-NEXT: pxor %xmm0, %xmm0
443 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
444 ; SSE2-NEXT: pxor %xmm0, %xmm0
445 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
446 ; SSE2-NEXT: pxor %xmm0, %xmm0
447 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
448 ; SSE2-NEXT: pxor %xmm0, %xmm0
449 ; SSE2-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
450 ; SSE2-NEXT: pxor %xmm0, %xmm0
451 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
452 ; SSE2-NEXT: pxor %xmm0, %xmm0
453 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
454 ; SSE2-NEXT: .p2align 4, 0x90
455 ; SSE2-NEXT: .LBB2_1: # %vector.body
456 ; SSE2-NEXT: # =>This Inner Loop Header: Depth=1
457 ; SSE2-NEXT: movaps a+1040(%rax), %xmm0
458 ; SSE2-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
459 ; SSE2-NEXT: movdqa a+1024(%rax), %xmm12
460 ; SSE2-NEXT: movdqa a+1056(%rax), %xmm15
461 ; SSE2-NEXT: movdqa a+1072(%rax), %xmm4
462 ; SSE2-NEXT: movdqa %xmm4, %xmm6
463 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm6 = xmm6[8],xmm14[8],xmm6[9],xmm14[9],xmm6[10],xmm14[10],xmm6[11],xmm14[11],xmm6[12],xmm14[12],xmm6[13],xmm14[13],xmm6[14],xmm14[14],xmm6[15],xmm14[15]
464 ; SSE2-NEXT: movdqa %xmm6, %xmm1
465 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm14[4],xmm1[5],xmm14[5],xmm1[6],xmm14[6],xmm1[7],xmm14[7]
466 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm14[0],xmm6[1],xmm14[1],xmm6[2],xmm14[2],xmm6[3],xmm14[3]
467 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3],xmm4[4],xmm14[4],xmm4[5],xmm14[5],xmm4[6],xmm14[6],xmm4[7],xmm14[7]
468 ; SSE2-NEXT: movdqa %xmm4, %xmm5
469 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm14[4],xmm5[5],xmm14[5],xmm5[6],xmm14[6],xmm5[7],xmm14[7]
470 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm14[0],xmm4[1],xmm14[1],xmm4[2],xmm14[2],xmm4[3],xmm14[3]
471 ; SSE2-NEXT: movdqa %xmm15, %xmm11
472 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm11 = xmm11[8],xmm14[8],xmm11[9],xmm14[9],xmm11[10],xmm14[10],xmm11[11],xmm14[11],xmm11[12],xmm14[12],xmm11[13],xmm14[13],xmm11[14],xmm14[14],xmm11[15],xmm14[15]
473 ; SSE2-NEXT: movdqa %xmm11, %xmm8
474 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm8 = xmm8[4],xmm14[4],xmm8[5],xmm14[5],xmm8[6],xmm14[6],xmm8[7],xmm14[7]
475 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm11 = xmm11[0],xmm14[0],xmm11[1],xmm14[1],xmm11[2],xmm14[2],xmm11[3],xmm14[3]
476 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3],xmm15[4],xmm14[4],xmm15[5],xmm14[5],xmm15[6],xmm14[6],xmm15[7],xmm14[7]
477 ; SSE2-NEXT: movdqa %xmm15, %xmm0
478 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
479 ; SSE2-NEXT: movdqa %xmm0, %xmm2
480 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm14[0],xmm15[1],xmm14[1],xmm15[2],xmm14[2],xmm15[3],xmm14[3]
481 ; SSE2-NEXT: movdqa %xmm12, %xmm10
482 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm14[0],xmm10[1],xmm14[1],xmm10[2],xmm14[2],xmm10[3],xmm14[3],xmm10[4],xmm14[4],xmm10[5],xmm14[5],xmm10[6],xmm14[6],xmm10[7],xmm14[7]
483 ; SSE2-NEXT: movdqa %xmm10, %xmm0
484 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
485 ; SSE2-NEXT: movdqa %xmm0, %xmm9
486 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm14[4],xmm10[5],xmm14[5],xmm10[6],xmm14[6],xmm10[7],xmm14[7]
487 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm14[8],xmm12[9],xmm14[9],xmm12[10],xmm14[10],xmm12[11],xmm14[11],xmm12[12],xmm14[12],xmm12[13],xmm14[13],xmm12[14],xmm14[14],xmm12[15],xmm14[15]
488 ; SSE2-NEXT: movdqa %xmm12, %xmm0
489 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
490 ; SSE2-NEXT: movdqa %xmm0, %xmm13
491 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm12 = xmm12[4],xmm14[4],xmm12[5],xmm14[5],xmm12[6],xmm14[6],xmm12[7],xmm14[7]
492 ; SSE2-NEXT: movdqa b+1072(%rax), %xmm3
493 ; SSE2-NEXT: movdqa %xmm3, %xmm7
494 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm14[8],xmm7[9],xmm14[9],xmm7[10],xmm14[10],xmm7[11],xmm14[11],xmm7[12],xmm14[12],xmm7[13],xmm14[13],xmm7[14],xmm14[14],xmm7[15],xmm14[15]
495 ; SSE2-NEXT: movdqa %xmm7, %xmm0
496 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
497 ; SSE2-NEXT: psubd %xmm0, %xmm1
498 ; SSE2-NEXT: movdqa b+1056(%rax), %xmm0
499 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3]
500 ; SSE2-NEXT: psubd %xmm7, %xmm6
501 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
502 ; SSE2-NEXT: movdqa %xmm3, %xmm7
503 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm14[4],xmm7[5],xmm14[5],xmm7[6],xmm14[6],xmm7[7],xmm14[7]
504 ; SSE2-NEXT: psubd %xmm7, %xmm5
505 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3]
506 ; SSE2-NEXT: psubd %xmm3, %xmm4
507 ; SSE2-NEXT: movdqa %xmm0, %xmm3
508 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm14[8],xmm3[9],xmm14[9],xmm3[10],xmm14[10],xmm3[11],xmm14[11],xmm3[12],xmm14[12],xmm3[13],xmm14[13],xmm3[14],xmm14[14],xmm3[15],xmm14[15]
509 ; SSE2-NEXT: movdqa %xmm3, %xmm7
510 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm14[4],xmm7[5],xmm14[5],xmm7[6],xmm14[6],xmm7[7],xmm14[7]
511 ; SSE2-NEXT: psubd %xmm7, %xmm8
512 ; SSE2-NEXT: movdqa b+1024(%rax), %xmm7
513 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3]
514 ; SSE2-NEXT: psubd %xmm3, %xmm11
515 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
516 ; SSE2-NEXT: movdqa %xmm0, %xmm3
517 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
518 ; SSE2-NEXT: psubd %xmm3, %xmm2
519 ; SSE2-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
520 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
521 ; SSE2-NEXT: psubd %xmm0, %xmm15
522 ; SSE2-NEXT: movdqa %xmm7, %xmm0
523 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3],xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
524 ; SSE2-NEXT: movdqa %xmm0, %xmm3
525 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3]
526 ; SSE2-NEXT: psubd %xmm3, %xmm9
527 ; SSE2-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
528 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
529 ; SSE2-NEXT: movdqa %xmm2, %xmm9
530 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm14[0],xmm9[1],xmm14[1],xmm9[2],xmm14[2],xmm9[3],xmm14[3],xmm9[4],xmm14[4],xmm9[5],xmm14[5],xmm9[6],xmm14[6],xmm9[7],xmm14[7]
531 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm0 = xmm0[4],xmm14[4],xmm0[5],xmm14[5],xmm0[6],xmm14[6],xmm0[7],xmm14[7]
532 ; SSE2-NEXT: psubd %xmm0, %xmm10
533 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm14[8],xmm7[9],xmm14[9],xmm7[10],xmm14[10],xmm7[11],xmm14[11],xmm7[12],xmm14[12],xmm7[13],xmm14[13],xmm7[14],xmm14[14],xmm7[15],xmm14[15]
534 ; SSE2-NEXT: movdqa %xmm7, %xmm0
535 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
536 ; SSE2-NEXT: psubd %xmm0, %xmm13
537 ; SSE2-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
538 ; SSE2-NEXT: movdqa %xmm9, %xmm0
539 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm14[0],xmm0[1],xmm14[1],xmm0[2],xmm14[2],xmm0[3],xmm14[3]
540 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm7 = xmm7[4],xmm14[4],xmm7[5],xmm14[5],xmm7[6],xmm14[6],xmm7[7],xmm14[7]
541 ; SSE2-NEXT: psubd %xmm7, %xmm12
542 ; SSE2-NEXT: movdqa b+1040(%rax), %xmm13
543 ; SSE2-NEXT: movdqa %xmm13, %xmm3
544 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3],xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
545 ; SSE2-NEXT: movdqa %xmm3, %xmm7
546 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3]
547 ; SSE2-NEXT: psubd %xmm7, %xmm0
548 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm9 = xmm9[4],xmm14[4],xmm9[5],xmm14[5],xmm9[6],xmm14[6],xmm9[7],xmm14[7]
549 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm3 = xmm3[4],xmm14[4],xmm3[5],xmm14[5],xmm3[6],xmm14[6],xmm3[7],xmm14[7]
550 ; SSE2-NEXT: psubd %xmm3, %xmm9
551 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm14[8],xmm2[9],xmm14[9],xmm2[10],xmm14[10],xmm2[11],xmm14[11],xmm2[12],xmm14[12],xmm2[13],xmm14[13],xmm2[14],xmm14[14],xmm2[15],xmm14[15]
552 ; SSE2-NEXT: movdqa %xmm2, %xmm7
553 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm14[0],xmm7[1],xmm14[1],xmm7[2],xmm14[2],xmm7[3],xmm14[3]
554 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm13 = xmm13[8],xmm14[8],xmm13[9],xmm14[9],xmm13[10],xmm14[10],xmm13[11],xmm14[11],xmm13[12],xmm14[12],xmm13[13],xmm14[13],xmm13[14],xmm14[14],xmm13[15],xmm14[15]
555 ; SSE2-NEXT: movdqa %xmm13, %xmm3
556 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm14[0],xmm3[1],xmm14[1],xmm3[2],xmm14[2],xmm3[3],xmm14[3]
557 ; SSE2-NEXT: psubd %xmm3, %xmm7
558 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm14[4],xmm2[5],xmm14[5],xmm2[6],xmm14[6],xmm2[7],xmm14[7]
559 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm14[4],xmm13[5],xmm14[5],xmm13[6],xmm14[6],xmm13[7],xmm14[7]
560 ; SSE2-NEXT: psubd %xmm13, %xmm2
561 ; SSE2-NEXT: movdqa %xmm2, %xmm13
562 ; SSE2-NEXT: movdqa %xmm1, %xmm3
563 ; SSE2-NEXT: psrad $31, %xmm3
564 ; SSE2-NEXT: paddd %xmm3, %xmm1
565 ; SSE2-NEXT: pxor %xmm3, %xmm1
566 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
567 ; SSE2-NEXT: paddd %xmm1, %xmm3
568 ; SSE2-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
569 ; SSE2-NEXT: movdqa %xmm6, %xmm1
570 ; SSE2-NEXT: psrad $31, %xmm1
571 ; SSE2-NEXT: paddd %xmm1, %xmm6
572 ; SSE2-NEXT: pxor %xmm1, %xmm6
573 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
574 ; SSE2-NEXT: paddd %xmm6, %xmm1
575 ; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
576 ; SSE2-NEXT: movdqa %xmm5, %xmm1
577 ; SSE2-NEXT: psrad $31, %xmm1
578 ; SSE2-NEXT: paddd %xmm1, %xmm5
579 ; SSE2-NEXT: pxor %xmm1, %xmm5
580 ; SSE2-NEXT: movdqa (%rsp), %xmm1 # 16-byte Reload
581 ; SSE2-NEXT: paddd %xmm5, %xmm1
582 ; SSE2-NEXT: movdqa %xmm1, (%rsp) # 16-byte Spill
583 ; SSE2-NEXT: movdqa %xmm4, %xmm1
584 ; SSE2-NEXT: psrad $31, %xmm1
585 ; SSE2-NEXT: paddd %xmm1, %xmm4
586 ; SSE2-NEXT: pxor %xmm1, %xmm4
587 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
588 ; SSE2-NEXT: paddd %xmm4, %xmm1
589 ; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
590 ; SSE2-NEXT: movdqa %xmm8, %xmm1
591 ; SSE2-NEXT: psrad $31, %xmm1
592 ; SSE2-NEXT: paddd %xmm1, %xmm8
593 ; SSE2-NEXT: pxor %xmm1, %xmm8
594 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
595 ; SSE2-NEXT: paddd %xmm8, %xmm1
596 ; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
597 ; SSE2-NEXT: movdqa %xmm11, %xmm1
598 ; SSE2-NEXT: psrad $31, %xmm1
599 ; SSE2-NEXT: paddd %xmm1, %xmm11
600 ; SSE2-NEXT: pxor %xmm1, %xmm11
601 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
602 ; SSE2-NEXT: paddd %xmm11, %xmm1
603 ; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
604 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
605 ; SSE2-NEXT: movdqa %xmm2, %xmm1
606 ; SSE2-NEXT: psrad $31, %xmm1
607 ; SSE2-NEXT: paddd %xmm1, %xmm2
608 ; SSE2-NEXT: pxor %xmm1, %xmm2
609 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
610 ; SSE2-NEXT: paddd %xmm2, %xmm1
611 ; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
612 ; SSE2-NEXT: movdqa %xmm15, %xmm1
613 ; SSE2-NEXT: psrad $31, %xmm1
614 ; SSE2-NEXT: paddd %xmm1, %xmm15
615 ; SSE2-NEXT: pxor %xmm1, %xmm15
616 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
617 ; SSE2-NEXT: paddd %xmm15, %xmm1
618 ; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
619 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
620 ; SSE2-NEXT: movdqa %xmm2, %xmm1
621 ; SSE2-NEXT: psrad $31, %xmm1
622 ; SSE2-NEXT: paddd %xmm1, %xmm2
623 ; SSE2-NEXT: pxor %xmm1, %xmm2
624 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
625 ; SSE2-NEXT: paddd %xmm2, %xmm1
626 ; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
627 ; SSE2-NEXT: movdqa %xmm10, %xmm1
628 ; SSE2-NEXT: psrad $31, %xmm1
629 ; SSE2-NEXT: paddd %xmm1, %xmm10
630 ; SSE2-NEXT: pxor %xmm1, %xmm10
631 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
632 ; SSE2-NEXT: paddd %xmm10, %xmm1
633 ; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
634 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
635 ; SSE2-NEXT: movdqa %xmm2, %xmm1
636 ; SSE2-NEXT: psrad $31, %xmm1
637 ; SSE2-NEXT: paddd %xmm1, %xmm2
638 ; SSE2-NEXT: pxor %xmm1, %xmm2
639 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
640 ; SSE2-NEXT: paddd %xmm2, %xmm1
641 ; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
642 ; SSE2-NEXT: movdqa %xmm12, %xmm1
643 ; SSE2-NEXT: psrad $31, %xmm1
644 ; SSE2-NEXT: paddd %xmm1, %xmm12
645 ; SSE2-NEXT: pxor %xmm1, %xmm12
646 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
647 ; SSE2-NEXT: paddd %xmm12, %xmm1
648 ; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
649 ; SSE2-NEXT: movdqa %xmm0, %xmm1
650 ; SSE2-NEXT: psrad $31, %xmm1
651 ; SSE2-NEXT: paddd %xmm1, %xmm0
652 ; SSE2-NEXT: pxor %xmm1, %xmm0
653 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
654 ; SSE2-NEXT: paddd %xmm0, %xmm1
655 ; SSE2-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
656 ; SSE2-NEXT: movdqa %xmm9, %xmm0
657 ; SSE2-NEXT: psrad $31, %xmm0
658 ; SSE2-NEXT: paddd %xmm0, %xmm9
659 ; SSE2-NEXT: pxor %xmm0, %xmm9
660 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
661 ; SSE2-NEXT: paddd %xmm9, %xmm0
662 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
663 ; SSE2-NEXT: movdqa %xmm7, %xmm0
664 ; SSE2-NEXT: psrad $31, %xmm0
665 ; SSE2-NEXT: paddd %xmm0, %xmm7
666 ; SSE2-NEXT: pxor %xmm0, %xmm7
667 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
668 ; SSE2-NEXT: paddd %xmm7, %xmm0
669 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
670 ; SSE2-NEXT: movdqa %xmm13, %xmm1
671 ; SSE2-NEXT: movdqa %xmm13, %xmm0
672 ; SSE2-NEXT: psrad $31, %xmm0
673 ; SSE2-NEXT: paddd %xmm0, %xmm1
674 ; SSE2-NEXT: pxor %xmm0, %xmm1
675 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
676 ; SSE2-NEXT: paddd %xmm1, %xmm0
677 ; SSE2-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
678 ; SSE2-NEXT: addq $4, %rax
679 ; SSE2-NEXT: jne .LBB2_1
680 ; SSE2-NEXT: # %bb.2: # %middle.block
681 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
682 ; SSE2-NEXT: paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
683 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
684 ; SSE2-NEXT: paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
685 ; SSE2-NEXT: paddd %xmm0, %xmm1
686 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
687 ; SSE2-NEXT: paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
688 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
689 ; SSE2-NEXT: paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
690 ; SSE2-NEXT: paddd %xmm1, %xmm3
691 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
692 ; SSE2-NEXT: paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
693 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
694 ; SSE2-NEXT: paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
695 ; SSE2-NEXT: paddd %xmm1, %xmm4
696 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
697 ; SSE2-NEXT: paddd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
698 ; SSE2-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
699 ; SSE2-NEXT: paddd (%rsp), %xmm1 # 16-byte Folded Reload
700 ; SSE2-NEXT: paddd %xmm4, %xmm1
701 ; SSE2-NEXT: paddd %xmm2, %xmm1
702 ; SSE2-NEXT: paddd %xmm3, %xmm1
703 ; SSE2-NEXT: paddd %xmm0, %xmm1
704 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
705 ; SSE2-NEXT: paddd %xmm1, %xmm0
706 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
707 ; SSE2-NEXT: paddd %xmm0, %xmm1
708 ; SSE2-NEXT: movd %xmm1, %eax
709 ; SSE2-NEXT: addq $200, %rsp
712 ; AVX1-LABEL: sad_avx64i8:
713 ; AVX1: # %bb.0: # %entry
714 ; AVX1-NEXT: subq $24, %rsp
715 ; AVX1-NEXT: vpxor %xmm14, %xmm14, %xmm14
716 ; AVX1-NEXT: movq $-1024, %rax # imm = 0xFC00
717 ; AVX1-NEXT: vpxor %xmm15, %xmm15, %xmm15
718 ; AVX1-NEXT: vpxor %xmm7, %xmm7, %xmm7
719 ; AVX1-NEXT: vpxor %xmm13, %xmm13, %xmm13
720 ; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
721 ; AVX1-NEXT: vpxor %xmm9, %xmm9, %xmm9
722 ; AVX1-NEXT: vpxor %xmm10, %xmm10, %xmm10
723 ; AVX1-NEXT: vpxor %xmm12, %xmm12, %xmm12
724 ; AVX1-NEXT: .p2align 4, 0x90
725 ; AVX1-NEXT: .LBB2_1: # %vector.body
726 ; AVX1-NEXT: # =>This Inner Loop Header: Depth=1
727 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
728 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
729 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
730 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm3 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
731 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
732 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
733 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
734 ; AVX1-NEXT: vmovdqa %ymm7, %ymm11
735 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
736 ; AVX1-NEXT: vpsubd %xmm7, %xmm0, %xmm0
737 ; AVX1-NEXT: vmovdqa %xmm0, (%rsp) # 16-byte Spill
738 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
739 ; AVX1-NEXT: vpsubd %xmm0, %xmm1, %xmm0
740 ; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
741 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
742 ; AVX1-NEXT: vpsubd %xmm0, %xmm2, %xmm0
743 ; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
744 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
745 ; AVX1-NEXT: vpsubd %xmm0, %xmm3, %xmm0
746 ; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
747 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
748 ; AVX1-NEXT: vpsubd %xmm0, %xmm4, %xmm0
749 ; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
750 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
751 ; AVX1-NEXT: vpsubd %xmm0, %xmm5, %xmm0
752 ; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
753 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
754 ; AVX1-NEXT: vpsubd %xmm0, %xmm6, %xmm0
755 ; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
756 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
757 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
758 ; AVX1-NEXT: vpsubd %xmm4, %xmm0, %xmm0
759 ; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
760 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm0 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
761 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
762 ; AVX1-NEXT: vpsubd %xmm5, %xmm0, %xmm0
763 ; AVX1-NEXT: vmovdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
764 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
765 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
766 ; AVX1-NEXT: vpsubd %xmm6, %xmm5, %xmm4
767 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
768 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
769 ; AVX1-NEXT: vpsubd %xmm6, %xmm5, %xmm3
770 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
771 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
772 ; AVX1-NEXT: vpsubd %xmm6, %xmm5, %xmm0
773 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
774 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
775 ; AVX1-NEXT: vpsubd %xmm6, %xmm5, %xmm5
776 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
777 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
778 ; AVX1-NEXT: vpsubd %xmm7, %xmm6, %xmm6
779 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
780 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm1 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
781 ; AVX1-NEXT: vpsubd %xmm1, %xmm7, %xmm1
782 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
783 ; AVX1-NEXT: vpmovzxbd {{.*#+}} xmm2 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero
784 ; AVX1-NEXT: vpsubd %xmm2, %xmm7, %xmm2
785 ; AVX1-NEXT: vpabsd %xmm2, %xmm2
786 ; AVX1-NEXT: vextractf128 $1, %ymm11, %xmm7
787 ; AVX1-NEXT: vpaddd %xmm7, %xmm2, %xmm2
788 ; AVX1-NEXT: vpabsd %xmm1, %xmm1
789 ; AVX1-NEXT: vpaddd %xmm11, %xmm1, %xmm1
790 ; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm7
791 ; AVX1-NEXT: vpabsd %xmm6, %xmm1
792 ; AVX1-NEXT: vextractf128 $1, %ymm15, %xmm2
793 ; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
794 ; AVX1-NEXT: vpabsd %xmm5, %xmm2
795 ; AVX1-NEXT: vpaddd %xmm15, %xmm2, %xmm2
796 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm15
797 ; AVX1-NEXT: vpabsd %xmm0, %xmm1
798 ; AVX1-NEXT: vextractf128 $1, %ymm14, %xmm2
799 ; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
800 ; AVX1-NEXT: vpabsd %xmm3, %xmm2
801 ; AVX1-NEXT: vpaddd %xmm14, %xmm2, %xmm2
802 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm2, %ymm14
803 ; AVX1-NEXT: vpabsd %xmm4, %xmm1
804 ; AVX1-NEXT: vextractf128 $1, %ymm13, %xmm2
805 ; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
806 ; AVX1-NEXT: vpabsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
807 ; AVX1-NEXT: vpaddd %xmm13, %xmm0, %xmm0
808 ; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm13
809 ; AVX1-NEXT: vpabsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
810 ; AVX1-NEXT: vextractf128 $1, %ymm8, %xmm1
811 ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
812 ; AVX1-NEXT: vpabsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
813 ; AVX1-NEXT: vpaddd %xmm8, %xmm1, %xmm1
814 ; AVX1-NEXT: vpabsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
815 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm8
816 ; AVX1-NEXT: vextractf128 $1, %ymm9, %xmm0
817 ; AVX1-NEXT: vpaddd %xmm0, %xmm2, %xmm0
818 ; AVX1-NEXT: vpabsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
819 ; AVX1-NEXT: vpaddd %xmm9, %xmm1, %xmm1
820 ; AVX1-NEXT: vpabsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
821 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm9
822 ; AVX1-NEXT: vextractf128 $1, %ymm10, %xmm0
823 ; AVX1-NEXT: vpaddd %xmm0, %xmm2, %xmm0
824 ; AVX1-NEXT: vpabsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
825 ; AVX1-NEXT: vpaddd %xmm10, %xmm1, %xmm1
826 ; AVX1-NEXT: vpabsd {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
827 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm10
828 ; AVX1-NEXT: vextractf128 $1, %ymm12, %xmm0
829 ; AVX1-NEXT: vpaddd %xmm0, %xmm2, %xmm0
830 ; AVX1-NEXT: vpabsd (%rsp), %xmm1 # 16-byte Folded Reload
831 ; AVX1-NEXT: vpaddd %xmm12, %xmm1, %xmm1
832 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm1, %ymm12
833 ; AVX1-NEXT: addq $4, %rax
834 ; AVX1-NEXT: jne .LBB2_1
835 ; AVX1-NEXT: # %bb.2: # %middle.block
836 ; AVX1-NEXT: vextractf128 $1, %ymm15, %xmm0
837 ; AVX1-NEXT: vextractf128 $1, %ymm9, %xmm1
838 ; AVX1-NEXT: vextractf128 $1, %ymm13, %xmm2
839 ; AVX1-NEXT: vextractf128 $1, %ymm12, %xmm3
840 ; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
841 ; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
842 ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
843 ; AVX1-NEXT: vextractf128 $1, %ymm14, %xmm1
844 ; AVX1-NEXT: vextractf128 $1, %ymm8, %xmm2
845 ; AVX1-NEXT: vextractf128 $1, %ymm7, %xmm3
846 ; AVX1-NEXT: vextractf128 $1, %ymm10, %xmm4
847 ; AVX1-NEXT: vpaddd %xmm4, %xmm3, %xmm3
848 ; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
849 ; AVX1-NEXT: vpaddd %xmm0, %xmm2, %xmm0
850 ; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
851 ; AVX1-NEXT: vpaddd %xmm12, %xmm13, %xmm1
852 ; AVX1-NEXT: vpaddd %xmm10, %xmm7, %xmm2
853 ; AVX1-NEXT: vpaddd %xmm2, %xmm8, %xmm2
854 ; AVX1-NEXT: vpaddd %xmm1, %xmm9, %xmm1
855 ; AVX1-NEXT: vpaddd %xmm1, %xmm15, %xmm1
856 ; AVX1-NEXT: vpaddd %xmm1, %xmm2, %xmm1
857 ; AVX1-NEXT: vpaddd %xmm0, %xmm1, %xmm0
858 ; AVX1-NEXT: vpaddd %xmm0, %xmm14, %xmm0
859 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
860 ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
861 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
862 ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
863 ; AVX1-NEXT: vmovd %xmm0, %eax
864 ; AVX1-NEXT: addq $24, %rsp
865 ; AVX1-NEXT: vzeroupper
868 ; AVX2-LABEL: sad_avx64i8:
869 ; AVX2: # %bb.0: # %entry
870 ; AVX2-NEXT: vpxor %xmm0, %xmm0, %xmm0
871 ; AVX2-NEXT: movq $-1024, %rax # imm = 0xFC00
872 ; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
873 ; AVX2-NEXT: vpxor %xmm1, %xmm1, %xmm1
874 ; AVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
875 ; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
876 ; AVX2-NEXT: vpxor %xmm6, %xmm6, %xmm6
877 ; AVX2-NEXT: vpxor %xmm5, %xmm5, %xmm5
878 ; AVX2-NEXT: vpxor %xmm7, %xmm7, %xmm7
879 ; AVX2-NEXT: .p2align 4, 0x90
880 ; AVX2-NEXT: .LBB2_1: # %vector.body
881 ; AVX2-NEXT: # =>This Inner Loop Header: Depth=1
882 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
883 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm9 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
884 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm10 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
885 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm11 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
886 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm12 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
887 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm13 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
888 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm14 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
889 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
890 ; AVX2-NEXT: vmovdqu %ymm15, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
891 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
892 ; AVX2-NEXT: vpsubd %ymm15, %ymm8, %ymm8
893 ; AVX2-NEXT: vmovdqu %ymm8, {{[-0-9]+}}(%r{{[sb]}}p) # 32-byte Spill
894 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
895 ; AVX2-NEXT: vpsubd %ymm15, %ymm9, %ymm9
896 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
897 ; AVX2-NEXT: vpsubd %ymm15, %ymm10, %ymm10
898 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
899 ; AVX2-NEXT: vpsubd %ymm15, %ymm11, %ymm11
900 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
901 ; AVX2-NEXT: vpsubd %ymm15, %ymm12, %ymm12
902 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
903 ; AVX2-NEXT: vpsubd %ymm15, %ymm13, %ymm13
904 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
905 ; AVX2-NEXT: vpsubd %ymm15, %ymm14, %ymm14
906 ; AVX2-NEXT: vpmovzxbd {{.*#+}} ymm15 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero
907 ; AVX2-NEXT: vmovdqu {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Reload
908 ; AVX2-NEXT: vpsubd %ymm15, %ymm8, %ymm15
909 ; AVX2-NEXT: vpabsd {{[-0-9]+}}(%r{{[sb]}}p), %ymm8 # 32-byte Folded Reload
910 ; AVX2-NEXT: vpaddd %ymm7, %ymm8, %ymm7
911 ; AVX2-NEXT: vpabsd %ymm9, %ymm8
912 ; AVX2-NEXT: vpaddd %ymm5, %ymm8, %ymm5
913 ; AVX2-NEXT: vpabsd %ymm10, %ymm8
914 ; AVX2-NEXT: vpaddd %ymm6, %ymm8, %ymm6
915 ; AVX2-NEXT: vpabsd %ymm11, %ymm8
916 ; AVX2-NEXT: vpaddd %ymm3, %ymm8, %ymm3
917 ; AVX2-NEXT: vpabsd %ymm12, %ymm8
918 ; AVX2-NEXT: vpaddd %ymm0, %ymm8, %ymm0
919 ; AVX2-NEXT: vpabsd %ymm13, %ymm8
920 ; AVX2-NEXT: vpaddd %ymm2, %ymm8, %ymm2
921 ; AVX2-NEXT: vpabsd %ymm14, %ymm8
922 ; AVX2-NEXT: vpaddd %ymm1, %ymm8, %ymm1
923 ; AVX2-NEXT: vpabsd %ymm15, %ymm8
924 ; AVX2-NEXT: vpaddd %ymm4, %ymm8, %ymm4
925 ; AVX2-NEXT: addq $4, %rax
926 ; AVX2-NEXT: jne .LBB2_1
927 ; AVX2-NEXT: # %bb.2: # %middle.block
928 ; AVX2-NEXT: vpaddd %ymm6, %ymm2, %ymm2
929 ; AVX2-NEXT: vpaddd %ymm7, %ymm4, %ymm4
930 ; AVX2-NEXT: vpaddd %ymm4, %ymm2, %ymm2
931 ; AVX2-NEXT: vpaddd %ymm3, %ymm0, %ymm0
932 ; AVX2-NEXT: vpaddd %ymm5, %ymm1, %ymm1
933 ; AVX2-NEXT: vpaddd %ymm2, %ymm1, %ymm1
934 ; AVX2-NEXT: vpaddd %ymm1, %ymm0, %ymm0
935 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
936 ; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
937 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
938 ; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
939 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
940 ; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
941 ; AVX2-NEXT: vmovd %xmm0, %eax
942 ; AVX2-NEXT: vzeroupper
945 ; AVX512F-LABEL: sad_avx64i8:
946 ; AVX512F: # %bb.0: # %entry
947 ; AVX512F-NEXT: vpxor %xmm0, %xmm0, %xmm0
948 ; AVX512F-NEXT: movq $-1024, %rax # imm = 0xFC00
949 ; AVX512F-NEXT: vpxor %xmm1, %xmm1, %xmm1
950 ; AVX512F-NEXT: vpxor %xmm2, %xmm2, %xmm2
951 ; AVX512F-NEXT: vpxor %xmm3, %xmm3, %xmm3
952 ; AVX512F-NEXT: .p2align 4, 0x90
953 ; AVX512F-NEXT: .LBB2_1: # %vector.body
954 ; AVX512F-NEXT: # =>This Inner Loop Header: Depth=1
955 ; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm4 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
956 ; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm5 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
957 ; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm6 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
958 ; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm7 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
959 ; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
960 ; AVX512F-NEXT: vpsubd %zmm8, %zmm4, %zmm4
961 ; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
962 ; AVX512F-NEXT: vpsubd %zmm8, %zmm5, %zmm5
963 ; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
964 ; AVX512F-NEXT: vpsubd %zmm8, %zmm6, %zmm6
965 ; AVX512F-NEXT: vpmovzxbd {{.*#+}} zmm8 = mem[0],zero,zero,zero,mem[1],zero,zero,zero,mem[2],zero,zero,zero,mem[3],zero,zero,zero,mem[4],zero,zero,zero,mem[5],zero,zero,zero,mem[6],zero,zero,zero,mem[7],zero,zero,zero,mem[8],zero,zero,zero,mem[9],zero,zero,zero,mem[10],zero,zero,zero,mem[11],zero,zero,zero,mem[12],zero,zero,zero,mem[13],zero,zero,zero,mem[14],zero,zero,zero,mem[15],zero,zero,zero
966 ; AVX512F-NEXT: vpsubd %zmm8, %zmm7, %zmm7
967 ; AVX512F-NEXT: vpabsd %zmm4, %zmm4
968 ; AVX512F-NEXT: vpaddd %zmm0, %zmm4, %zmm0
969 ; AVX512F-NEXT: vpabsd %zmm5, %zmm4
970 ; AVX512F-NEXT: vpaddd %zmm1, %zmm4, %zmm1
971 ; AVX512F-NEXT: vpabsd %zmm6, %zmm4
972 ; AVX512F-NEXT: vpaddd %zmm2, %zmm4, %zmm2
973 ; AVX512F-NEXT: vpabsd %zmm7, %zmm4
974 ; AVX512F-NEXT: vpaddd %zmm3, %zmm4, %zmm3
975 ; AVX512F-NEXT: addq $4, %rax
976 ; AVX512F-NEXT: jne .LBB2_1
977 ; AVX512F-NEXT: # %bb.2: # %middle.block
978 ; AVX512F-NEXT: vpaddd %zmm2, %zmm0, %zmm0
979 ; AVX512F-NEXT: vpaddd %zmm3, %zmm1, %zmm1
980 ; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm0
981 ; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm1
982 ; AVX512F-NEXT: vpaddd %zmm1, %zmm0, %zmm0
983 ; AVX512F-NEXT: vextracti128 $1, %ymm0, %xmm1
984 ; AVX512F-NEXT: vpaddd %xmm1, %xmm0, %xmm0
985 ; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
986 ; AVX512F-NEXT: vpaddd %xmm1, %xmm0, %xmm0
987 ; AVX512F-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
988 ; AVX512F-NEXT: vpaddd %xmm1, %xmm0, %xmm0
989 ; AVX512F-NEXT: vmovd %xmm0, %eax
990 ; AVX512F-NEXT: vzeroupper
993 ; AVX512BW-LABEL: sad_avx64i8:
994 ; AVX512BW: # %bb.0: # %entry
995 ; AVX512BW-NEXT: vpxor %xmm0, %xmm0, %xmm0
996 ; AVX512BW-NEXT: movq $-1024, %rax # imm = 0xFC00
997 ; AVX512BW-NEXT: vpxor %xmm1, %xmm1, %xmm1
998 ; AVX512BW-NEXT: .p2align 4, 0x90
999 ; AVX512BW-NEXT: .LBB2_1: # %vector.body
1000 ; AVX512BW-NEXT: # =>This Inner Loop Header: Depth=1
1001 ; AVX512BW-NEXT: vmovdqa64 a+1024(%rax), %zmm2
1002 ; AVX512BW-NEXT: vpsadbw b+1024(%rax), %zmm2, %zmm2
1003 ; AVX512BW-NEXT: vpaddd %zmm1, %zmm2, %zmm1
1004 ; AVX512BW-NEXT: addq $4, %rax
1005 ; AVX512BW-NEXT: jne .LBB2_1
1006 ; AVX512BW-NEXT: # %bb.2: # %middle.block
1007 ; AVX512BW-NEXT: vpaddd %zmm0, %zmm1, %zmm1
1008 ; AVX512BW-NEXT: vpaddd %zmm0, %zmm0, %zmm0
1009 ; AVX512BW-NEXT: vpaddd %zmm0, %zmm1, %zmm0
1010 ; AVX512BW-NEXT: vextracti64x4 $1, %zmm0, %ymm1
1011 ; AVX512BW-NEXT: vpaddd %zmm1, %zmm0, %zmm0
1012 ; AVX512BW-NEXT: vextracti128 $1, %ymm0, %xmm1
1013 ; AVX512BW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
1014 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
1015 ; AVX512BW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
1016 ; AVX512BW-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
1017 ; AVX512BW-NEXT: vpaddd %xmm1, %xmm0, %xmm0
1018 ; AVX512BW-NEXT: vmovd %xmm0, %eax
1019 ; AVX512BW-NEXT: vzeroupper
1020 ; AVX512BW-NEXT: retq
1022 br label %vector.body
1025 %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
1026 %vec.phi = phi <64 x i32> [ zeroinitializer, %entry ], [ %10, %vector.body ]
1027 %0 = getelementptr inbounds [1024 x i8], [1024 x i8]* @a, i64 0, i64 %index
1028 %1 = bitcast i8* %0 to <64 x i8>*
1029 %wide.load = load <64 x i8>, <64 x i8>* %1, align 64
1030 %2 = zext <64 x i8> %wide.load to <64 x i32>
1031 %3 = getelementptr inbounds [1024 x i8], [1024 x i8]* @b, i64 0, i64 %index
1032 %4 = bitcast i8* %3 to <64 x i8>*
1033 %wide.load1 = load <64 x i8>, <64 x i8>* %4, align 64
1034 %5 = zext <64 x i8> %wide.load1 to <64 x i32>
1035 %6 = sub nsw <64 x i32> %2, %5
1036 %7 = icmp sgt <64 x i32> %6, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
1037 %8 = sub nsw <64 x i32> zeroinitializer, %6
1038 %9 = select <64 x i1> %7, <64 x i32> %6, <64 x i32> %8
1039 %10 = add nsw <64 x i32> %9, %vec.phi
1040 %index.next = add i64 %index, 4
1041 %11 = icmp eq i64 %index.next, 1024
1042 br i1 %11, label %middle.block, label %vector.body
1045 %.lcssa = phi <64 x i32> [ %10, %vector.body ]
1046 %rdx.shuf = shufflevector <64 x i32> %.lcssa, <64 x i32> undef, <64 x i32> <i32 32, i32 33, i32 34, i32 35, i32 36, i32 37, i32 38, i32 39, i32 40, i32 41, i32 42, i32 43, i32 44, i32 45, i32 46, i32 47, i32 48, i32 49, i32 50, i32 51, i32 52, i32 53, i32 54, i32 55, i32 56, i32 57, i32 58, i32 59, i32 60, i32 61, i32 62, i32 63, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1047 %bin.rdx = add <64 x i32> %.lcssa, %rdx.shuf
1048 %rdx.shuf2 = shufflevector <64 x i32> %bin.rdx, <64 x i32> undef, <64 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1049 %bin.rdx2 = add <64 x i32> %bin.rdx, %rdx.shuf2
1050 %rdx.shuf3 = shufflevector <64 x i32> %bin.rdx2, <64 x i32> undef, <64 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1051 %bin.rdx3 = add <64 x i32> %bin.rdx2, %rdx.shuf3
1052 %rdx.shuf4 = shufflevector <64 x i32> %bin.rdx3, <64 x i32> undef, <64 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1053 %bin.rdx4 = add <64 x i32> %bin.rdx3, %rdx.shuf4
1054 %rdx.shuf5 = shufflevector <64 x i32> %bin.rdx4, <64 x i32> undef, <64 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1055 %bin.rdx5 = add <64 x i32> %bin.rdx4, %rdx.shuf5
1056 %rdx.shuf6 = shufflevector <64 x i32> %bin.rdx5, <64 x i32> undef, <64 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1057 %bin.rdx6 = add <64 x i32> %bin.rdx5, %rdx.shuf6
1058 %12 = extractelement <64 x i32> %bin.rdx6, i32 0
1062 define i32 @sad_2i8() nounwind {
1063 ; SSE2-LABEL: sad_2i8:
1064 ; SSE2: # %bb.0: # %entry
1065 ; SSE2-NEXT: pxor %xmm0, %xmm0
1066 ; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
1067 ; SSE2-NEXT: movl $65535, %ecx # imm = 0xFFFF
1068 ; SSE2-NEXT: movd %ecx, %xmm1
1069 ; SSE2-NEXT: .p2align 4, 0x90
1070 ; SSE2-NEXT: .LBB3_1: # %vector.body
1071 ; SSE2-NEXT: # =>This Inner Loop Header: Depth=1
1072 ; SSE2-NEXT: movd {{.*#+}} xmm2 = mem[0],zero,zero,zero
1073 ; SSE2-NEXT: movd {{.*#+}} xmm3 = mem[0],zero,zero,zero
1074 ; SSE2-NEXT: pand %xmm1, %xmm3
1075 ; SSE2-NEXT: pand %xmm1, %xmm2
1076 ; SSE2-NEXT: psadbw %xmm3, %xmm2
1077 ; SSE2-NEXT: paddd %xmm2, %xmm0
1078 ; SSE2-NEXT: addq $4, %rax
1079 ; SSE2-NEXT: jne .LBB3_1
1080 ; SSE2-NEXT: # %bb.2: # %middle.block
1081 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
1082 ; SSE2-NEXT: paddd %xmm0, %xmm1
1083 ; SSE2-NEXT: movd %xmm1, %eax
1086 ; AVX-LABEL: sad_2i8:
1087 ; AVX: # %bb.0: # %entry
1088 ; AVX-NEXT: vpxor %xmm0, %xmm0, %xmm0
1089 ; AVX-NEXT: movq $-1024, %rax # imm = 0xFC00
1090 ; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
1091 ; AVX-NEXT: .p2align 4, 0x90
1092 ; AVX-NEXT: .LBB3_1: # %vector.body
1093 ; AVX-NEXT: # =>This Inner Loop Header: Depth=1
1094 ; AVX-NEXT: vmovd {{.*#+}} xmm2 = mem[0],zero,zero,zero
1095 ; AVX-NEXT: vmovd {{.*#+}} xmm3 = mem[0],zero,zero,zero
1096 ; AVX-NEXT: vpblendw {{.*#+}} xmm3 = xmm3[0],xmm0[1,2,3,4,5,6,7]
1097 ; AVX-NEXT: vpblendw {{.*#+}} xmm2 = xmm2[0],xmm0[1,2,3,4,5,6,7]
1098 ; AVX-NEXT: vpsadbw %xmm3, %xmm2, %xmm2
1099 ; AVX-NEXT: vpaddd %xmm1, %xmm2, %xmm1
1100 ; AVX-NEXT: addq $4, %rax
1101 ; AVX-NEXT: jne .LBB3_1
1102 ; AVX-NEXT: # %bb.2: # %middle.block
1103 ; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
1104 ; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
1105 ; AVX-NEXT: vmovd %xmm0, %eax
1108 br label %vector.body
1111 %index = phi i64 [ 0, %entry ], [ %index.next, %vector.body ]
1112 %vec.phi = phi <2 x i32> [ zeroinitializer, %entry ], [ %10, %vector.body ]
1113 %0 = getelementptr inbounds [1024 x i8], [1024 x i8]* @a, i64 0, i64 %index
1114 %1 = bitcast i8* %0 to <2 x i8>*
1115 %wide.load = load <2 x i8>, <2 x i8>* %1, align 4
1116 %2 = zext <2 x i8> %wide.load to <2 x i32>
1117 %3 = getelementptr inbounds [1024 x i8], [1024 x i8]* @b, i64 0, i64 %index
1118 %4 = bitcast i8* %3 to <2 x i8>*
1119 %wide.load1 = load <2 x i8>, <2 x i8>* %4, align 4
1120 %5 = zext <2 x i8> %wide.load1 to <2 x i32>
1121 %6 = sub nsw <2 x i32> %2, %5
1122 %7 = icmp sgt <2 x i32> %6, <i32 -1, i32 -1>
1123 %8 = sub nsw <2 x i32> zeroinitializer, %6
1124 %9 = select <2 x i1> %7, <2 x i32> %6, <2 x i32> %8
1125 %10 = add nsw <2 x i32> %9, %vec.phi
1126 %index.next = add i64 %index, 4
1127 %11 = icmp eq i64 %index.next, 1024
1128 br i1 %11, label %middle.block, label %vector.body
1131 %.lcssa = phi <2 x i32> [ %10, %vector.body ]
1132 %rdx.shuf = shufflevector <2 x i32> %.lcssa, <2 x i32> undef, <2 x i32> <i32 1, i32 undef>
1133 %bin.rdx = add <2 x i32> %.lcssa, %rdx.shuf
1134 %12 = extractelement <2 x i32> %bin.rdx, i32 0
1138 define i32 @sad_nonloop_4i8(<4 x i8>* nocapture readonly %p, i64, <4 x i8>* nocapture readonly %q) local_unnamed_addr #0 {
1139 ; SSE2-LABEL: sad_nonloop_4i8:
1141 ; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1142 ; SSE2-NEXT: movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
1143 ; SSE2-NEXT: psadbw %xmm0, %xmm1
1144 ; SSE2-NEXT: movd %xmm1, %eax
1147 ; AVX-LABEL: sad_nonloop_4i8:
1149 ; AVX-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
1150 ; AVX-NEXT: vmovd {{.*#+}} xmm1 = mem[0],zero,zero,zero
1151 ; AVX-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
1152 ; AVX-NEXT: vmovd %xmm0, %eax
1154 %v1 = load <4 x i8>, <4 x i8>* %p, align 1
1155 %z1 = zext <4 x i8> %v1 to <4 x i32>
1156 %v2 = load <4 x i8>, <4 x i8>* %q, align 1
1157 %z2 = zext <4 x i8> %v2 to <4 x i32>
1158 %sub = sub nsw <4 x i32> %z1, %z2
1159 %isneg = icmp sgt <4 x i32> %sub, <i32 -1, i32 -1, i32 -1, i32 -1>
1160 %neg = sub nsw <4 x i32> zeroinitializer, %sub
1161 %abs = select <4 x i1> %isneg, <4 x i32> %sub, <4 x i32> %neg
1162 %h2 = shufflevector <4 x i32> %abs, <4 x i32> undef, <4 x i32> <i32 2, i32 3, i32 undef, i32 undef>
1163 %sum2 = add <4 x i32> %abs, %h2
1164 %h3 = shufflevector <4 x i32> %sum2, <4 x i32> undef, <4 x i32> <i32 1, i32 undef, i32 undef, i32 undef>
1165 %sum3 = add <4 x i32> %sum2, %h3
1166 %sum = extractelement <4 x i32> %sum3, i32 0
1170 define i32 @sad_nonloop_8i8(<8 x i8>* nocapture readonly %p, i64, <8 x i8>* nocapture readonly %q) local_unnamed_addr #0 {
1171 ; SSE2-LABEL: sad_nonloop_8i8:
1173 ; SSE2-NEXT: movq {{.*#+}} xmm0 = mem[0],zero
1174 ; SSE2-NEXT: movq {{.*#+}} xmm1 = mem[0],zero
1175 ; SSE2-NEXT: psadbw %xmm0, %xmm1
1176 ; SSE2-NEXT: movd %xmm1, %eax
1179 ; AVX-LABEL: sad_nonloop_8i8:
1181 ; AVX-NEXT: vmovq {{.*#+}} xmm0 = mem[0],zero
1182 ; AVX-NEXT: vmovq {{.*#+}} xmm1 = mem[0],zero
1183 ; AVX-NEXT: vpsadbw %xmm0, %xmm1, %xmm0
1184 ; AVX-NEXT: vmovd %xmm0, %eax
1186 %v1 = load <8 x i8>, <8 x i8>* %p, align 1
1187 %z1 = zext <8 x i8> %v1 to <8 x i32>
1188 %v2 = load <8 x i8>, <8 x i8>* %q, align 1
1189 %z2 = zext <8 x i8> %v2 to <8 x i32>
1190 %sub = sub nsw <8 x i32> %z1, %z2
1191 %isneg = icmp sgt <8 x i32> %sub, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
1192 %neg = sub nsw <8 x i32> zeroinitializer, %sub
1193 %abs = select <8 x i1> %isneg, <8 x i32> %sub, <8 x i32> %neg
1194 %h1 = shufflevector <8 x i32> %abs, <8 x i32> undef, <8 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef>
1195 %sum1 = add <8 x i32> %abs, %h1
1196 %h2 = shufflevector <8 x i32> %sum1, <8 x i32> undef, <8 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1197 %sum2 = add <8 x i32> %sum1, %h2
1198 %h3 = shufflevector <8 x i32> %sum2, <8 x i32> undef, <8 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1199 %sum3 = add <8 x i32> %sum2, %h3
1200 %sum = extractelement <8 x i32> %sum3, i32 0
1204 define i32 @sad_nonloop_16i8(<16 x i8>* nocapture readonly %p, i64, <16 x i8>* nocapture readonly %q) local_unnamed_addr #0 {
1205 ; SSE2-LABEL: sad_nonloop_16i8:
1207 ; SSE2-NEXT: movdqu (%rdi), %xmm0
1208 ; SSE2-NEXT: movdqu (%rdx), %xmm1
1209 ; SSE2-NEXT: psadbw %xmm0, %xmm1
1210 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
1211 ; SSE2-NEXT: paddq %xmm1, %xmm0
1212 ; SSE2-NEXT: movd %xmm0, %eax
1215 ; AVX-LABEL: sad_nonloop_16i8:
1217 ; AVX-NEXT: vmovdqu (%rdi), %xmm0
1218 ; AVX-NEXT: vpsadbw (%rdx), %xmm0, %xmm0
1219 ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
1220 ; AVX-NEXT: vpaddq %xmm1, %xmm0, %xmm0
1221 ; AVX-NEXT: vmovd %xmm0, %eax
1223 %v1 = load <16 x i8>, <16 x i8>* %p, align 1
1224 %z1 = zext <16 x i8> %v1 to <16 x i32>
1225 %v2 = load <16 x i8>, <16 x i8>* %q, align 1
1226 %z2 = zext <16 x i8> %v2 to <16 x i32>
1227 %sub = sub nsw <16 x i32> %z1, %z2
1228 %isneg = icmp sgt <16 x i32> %sub, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
1229 %neg = sub nsw <16 x i32> zeroinitializer, %sub
1230 %abs = select <16 x i1> %isneg, <16 x i32> %sub, <16 x i32> %neg
1231 %h0 = shufflevector <16 x i32> %abs, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1232 %sum0 = add <16 x i32> %abs, %h0
1233 %h1 = shufflevector <16 x i32> %sum0, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1234 %sum1 = add <16 x i32> %sum0, %h1
1235 %h2 = shufflevector <16 x i32> %sum1, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1236 %sum2 = add <16 x i32> %sum1, %h2
1237 %h3 = shufflevector <16 x i32> %sum2, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1238 %sum3 = add <16 x i32> %sum2, %h3
1239 %sum = extractelement <16 x i32> %sum3, i32 0
1243 define i32 @sad_nonloop_32i8(<32 x i8>* nocapture readonly %p, i64, <32 x i8>* nocapture readonly %q) local_unnamed_addr #0 {
1244 ; SSE2-LABEL: sad_nonloop_32i8:
1246 ; SSE2-NEXT: movdqu (%rdi), %xmm0
1247 ; SSE2-NEXT: movdqu 16(%rdi), %xmm12
1248 ; SSE2-NEXT: pxor %xmm1, %xmm1
1249 ; SSE2-NEXT: movdqa %xmm12, %xmm8
1250 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3],xmm8[4],xmm1[4],xmm8[5],xmm1[5],xmm8[6],xmm1[6],xmm8[7],xmm1[7]
1251 ; SSE2-NEXT: movdqa %xmm8, %xmm10
1252 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm10 = xmm10[4],xmm1[4],xmm10[5],xmm1[5],xmm10[6],xmm1[6],xmm10[7],xmm1[7]
1253 ; SSE2-NEXT: movdqa %xmm0, %xmm9
1254 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1],xmm9[2],xmm1[2],xmm9[3],xmm1[3],xmm9[4],xmm1[4],xmm9[5],xmm1[5],xmm9[6],xmm1[6],xmm9[7],xmm1[7]
1255 ; SSE2-NEXT: movdqa %xmm9, %xmm11
1256 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm11 = xmm11[4],xmm1[4],xmm11[5],xmm1[5],xmm11[6],xmm1[6],xmm11[7],xmm1[7]
1257 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm12 = xmm12[8],xmm1[8],xmm12[9],xmm1[9],xmm12[10],xmm1[10],xmm12[11],xmm1[11],xmm12[12],xmm1[12],xmm12[13],xmm1[13],xmm12[14],xmm1[14],xmm12[15],xmm1[15]
1258 ; SSE2-NEXT: movdqa %xmm12, %xmm13
1259 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm13 = xmm13[4],xmm1[4],xmm13[5],xmm1[5],xmm13[6],xmm1[6],xmm13[7],xmm1[7]
1260 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm0 = xmm0[8],xmm1[8],xmm0[9],xmm1[9],xmm0[10],xmm1[10],xmm0[11],xmm1[11],xmm0[12],xmm1[12],xmm0[13],xmm1[13],xmm0[14],xmm1[14],xmm0[15],xmm1[15]
1261 ; SSE2-NEXT: movdqa %xmm0, %xmm4
1262 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm4 = xmm4[4],xmm1[4],xmm4[5],xmm1[5],xmm4[6],xmm1[6],xmm4[7],xmm1[7]
1263 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1],xmm8[2],xmm1[2],xmm8[3],xmm1[3]
1264 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm1[0],xmm9[1],xmm1[1],xmm9[2],xmm1[2],xmm9[3],xmm1[3]
1265 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm12 = xmm12[0],xmm1[0],xmm12[1],xmm1[1],xmm12[2],xmm1[2],xmm12[3],xmm1[3]
1266 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
1267 ; SSE2-NEXT: movdqu (%rdx), %xmm7
1268 ; SSE2-NEXT: movdqu 16(%rdx), %xmm3
1269 ; SSE2-NEXT: movdqa %xmm3, %xmm6
1270 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3],xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
1271 ; SSE2-NEXT: movdqa %xmm6, %xmm5
1272 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
1273 ; SSE2-NEXT: psubd %xmm5, %xmm10
1274 ; SSE2-NEXT: movdqa %xmm7, %xmm2
1275 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3],xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
1276 ; SSE2-NEXT: movdqa %xmm2, %xmm5
1277 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
1278 ; SSE2-NEXT: psubd %xmm5, %xmm11
1279 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm1[8],xmm3[9],xmm1[9],xmm3[10],xmm1[10],xmm3[11],xmm1[11],xmm3[12],xmm1[12],xmm3[13],xmm1[13],xmm3[14],xmm1[14],xmm3[15],xmm1[15]
1280 ; SSE2-NEXT: movdqa %xmm3, %xmm5
1281 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
1282 ; SSE2-NEXT: psubd %xmm5, %xmm13
1283 ; SSE2-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm1[8],xmm7[9],xmm1[9],xmm7[10],xmm1[10],xmm7[11],xmm1[11],xmm7[12],xmm1[12],xmm7[13],xmm1[13],xmm7[14],xmm1[14],xmm7[15],xmm1[15]
1284 ; SSE2-NEXT: movdqa %xmm7, %xmm5
1285 ; SSE2-NEXT: punpckhwd {{.*#+}} xmm5 = xmm5[4],xmm1[4],xmm5[5],xmm1[5],xmm5[6],xmm1[6],xmm5[7],xmm1[7]
1286 ; SSE2-NEXT: psubd %xmm5, %xmm4
1287 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3]
1288 ; SSE2-NEXT: psubd %xmm6, %xmm8
1289 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
1290 ; SSE2-NEXT: psubd %xmm2, %xmm9
1291 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
1292 ; SSE2-NEXT: psubd %xmm3, %xmm12
1293 ; SSE2-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm1[0],xmm7[1],xmm1[1],xmm7[2],xmm1[2],xmm7[3],xmm1[3]
1294 ; SSE2-NEXT: psubd %xmm7, %xmm0
1295 ; SSE2-NEXT: movdqa %xmm10, %xmm1
1296 ; SSE2-NEXT: psrad $31, %xmm1
1297 ; SSE2-NEXT: paddd %xmm1, %xmm10
1298 ; SSE2-NEXT: pxor %xmm1, %xmm10
1299 ; SSE2-NEXT: movdqa %xmm11, %xmm1
1300 ; SSE2-NEXT: psrad $31, %xmm1
1301 ; SSE2-NEXT: paddd %xmm1, %xmm11
1302 ; SSE2-NEXT: pxor %xmm1, %xmm11
1303 ; SSE2-NEXT: movdqa %xmm13, %xmm1
1304 ; SSE2-NEXT: psrad $31, %xmm1
1305 ; SSE2-NEXT: paddd %xmm1, %xmm13
1306 ; SSE2-NEXT: pxor %xmm1, %xmm13
1307 ; SSE2-NEXT: movdqa %xmm4, %xmm1
1308 ; SSE2-NEXT: psrad $31, %xmm1
1309 ; SSE2-NEXT: paddd %xmm1, %xmm4
1310 ; SSE2-NEXT: pxor %xmm1, %xmm4
1311 ; SSE2-NEXT: paddd %xmm13, %xmm4
1312 ; SSE2-NEXT: paddd %xmm10, %xmm4
1313 ; SSE2-NEXT: paddd %xmm11, %xmm4
1314 ; SSE2-NEXT: movdqa %xmm8, %xmm1
1315 ; SSE2-NEXT: psrad $31, %xmm1
1316 ; SSE2-NEXT: paddd %xmm1, %xmm8
1317 ; SSE2-NEXT: pxor %xmm1, %xmm8
1318 ; SSE2-NEXT: movdqa %xmm9, %xmm1
1319 ; SSE2-NEXT: psrad $31, %xmm1
1320 ; SSE2-NEXT: paddd %xmm1, %xmm9
1321 ; SSE2-NEXT: pxor %xmm1, %xmm9
1322 ; SSE2-NEXT: movdqa %xmm12, %xmm1
1323 ; SSE2-NEXT: psrad $31, %xmm1
1324 ; SSE2-NEXT: paddd %xmm1, %xmm12
1325 ; SSE2-NEXT: pxor %xmm1, %xmm12
1326 ; SSE2-NEXT: movdqa %xmm0, %xmm1
1327 ; SSE2-NEXT: psrad $31, %xmm1
1328 ; SSE2-NEXT: paddd %xmm1, %xmm0
1329 ; SSE2-NEXT: pxor %xmm1, %xmm0
1330 ; SSE2-NEXT: paddd %xmm12, %xmm0
1331 ; SSE2-NEXT: paddd %xmm8, %xmm0
1332 ; SSE2-NEXT: paddd %xmm4, %xmm0
1333 ; SSE2-NEXT: paddd %xmm9, %xmm0
1334 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
1335 ; SSE2-NEXT: paddd %xmm0, %xmm1
1336 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,2,3]
1337 ; SSE2-NEXT: paddd %xmm1, %xmm0
1338 ; SSE2-NEXT: movd %xmm0, %eax
1341 ; AVX1-LABEL: sad_nonloop_32i8:
1343 ; AVX1-NEXT: vmovdqu (%rdi), %xmm0
1344 ; AVX1-NEXT: vmovdqu 16(%rdi), %xmm1
1345 ; AVX1-NEXT: vpsadbw 16(%rdx), %xmm1, %xmm1
1346 ; AVX1-NEXT: vpsadbw (%rdx), %xmm0, %xmm0
1347 ; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
1348 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
1349 ; AVX1-NEXT: vpaddq %xmm1, %xmm0, %xmm0
1350 ; AVX1-NEXT: vmovd %xmm0, %eax
1353 ; AVX2-LABEL: sad_nonloop_32i8:
1355 ; AVX2-NEXT: vmovdqu (%rdi), %ymm0
1356 ; AVX2-NEXT: vpsadbw (%rdx), %ymm0, %ymm0
1357 ; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
1358 ; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
1359 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
1360 ; AVX2-NEXT: vpaddq %xmm1, %xmm0, %xmm0
1361 ; AVX2-NEXT: vmovd %xmm0, %eax
1362 ; AVX2-NEXT: vzeroupper
1365 ; AVX512-LABEL: sad_nonloop_32i8:
1367 ; AVX512-NEXT: vmovdqu (%rdi), %ymm0
1368 ; AVX512-NEXT: vpsadbw (%rdx), %ymm0, %ymm0
1369 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
1370 ; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
1371 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
1372 ; AVX512-NEXT: vpaddq %xmm1, %xmm0, %xmm0
1373 ; AVX512-NEXT: vmovd %xmm0, %eax
1374 ; AVX512-NEXT: vzeroupper
1376 %v1 = load <32 x i8>, <32 x i8>* %p, align 1
1377 %z1 = zext <32 x i8> %v1 to <32 x i32>
1378 %v2 = load <32 x i8>, <32 x i8>* %q, align 1
1379 %z2 = zext <32 x i8> %v2 to <32 x i32>
1380 %sub = sub nsw <32 x i32> %z1, %z2
1381 %isneg = icmp sgt <32 x i32> %sub, <i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1, i32 -1>
1382 %neg = sub nsw <32 x i32> zeroinitializer, %sub
1383 %abs = select <32 x i1> %isneg, <32 x i32> %sub, <32 x i32> %neg
1384 %h32 = shufflevector <32 x i32> %abs, <32 x i32> undef, <32 x i32> <i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1385 %sum32 = add <32 x i32> %abs, %h32
1386 %h0 = shufflevector <32 x i32> %sum32, <32 x i32> undef, <32 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1387 %sum0 = add <32 x i32> %sum32, %h0
1388 %h1 = shufflevector <32 x i32> %sum0, <32 x i32> undef, <32 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1389 %sum1 = add <32 x i32> %sum0, %h1
1390 %h2 = shufflevector <32 x i32> %sum1, <32 x i32> undef, <32 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1391 %sum2 = add <32 x i32> %sum1, %h2
1392 %h3 = shufflevector <32 x i32> %sum2, <32 x i32> undef, <32 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1393 %sum3 = add <32 x i32> %sum2, %h3
1394 %sum = extractelement <32 x i32> %sum3, i32 0
1398 ; This test contains an unrolled SAD loop with a non-zero initial value.
1399 ; DAGCombiner reassociation previously rewrote the adds to move the constant vector further down the tree. This resulted in the vector-reduction flag being lost.
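; For reference, a rough scalar C sketch of the computation exercised here
; (illustrative only; sad16 is a hypothetical helper, not part of this test):
;
;   static unsigned sad16(const unsigned char *x, const unsigned char *y) {
;     unsigned s = 0;
;     for (int i = 0; i < 16; ++i)   // sum of absolute differences over 16 bytes
;       s += x[i] > y[i] ? x[i] - y[i] : y[i] - x[i];
;     return s;
;   }
;
;   // sad_unroll_nonzero_initial is roughly equivalent to:
;   //   1 + sad16(arg, arg1) + sad16(arg2, arg3)
;   // The <i32 1, i32 0, ...> constant below supplies the non-zero initial
;   // value, and each absolute-difference half should still lower to a
;   // single PSADBW.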
1400 define i32 @sad_unroll_nonzero_initial(<16 x i8>* %arg, <16 x i8>* %arg1, <16 x i8>* %arg2, <16 x i8>* %arg3) {
1401 ; SSE2-LABEL: sad_unroll_nonzero_initial:
1402 ; SSE2: # %bb.0: # %bb
1403 ; SSE2-NEXT: movdqu (%rdi), %xmm0
1404 ; SSE2-NEXT: movdqu (%rsi), %xmm1
1405 ; SSE2-NEXT: psadbw %xmm0, %xmm1
1406 ; SSE2-NEXT: movl $1, %eax
1407 ; SSE2-NEXT: movd %eax, %xmm0
1408 ; SSE2-NEXT: movdqu (%rdx), %xmm2
1409 ; SSE2-NEXT: movdqu (%rcx), %xmm3
1410 ; SSE2-NEXT: psadbw %xmm2, %xmm3
1411 ; SSE2-NEXT: paddd %xmm0, %xmm3
1412 ; SSE2-NEXT: paddd %xmm1, %xmm3
1413 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm3[2,3,0,1]
1414 ; SSE2-NEXT: paddd %xmm3, %xmm0
1415 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
1416 ; SSE2-NEXT: paddd %xmm0, %xmm1
1417 ; SSE2-NEXT: movd %xmm1, %eax
1420 ; AVX1-LABEL: sad_unroll_nonzero_initial:
1421 ; AVX1: # %bb.0: # %bb
1422 ; AVX1-NEXT: vmovdqu (%rdi), %xmm0
1423 ; AVX1-NEXT: vpsadbw (%rsi), %xmm0, %xmm0
1424 ; AVX1-NEXT: vmovdqu (%rdx), %xmm1
1425 ; AVX1-NEXT: vpsadbw (%rcx), %xmm1, %xmm1
1426 ; AVX1-NEXT: movl $1, %eax
1427 ; AVX1-NEXT: vmovd %eax, %xmm2
1428 ; AVX1-NEXT: vpaddd %xmm2, %xmm1, %xmm1
1429 ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
1430 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
1431 ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
1432 ; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
1433 ; AVX1-NEXT: vpaddd %xmm1, %xmm0, %xmm0
1434 ; AVX1-NEXT: vmovd %xmm0, %eax
1437 ; AVX2-LABEL: sad_unroll_nonzero_initial:
1438 ; AVX2: # %bb.0: # %bb
1439 ; AVX2-NEXT: vmovdqu (%rdi), %xmm0
1440 ; AVX2-NEXT: vpsadbw (%rsi), %xmm0, %xmm0
1441 ; AVX2-NEXT: movl $1, %eax
1442 ; AVX2-NEXT: vmovd %eax, %xmm1
1443 ; AVX2-NEXT: vmovdqu (%rdx), %xmm2
1444 ; AVX2-NEXT: vpsadbw (%rcx), %xmm2, %xmm2
1445 ; AVX2-NEXT: vpaddd %xmm2, %xmm1, %xmm1
1446 ; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
1447 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
1448 ; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
1449 ; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
1450 ; AVX2-NEXT: vpaddd %xmm1, %xmm0, %xmm0
1451 ; AVX2-NEXT: vmovd %xmm0, %eax
1454 ; AVX512-LABEL: sad_unroll_nonzero_initial:
1455 ; AVX512: # %bb.0: # %bb
1456 ; AVX512-NEXT: vmovdqu (%rdi), %xmm0
1457 ; AVX512-NEXT: vpsadbw (%rsi), %xmm0, %xmm0
1458 ; AVX512-NEXT: movl $1, %eax
1459 ; AVX512-NEXT: vmovd %eax, %xmm1
1460 ; AVX512-NEXT: vmovdqu (%rdx), %xmm2
1461 ; AVX512-NEXT: vpsadbw (%rcx), %xmm2, %xmm2
1462 ; AVX512-NEXT: vpaddd %zmm2, %zmm1, %zmm1
1463 ; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
1464 ; AVX512-NEXT: vextracti64x4 $1, %zmm0, %ymm1
1465 ; AVX512-NEXT: vpaddd %zmm1, %zmm0, %zmm0
1466 ; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1
1467 ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
1468 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
1469 ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
1470 ; AVX512-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
1471 ; AVX512-NEXT: vpaddd %xmm1, %xmm0, %xmm0
1472 ; AVX512-NEXT: vmovd %xmm0, %eax
1473 ; AVX512-NEXT: vzeroupper
1476 %tmp = load <16 x i8>, <16 x i8>* %arg, align 1
1477 %tmp4 = load <16 x i8>, <16 x i8>* %arg1, align 1
1478 %tmp5 = zext <16 x i8> %tmp to <16 x i32>
1479 %tmp6 = zext <16 x i8> %tmp4 to <16 x i32>
1480 %tmp7 = sub nsw <16 x i32> %tmp5, %tmp6
1481 %tmp8 = icmp slt <16 x i32> %tmp7, zeroinitializer
1482 %tmp9 = sub nsw <16 x i32> zeroinitializer, %tmp7
1483 %tmp10 = select <16 x i1> %tmp8, <16 x i32> %tmp9, <16 x i32> %tmp7
1484 %tmp11 = add nuw nsw <16 x i32> %tmp10, <i32 1, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>
1485 %tmp12 = load <16 x i8>, <16 x i8>* %arg2, align 1
1486 %tmp13 = load <16 x i8>, <16 x i8>* %arg3, align 1
1487 %tmp14 = zext <16 x i8> %tmp12 to <16 x i32>
1488 %tmp15 = zext <16 x i8> %tmp13 to <16 x i32>
1489 %tmp16 = sub nsw <16 x i32> %tmp14, %tmp15
1490 %tmp17 = icmp slt <16 x i32> %tmp16, zeroinitializer
1491 %tmp18 = sub nsw <16 x i32> zeroinitializer, %tmp16
1492 %tmp19 = select <16 x i1> %tmp17, <16 x i32> %tmp18, <16 x i32> %tmp16
1493 %tmp20 = add nuw nsw <16 x i32> %tmp19, %tmp11
1494 %tmp21 = shufflevector <16 x i32> %tmp20, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1495 %tmp22 = add <16 x i32> %tmp20, %tmp21
1496 %tmp23 = shufflevector <16 x i32> %tmp22, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1497 %tmp24 = add <16 x i32> %tmp22, %tmp23
1498 %tmp25 = shufflevector <16 x i32> %tmp24, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1499 %tmp26 = add <16 x i32> %tmp24, %tmp25
1500 %tmp27 = shufflevector <16 x i32> %tmp26, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1501 %tmp28 = add <16 x i32> %tmp26, %tmp27
1502 %tmp29 = extractelement <16 x i32> %tmp28, i64 0
1506 ; This test contains two absolute difference patterns joined by an add. The result of that add is then reduced to a single element.
1507 ; SelectionDAGBuilder should tag the joining add as a vector reduction. We need to recognize that both sides can use psadbw.
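; Using the hypothetical sad16 helper sketched before sad_unroll_nonzero_initial,
; this test is roughly (illustrative only):
;
;   unsigned sad_double_reduction(const unsigned char *a, const unsigned char *b,
;                                 const unsigned char *c, const unsigned char *d) {
;     return sad16(a, b) + sad16(c, d);   // each half should map to one PSADBW
;   }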
1508 define i32 @sad_double_reduction(<16 x i8>* %arg, <16 x i8>* %arg1, <16 x i8>* %arg2, <16 x i8>* %arg3) {
1509 ; SSE2-LABEL: sad_double_reduction:
1510 ; SSE2: # %bb.0: # %bb
1511 ; SSE2-NEXT: movdqu (%rdi), %xmm0
1512 ; SSE2-NEXT: movdqu (%rsi), %xmm1
1513 ; SSE2-NEXT: psadbw %xmm0, %xmm1
1514 ; SSE2-NEXT: movdqu (%rdx), %xmm0
1515 ; SSE2-NEXT: movdqu (%rcx), %xmm2
1516 ; SSE2-NEXT: psadbw %xmm0, %xmm2
1517 ; SSE2-NEXT: paddd %xmm1, %xmm2
1518 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm2[2,3,0,1]
1519 ; SSE2-NEXT: paddd %xmm2, %xmm0
1520 ; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
1521 ; SSE2-NEXT: paddd %xmm0, %xmm1
1522 ; SSE2-NEXT: movd %xmm1, %eax
1525 ; AVX-LABEL: sad_double_reduction:
1526 ; AVX: # %bb.0: # %bb
1527 ; AVX-NEXT: vmovdqu (%rdi), %xmm0
1528 ; AVX-NEXT: vmovdqu (%rdx), %xmm1
1529 ; AVX-NEXT: vpsadbw (%rcx), %xmm1, %xmm1
1530 ; AVX-NEXT: vpsadbw (%rsi), %xmm0, %xmm0
1531 ; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
1532 ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
1533 ; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
1534 ; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm0[1,1,2,3]
1535 ; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
1536 ; AVX-NEXT: vmovd %xmm0, %eax
1539 %tmp = load <16 x i8>, <16 x i8>* %arg, align 1
1540 %tmp4 = load <16 x i8>, <16 x i8>* %arg1, align 1
1541 %tmp5 = zext <16 x i8> %tmp to <16 x i32>
1542 %tmp6 = zext <16 x i8> %tmp4 to <16 x i32>
1543 %tmp7 = sub nsw <16 x i32> %tmp5, %tmp6
1544 %tmp8 = icmp slt <16 x i32> %tmp7, zeroinitializer
1545 %tmp9 = sub nsw <16 x i32> zeroinitializer, %tmp7
1546 %tmp10 = select <16 x i1> %tmp8, <16 x i32> %tmp9, <16 x i32> %tmp7
1547 %tmp11 = load <16 x i8>, <16 x i8>* %arg2, align 1
1548 %tmp12 = load <16 x i8>, <16 x i8>* %arg3, align 1
1549 %tmp13 = zext <16 x i8> %tmp11 to <16 x i32>
1550 %tmp14 = zext <16 x i8> %tmp12 to <16 x i32>
1551 %tmp15 = sub nsw <16 x i32> %tmp13, %tmp14
1552 %tmp16 = icmp slt <16 x i32> %tmp15, zeroinitializer
1553 %tmp17 = sub nsw <16 x i32> zeroinitializer, %tmp15
1554 %tmp18 = select <16 x i1> %tmp16, <16 x i32> %tmp17, <16 x i32> %tmp15
1555 %tmp19 = add nuw nsw <16 x i32> %tmp18, %tmp10
1556 %tmp20 = shufflevector <16 x i32> %tmp19, <16 x i32> undef, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1557 %tmp21 = add <16 x i32> %tmp19, %tmp20
1558 %tmp22 = shufflevector <16 x i32> %tmp21, <16 x i32> undef, <16 x i32> <i32 4, i32 5, i32 6, i32 7, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1559 %tmp23 = add <16 x i32> %tmp21, %tmp22
1560 %tmp24 = shufflevector <16 x i32> %tmp23, <16 x i32> undef, <16 x i32> <i32 2, i32 3, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1561 %tmp25 = add <16 x i32> %tmp23, %tmp24
1562 %tmp26 = shufflevector <16 x i32> %tmp25, <16 x i32> undef, <16 x i32> <i32 1, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef>
1563 %tmp27 = add <16 x i32> %tmp25, %tmp26
1564 %tmp28 = extractelement <16 x i32> %tmp27, i64 0