/*
 * Merge "Skip computation of distortion in vp8_pick_inter_mode if active_map is used"
 * [libvpx.git] / vp8 / encoder / x86 / variance_mmx.c
 * blob 92b695f1750c56ef9839226afe6eb2d8d1dcc6d5
 */
1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
11 #include "vpx_config.h"
12 #include "vp8/encoder/variance.h"
13 #include "vp8/common/pragmas.h"
14 #include "vpx_ports/mem.h"
/* Prototypes for the MMX helpers implemented in assembly
 * (variance_impl_mmx.asm). */
extern void filter_block1d_h6_mmx
(
    const unsigned char *src_ptr,
    unsigned short *output_ptr,
    unsigned int src_pixels_per_line,
    unsigned int pixel_step,
    unsigned int output_height,
    unsigned int output_width,
    short *vp7_filter
);
extern void filter_block1d_v6_mmx
(
    const short *src_ptr,
    unsigned char *output_ptr,
    unsigned int pixels_per_line,
    unsigned int pixel_step,
    unsigned int output_height,
    unsigned int output_width,
    short *vp7_filter
);
extern unsigned int vp8_get_mb_ss_mmx(const short *src_ptr);
extern unsigned int vp8_get8x8var_mmx
(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *SSE,
    int *Sum
);
extern unsigned int vp8_get4x4var_mmx
(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *SSE,
    int *Sum
);
extern void vp8_filter_block2d_bil4x4_var_mmx
(
    const unsigned char *ref_ptr,
    int ref_pixels_per_line,
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    const short *HFilter,
    const short *VFilter,
    int *sum,
    unsigned int *sumsquared
);
extern void vp8_filter_block2d_bil_var_mmx
(
    const unsigned char *ref_ptr,
    int ref_pixels_per_line,
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    unsigned int Height,
    const short *HFilter,
    const short *VFilter,
    int *sum,
    unsigned int *sumsquared
);
/* 4x4 variance: SSE minus sum^2/16, computed via the MMX 4x4 helper.
 * *sse receives the raw sum of squared differences. */
unsigned int vp8_variance4x4_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int sum_sq;
    int sum;

    vp8_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride,
                      &sum_sq, &sum);
    *sse = sum_sq;
    /* 16 pixels -> divide the squared sum by 16 (shift by 4). */
    return sum_sq - (unsigned int)((sum * sum) >> 4);
}
/* 8x8 variance: SSE minus sum^2/64, computed via the MMX 8x8 helper.
 * *sse receives the raw sum of squared differences. */
unsigned int vp8_variance8x8_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int sum_sq;
    int sum;

    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride,
                      &sum_sq, &sum);
    *sse = sum_sq;
    /* 64 pixels -> divide the squared sum by 64 (shift by 6). */
    return sum_sq - (unsigned int)((sum * sum) >> 6);
}
/* 16x16 mean squared error: total SSE over the four 8x8 quadrants.
 * The per-quadrant sums produced by the helper are not needed here. */
unsigned int vp8_mse16x16_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int sse_q[4];
    int sum_q[4];
    unsigned int total;

    /* Top-left, top-right, bottom-left, bottom-right 8x8 quadrants. */
    vp8_get8x8var_mmx(src_ptr, source_stride,
                      ref_ptr, recon_stride, &sse_q[0], &sum_q[0]);
    vp8_get8x8var_mmx(src_ptr + 8, source_stride,
                      ref_ptr + 8, recon_stride, &sse_q[1], &sum_q[1]);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride,
                      ref_ptr + 8 * recon_stride, recon_stride,
                      &sse_q[2], &sum_q[2]);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride,
                      ref_ptr + 8 * recon_stride + 8, recon_stride,
                      &sse_q[3], &sum_q[3]);

    total = sse_q[0] + sse_q[1] + sse_q[2] + sse_q[3];
    *sse = total;
    return total;
}
/* 16x16 variance: accumulate SSE and sum over the four 8x8 quadrants,
 * then return SSE - sum^2/256.  *sse receives the raw SSE. */
unsigned int vp8_variance16x16_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int sse0, sse1, sse2, sse3, var;
    int sum0, sum1, sum2, sum3, avg;

    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
    vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);

    var = sse0 + sse1 + sse2 + sse3;
    avg = sum0 + sum1 + sum2 + sum3;
    *sse = var;
    /* |avg| can reach 255 * 256 = 65280, so a signed avg*avg overflows
     * int32 (undefined behavior).  Multiply in unsigned arithmetic: the
     * true square is < 2^32, so the unsigned product is exact. */
    return (var - (((unsigned int)avg * (unsigned int)avg) >> 8));
}
/* 16x8 variance: left and right 8x8 halves, SSE - sum^2/128. */
unsigned int vp8_variance16x8_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int sse_l, sse_r, total_sse;
    int sum_l, sum_r, total_sum;

    vp8_get8x8var_mmx(src_ptr, source_stride,
                      ref_ptr, recon_stride, &sse_l, &sum_l);
    vp8_get8x8var_mmx(src_ptr + 8, source_stride,
                      ref_ptr + 8, recon_stride, &sse_r, &sum_r);

    total_sse = sse_l + sse_r;
    total_sum = sum_l + sum_r;
    *sse = total_sse;
    /* 128 pixels -> shift by 7; |sum| <= 255*128 so the square fits int32. */
    return total_sse - (unsigned int)((total_sum * total_sum) >> 7);
}
/* 8x16 variance: top and bottom 8x8 halves, SSE - sum^2/128. */
unsigned int vp8_variance8x16_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int sse_top, sse_bot, total_sse;
    int sum_top, sum_bot, total_sum;

    vp8_get8x8var_mmx(src_ptr, source_stride,
                      ref_ptr, recon_stride, &sse_top, &sum_top);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride,
                      ref_ptr + 8 * recon_stride, recon_stride,
                      &sse_bot, &sum_bot);

    total_sse = sse_top + sse_bot;
    total_sum = sum_top + sum_bot;
    *sse = total_sse;
    /* 128 pixels -> shift by 7; |sum| <= 255*128 so the square fits int32. */
    return total_sse - (unsigned int)((total_sum * total_sum) >> 7);
}
203 ///////////////////////////////////////////////////////////////////////////
204 // the mmx function that does the bilinear filtering and var calculation //
205 // int one pass //
206 ///////////////////////////////////////////////////////////////////////////
207 DECLARE_ALIGNED(16, const short, vp8_vp7_bilinear_filters_mmx[8][8]) =
209 { 128, 128, 128, 128, 0, 0, 0, 0 },
210 { 112, 112, 112, 112, 16, 16, 16, 16 },
211 { 96, 96, 96, 96, 32, 32, 32, 32 },
212 { 80, 80, 80, 80, 48, 48, 48, 48 },
213 { 64, 64, 64, 64, 64, 64, 64, 64 },
214 { 48, 48, 48, 48, 80, 80, 80, 80 },
215 { 32, 32, 32, 32, 96, 96, 96, 96 },
216 { 16, 16, 16, 16, 112, 112, 112, 112 }
219 unsigned int vp8_sub_pixel_variance4x4_mmx
221 const unsigned char *src_ptr,
222 int src_pixels_per_line,
223 int xoffset,
224 int yoffset,
225 const unsigned char *dst_ptr,
226 int dst_pixels_per_line,
227 unsigned int *sse)
230 int xsum;
231 unsigned int xxsum;
232 vp8_filter_block2d_bil4x4_var_mmx(
233 src_ptr, src_pixels_per_line,
234 dst_ptr, dst_pixels_per_line,
235 vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
236 &xsum, &xxsum
238 *sse = xxsum;
239 return (xxsum - ((xsum * xsum) >> 4));
243 unsigned int vp8_sub_pixel_variance8x8_mmx
245 const unsigned char *src_ptr,
246 int src_pixels_per_line,
247 int xoffset,
248 int yoffset,
249 const unsigned char *dst_ptr,
250 int dst_pixels_per_line,
251 unsigned int *sse
255 int xsum;
256 unsigned int xxsum;
257 vp8_filter_block2d_bil_var_mmx(
258 src_ptr, src_pixels_per_line,
259 dst_ptr, dst_pixels_per_line, 8,
260 vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
261 &xsum, &xxsum
263 *sse = xxsum;
264 return (xxsum - ((xsum * xsum) >> 6));
267 unsigned int vp8_sub_pixel_variance16x16_mmx
269 const unsigned char *src_ptr,
270 int src_pixels_per_line,
271 int xoffset,
272 int yoffset,
273 const unsigned char *dst_ptr,
274 int dst_pixels_per_line,
275 unsigned int *sse
279 int xsum0, xsum1;
280 unsigned int xxsum0, xxsum1;
283 vp8_filter_block2d_bil_var_mmx(
284 src_ptr, src_pixels_per_line,
285 dst_ptr, dst_pixels_per_line, 16,
286 vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
287 &xsum0, &xxsum0
291 vp8_filter_block2d_bil_var_mmx(
292 src_ptr + 8, src_pixels_per_line,
293 dst_ptr + 8, dst_pixels_per_line, 16,
294 vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
295 &xsum1, &xxsum1
298 xsum0 += xsum1;
299 xxsum0 += xxsum1;
301 *sse = xxsum0;
302 return (xxsum0 - ((xsum0 * xsum0) >> 8));
/* 16x16 sub-pel MSE: the raw SSE written through *sse by the sub-pel
 * variance routine; the variance return value is discarded. */
unsigned int vp8_sub_pixel_mse16x16_mmx(
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    int xoffset,
    int yoffset,
    const unsigned char *dst_ptr,
    int dst_pixels_per_line,
    unsigned int *sse
)
{
    (void)vp8_sub_pixel_variance16x16_mmx(src_ptr, src_pixels_per_line,
                                          xoffset, yoffset,
                                          dst_ptr, dst_pixels_per_line, sse);
    return *sse;
}
321 unsigned int vp8_sub_pixel_variance16x8_mmx
323 const unsigned char *src_ptr,
324 int src_pixels_per_line,
325 int xoffset,
326 int yoffset,
327 const unsigned char *dst_ptr,
328 int dst_pixels_per_line,
329 unsigned int *sse
332 int xsum0, xsum1;
333 unsigned int xxsum0, xxsum1;
336 vp8_filter_block2d_bil_var_mmx(
337 src_ptr, src_pixels_per_line,
338 dst_ptr, dst_pixels_per_line, 8,
339 vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
340 &xsum0, &xxsum0
344 vp8_filter_block2d_bil_var_mmx(
345 src_ptr + 8, src_pixels_per_line,
346 dst_ptr + 8, dst_pixels_per_line, 8,
347 vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
348 &xsum1, &xxsum1
351 xsum0 += xsum1;
352 xxsum0 += xxsum1;
354 *sse = xxsum0;
355 return (xxsum0 - ((xsum0 * xsum0) >> 7));
358 unsigned int vp8_sub_pixel_variance8x16_mmx
360 const unsigned char *src_ptr,
361 int src_pixels_per_line,
362 int xoffset,
363 int yoffset,
364 const unsigned char *dst_ptr,
365 int dst_pixels_per_line,
366 unsigned int *sse
369 int xsum;
370 unsigned int xxsum;
371 vp8_filter_block2d_bil_var_mmx(
372 src_ptr, src_pixels_per_line,
373 dst_ptr, dst_pixels_per_line, 16,
374 vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
375 &xsum, &xxsum
377 *sse = xxsum;
378 return (xxsum - ((xsum * xsum) >> 7));
/* Half-pel horizontal variance: sub-pel offset (4, 0) in eighths,
 * i.e. (1/2, 0) of a pel. */
unsigned int vp8_variance_halfpixvar16x16_h_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride,
                                           4, 0,
                                           ref_ptr, recon_stride, sse);
}
/* Half-pel vertical variance: sub-pel offset (0, 4) in eighths,
 * i.e. (0, 1/2) of a pel. */
unsigned int vp8_variance_halfpixvar16x16_v_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride,
                                           0, 4,
                                           ref_ptr, recon_stride, sse);
}
/* Half-pel diagonal variance: sub-pel offset (4, 4) in eighths,
 * i.e. (1/2, 1/2) of a pel. */
unsigned int vp8_variance_halfpixvar16x16_hv_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride,
                                           4, 4,
                                           ref_ptr, recon_stride, sse);
}