/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "vpx_config.h"
#include "vp8/encoder/variance.h"
#include "vp8/common/pragmas.h"
#include "vpx_ports/mem.h"
extern void filter_block1d_h6_mmx
(
    const unsigned char *src_ptr,
    unsigned short *output_ptr,
    unsigned int src_pixels_per_line,
    unsigned int pixel_step,
    unsigned int output_height,
    unsigned int output_width,
    short *vp7_filter
);
extern void filter_block1d_v6_mmx
(
    const short *src_ptr,
    unsigned char *output_ptr,
    unsigned int pixels_per_line,
    unsigned int pixel_step,
    unsigned int output_height,
    unsigned int output_width,
    short *vp7_filter
);
extern unsigned int vp8_get_mb_ss_mmx(const short *src_ptr);
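/*
 * Presumably this returns the sum of squares over a macroblock's worth
 * (256) of short-valued differences; only the declaration appears here,
 * the body lives in the MMX assembly.
 */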
extern unsigned int vp8_get8x8var_mmx
(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *SSE,
    int *Sum
);
extern unsigned int vp8_get4x4var_mmx
(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *SSE,
    int *Sum
);
extern void vp8_filter_block2d_bil4x4_var_mmx
(
    const unsigned char *ref_ptr,
    int ref_pixels_per_line,
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    const short *HFilter,
    const short *VFilter,
    int *sum,
    unsigned int *sumsquared
);
extern void vp8_filter_block2d_bil_var_mmx
(
    const unsigned char *ref_ptr,
    int ref_pixels_per_line,
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    unsigned int Height,
    const short *HFilter,
    const short *VFilter,
    int *sum,
    unsigned int *sumsquared
);
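/*
 * For reference, a plain-C sketch of what the vp8_get8x8var_mmx helper is
 * expected to compute: the signed sum and the sum of squared differences
 * between an 8x8 source block and an 8x8 reference block. This sketch is
 * illustrative only (it is not part of the original file and is compiled
 * out), and the name get8x8var_c_sketch is hypothetical.
 */
#if 0
static unsigned int get8x8var_c_sketch(const unsigned char *src_ptr,
                                       int source_stride,
                                       const unsigned char *ref_ptr,
                                       int recon_stride,
                                       unsigned int *SSE, int *Sum)
{
    unsigned int sse = 0;
    int i, j, sum = 0;

    for (i = 0; i < 8; i++)
    {
        for (j = 0; j < 8; j++)
        {
            /* signed pixel difference */
            int diff = src_ptr[j] - ref_ptr[j];
            sum += diff;
            sse += (unsigned int)(diff * diff);
        }

        src_ptr += source_stride;
        ref_ptr += recon_stride;
    }

    *Sum = sum;
    *SSE = sse;
    return sse;
}
#endif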
unsigned int vp8_variance4x4_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int var;
    int avg;

    vp8_get4x4var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
    *sse = var;
    return (var - ((avg * avg) >> 4));
}
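/*
 * All of the variance wrappers in this file apply the same identity:
 * variance = SSE - sum^2 / N, with the division by the pixel count N
 * folded into a shift: >> 4 for the 16 pixels of a 4x4 block, >> 6 for
 * 8x8, >> 7 for 16x8 and 8x16, and >> 8 for the 256 pixels of 16x16.
 */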
unsigned int vp8_variance8x8_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int var;
    int avg;

    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &var, &avg);
    *sse = var;
    return (var - ((avg * avg) >> 6));
}
unsigned int vp8_mse16x16_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int sse0, sse1, sse2, sse3, var;
    int sum0, sum1, sum2, sum3;

    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
    vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);

    /* MSE is the raw SSE over the 16x16 block; the mean is not removed. */
    var = sse0 + sse1 + sse2 + sse3;
    *sse = var;
    return var;
}
unsigned int vp8_variance16x16_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int sse0, sse1, sse2, sse3, var;
    int sum0, sum1, sum2, sum3, avg;

    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
    vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse2, &sum2);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride + 8, source_stride, ref_ptr + 8 * recon_stride + 8, recon_stride, &sse3, &sum3);

    var = sse0 + sse1 + sse2 + sse3;
    avg = sum0 + sum1 + sum2 + sum3;
    *sse = var;
    return (var - ((avg * avg) >> 8));
}
unsigned int vp8_variance16x8_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int sse0, sse1, var;
    int sum0, sum1, avg;

    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
    vp8_get8x8var_mmx(src_ptr + 8, source_stride, ref_ptr + 8, recon_stride, &sse1, &sum1);

    var = sse0 + sse1;
    avg = sum0 + sum1;
    *sse = var;
    return (var - ((avg * avg) >> 7));
}
unsigned int vp8_variance8x16_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    unsigned int sse0, sse1, var;
    int sum0, sum1, avg;

    vp8_get8x8var_mmx(src_ptr, source_stride, ref_ptr, recon_stride, &sse0, &sum0);
    vp8_get8x8var_mmx(src_ptr + 8 * source_stride, source_stride, ref_ptr + 8 * recon_stride, recon_stride, &sse1, &sum1);

    var = sse0 + sse1;
    avg = sum0 + sum1;
    *sse = var;
    return (var - ((avg * avg) >> 7));
}
///////////////////////////////////////////////////////////////////////////
// the MMX function that does the bilinear filtering and var calculation //
// in one pass                                                           //
///////////////////////////////////////////////////////////////////////////
DECLARE_ALIGNED(16, const short, vp8_vp7_bilinear_filters_mmx[8][8]) =
{
    { 128, 128, 128, 128,   0,   0,   0,   0 },
    { 112, 112, 112, 112,  16,  16,  16,  16 },
    {  96,  96,  96,  96,  32,  32,  32,  32 },
    {  80,  80,  80,  80,  48,  48,  48,  48 },
    {  64,  64,  64,  64,  64,  64,  64,  64 },
    {  48,  48,  48,  48,  80,  80,  80,  80 },
    {  32,  32,  32,  32,  96,  96,  96,  96 },
    {  16,  16,  16,  16, 112, 112, 112, 112 }
};
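/*
 * Row i of the table holds the two bilinear taps for a sub-pixel offset
 * of i/8 pel, each tap replicated four times for the SIMD kernels. The
 * taps in every row sum to 128 (7-bit fixed point), so the filtering
 * helpers are expected to renormalize with a right shift of 7.
 */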
unsigned int vp8_sub_pixel_variance4x4_mmx
(
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    int xoffset,
    int yoffset,
    const unsigned char *dst_ptr,
    int dst_pixels_per_line,
    unsigned int *sse)
{
    int xsum;
    unsigned int xxsum;

    vp8_filter_block2d_bil4x4_var_mmx(
        src_ptr, src_pixels_per_line,
        dst_ptr, dst_pixels_per_line,
        vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
        &xsum, &xxsum
    );
    *sse = xxsum;
    return (xxsum - ((xsum * xsum) >> 4));
}
unsigned int vp8_sub_pixel_variance8x8_mmx
(
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    int xoffset,
    int yoffset,
    const unsigned char *dst_ptr,
    int dst_pixels_per_line,
    unsigned int *sse
)
{
    int xsum;
    unsigned int xxsum;

    vp8_filter_block2d_bil_var_mmx(
        src_ptr, src_pixels_per_line,
        dst_ptr, dst_pixels_per_line, 8,
        vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
        &xsum, &xxsum
    );
    *sse = xxsum;
    return (xxsum - ((xsum * xsum) >> 6));
}
unsigned int vp8_sub_pixel_variance16x16_mmx
(
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    int xoffset,
    int yoffset,
    const unsigned char *dst_ptr,
    int dst_pixels_per_line,
    unsigned int *sse
)
{
    int xsum0, xsum1;
    unsigned int xxsum0, xxsum1;

    /* left 8x16 half */
    vp8_filter_block2d_bil_var_mmx(
        src_ptr, src_pixels_per_line,
        dst_ptr, dst_pixels_per_line, 16,
        vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
        &xsum0, &xxsum0
    );

    /* right 8x16 half */
    vp8_filter_block2d_bil_var_mmx(
        src_ptr + 8, src_pixels_per_line,
        dst_ptr + 8, dst_pixels_per_line, 16,
        vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
        &xsum1, &xxsum1
    );

    xsum0 += xsum1;
    xxsum0 += xxsum1;

    *sse = xxsum0;
    return (xxsum0 - ((xsum0 * xsum0) >> 8));
}
unsigned int vp8_sub_pixel_mse16x16_mmx(
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    int xoffset,
    int yoffset,
    const unsigned char *dst_ptr,
    int dst_pixels_per_line,
    unsigned int *sse
)
{
    vp8_sub_pixel_variance16x16_mmx(src_ptr, src_pixels_per_line, xoffset, yoffset, dst_ptr, dst_pixels_per_line, sse);
    return *sse;
}
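/*
 * The variance call above stores the raw sum of squared differences
 * through *sse, so returning *sse yields the MSE (mean not removed),
 * discarding the variance value the callee returned.
 */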
unsigned int vp8_sub_pixel_variance16x8_mmx
(
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    int xoffset,
    int yoffset,
    const unsigned char *dst_ptr,
    int dst_pixels_per_line,
    unsigned int *sse
)
{
    int xsum0, xsum1;
    unsigned int xxsum0, xxsum1;

    vp8_filter_block2d_bil_var_mmx(
        src_ptr, src_pixels_per_line,
        dst_ptr, dst_pixels_per_line, 8,
        vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
        &xsum0, &xxsum0
    );

    vp8_filter_block2d_bil_var_mmx(
        src_ptr + 8, src_pixels_per_line,
        dst_ptr + 8, dst_pixels_per_line, 8,
        vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
        &xsum1, &xxsum1
    );

    xsum0 += xsum1;
    xxsum0 += xxsum1;

    *sse = xxsum0;
    return (xxsum0 - ((xsum0 * xsum0) >> 7));
}
unsigned int vp8_sub_pixel_variance8x16_mmx
(
    const unsigned char *src_ptr,
    int src_pixels_per_line,
    int xoffset,
    int yoffset,
    const unsigned char *dst_ptr,
    int dst_pixels_per_line,
    unsigned int *sse
)
{
    int xsum;
    unsigned int xxsum;

    vp8_filter_block2d_bil_var_mmx(
        src_ptr, src_pixels_per_line,
        dst_ptr, dst_pixels_per_line, 16,
        vp8_vp7_bilinear_filters_mmx[xoffset], vp8_vp7_bilinear_filters_mmx[yoffset],
        &xsum, &xxsum
    );
    *sse = xxsum;
    return (xxsum - ((xsum * xsum) >> 7));
}
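/*
 * The half-pixel specializations below reuse the 16x16 sub-pixel variance
 * with offset 4, i.e. the { 64, 64 } filter row above (4/8 = 1/2 pel),
 * applied horizontally, vertically, or both.
 */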
unsigned int vp8_variance_halfpixvar16x16_h_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 4, 0,
                                           ref_ptr, recon_stride, sse);
}
unsigned int vp8_variance_halfpixvar16x16_v_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 0, 4,
                                           ref_ptr, recon_stride, sse);
}
unsigned int vp8_variance_halfpixvar16x16_hv_mmx(
    const unsigned char *src_ptr,
    int source_stride,
    const unsigned char *ref_ptr,
    int recon_stride,
    unsigned int *sse)
{
    return vp8_sub_pixel_variance16x16_mmx(src_ptr, source_stride, 4, 4,
                                           ref_ptr, recon_stride, sse);
}