/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "mcomp.h"
#include "vpx_mem/vpx_mem.h"

#include <stdio.h>
#include <limits.h>
#include <math.h>
static int mv_ref_ct [31] [4] [2];
static int mv_mode_cts [4] [2];

static int mv_bits_sadcost[256];
void vp8cx_init_mv_bits_sadcost()
{
    int i;

    for (i = 0; i < 256; i++)
    {
        mv_bits_sadcost[i] = (int)sqrt(i * 16);
    }
}
int vp8_mv_bit_cost(MV *mv, MV *ref, int *mvcost[2], int Weight)
{
    // MV costing is based on the distribution of vectors in the previous frame and as such
    // will tend to overstate the cost of vectors. In addition, coding a new vector can have
    // a knock-on effect on the cost of subsequent vectors and on the quality of prediction
    // from NEAR and NEAREST for subsequent blocks. The "Weight" parameter allows, to a
    // limited extent, for some account to be taken of these factors.
    return ((mvcost[0][(mv->row - ref->row) >> 1] + mvcost[1][(mv->col - ref->col) >> 1]) * Weight) >> 7;
}
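/* Worked example of the weighting above (table values are assumed purely for
 * illustration, not taken from a real mvcost table): if the row component costs
 * mvcost[0][...] == 40 and the column component costs mvcost[1][...] == 36, then
 * Weight == 128 gives ((40 + 36) * 128) >> 7 == 76, while Weight == 64 gives 38.
 * Weight therefore scales the nominal bit cost in steps of 1/128. */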
static int mv_err_cost(MV *mv, MV *ref, int *mvcost[2], int error_per_bit)
{
    //return ((mvcost[0][(mv->row - ref->row) >> 1] + mvcost[1][(mv->col - ref->col) >> 1] + 128) * error_per_bit) >> 8;
    //return ((vp8_mv_bit_cost(mv, ref, mvcost, 100) + 128) * error_per_bit) >> 8;
    //i = (vp8_mv_bit_cost(mv, ref, mvcost, 100) * error_per_bit + 128) >> 8;
    return ((mvcost[0][(mv->row - ref->row) >> 1] + mvcost[1][(mv->col - ref->col) >> 1]) * error_per_bit + 128) >> 8;
    //return (vp8_mv_bit_cost(mv, ref, mvcost, 128) * error_per_bit + 128) >> 8;
}
static int mvsad_err_cost(MV *mv, MV *ref, int *mvsadcost[2], int error_per_bit)
{
    /* Calculate sad error cost on full pixel basis. */
    return ((mvsadcost[0][(mv->row - ref->row)] + mvsadcost[1][(mv->col - ref->col)]) * error_per_bit + 128) >> 8;
}
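/* Note the indexing difference between the two helpers above: mv_err_cost
 * indexes its mvcost tables with the row/col deltas shifted right by one
 * (the vectors it sees are nominally on the 1/8-pel grid), while mvsad_err_cost
 * is fed full-pel deltas during SAD-based search and applies no shift.  Both
 * scale by error_per_bit and round with (+128) >> 8, i.e. the rate term is
 * expressed in 1/256 units of error_per_bit. */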
static int mv_bits(MV *mv, MV *ref, int *mvcost[2])
{
    // Get the estimated number of bits for a motion vector, to be used for
    // costing in SAD-based motion estimation.
    return ((mvcost[0][(mv->row - ref->row) >> 1] + mvcost[1][(mv->col - ref->col) >> 1]) + 128) >> 8;
}
void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride)
{
    int Len;
    int search_site_count = 0;

    // Generate offsets for 4 search sites per step.
    Len = MAX_FIRST_STEP;
    x->ss[search_site_count].mv.col = 0;
    x->ss[search_site_count].mv.row = 0;
    x->ss[search_site_count].offset = 0;
    search_site_count++;

    while (Len > 0)
    {
        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = 0;
        x->ss[search_site_count].mv.row = -Len;
        x->ss[search_site_count].offset = -Len * stride;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = 0;
        x->ss[search_site_count].mv.row = Len;
        x->ss[search_site_count].offset = Len * stride;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = -Len;
        x->ss[search_site_count].mv.row = 0;
        x->ss[search_site_count].offset = -Len;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = Len;
        x->ss[search_site_count].mv.row = 0;
        x->ss[search_site_count].offset = Len;
        search_site_count++;

        // Contract the step size.
        Len /= 2;
    }

    x->ss_count = search_site_count;
    x->searches_per_step = 4;
}
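/* Illustrative sizing for the table built above (MAX_FIRST_STEP == 16 is
 * assumed here purely for the example; the real value comes from mcomp.h):
 * with stride == 32 the first ring holds the offsets up (-16,0) -> -512,
 * down (+16,0) -> +512, left (0,-16) -> -16 and right (0,+16) -> +16.
 * Len then halves each pass (16, 8, 4, 2, 1), giving 1 + 5*4 == 21 sites
 * with 4 searches per step. */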
void vp8_init3smotion_compensation(MACROBLOCK *x, int stride)
{
    int Len;
    int search_site_count = 0;

    // Generate offsets for 8 search sites per step.
    Len = MAX_FIRST_STEP;
    x->ss[search_site_count].mv.col = 0;
    x->ss[search_site_count].mv.row = 0;
    x->ss[search_site_count].offset = 0;
    search_site_count++;

    while (Len > 0)
    {
        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = 0;
        x->ss[search_site_count].mv.row = -Len;
        x->ss[search_site_count].offset = -Len * stride;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = 0;
        x->ss[search_site_count].mv.row = Len;
        x->ss[search_site_count].offset = Len * stride;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = -Len;
        x->ss[search_site_count].mv.row = 0;
        x->ss[search_site_count].offset = -Len;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = Len;
        x->ss[search_site_count].mv.row = 0;
        x->ss[search_site_count].offset = Len;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = -Len;
        x->ss[search_site_count].mv.row = -Len;
        x->ss[search_site_count].offset = -Len * stride - Len;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = Len;
        x->ss[search_site_count].mv.row = -Len;
        x->ss[search_site_count].offset = -Len * stride + Len;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = -Len;
        x->ss[search_site_count].mv.row = Len;
        x->ss[search_site_count].offset = Len * stride - Len;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = Len;
        x->ss[search_site_count].mv.row = Len;
        x->ss[search_site_count].offset = Len * stride + Len;
        search_site_count++;

        // Contract the step size.
        Len /= 2;
    }

    x->ss_count = search_site_count;
    x->searches_per_step = 8;
}
#define MVC(r,c) (((mvcost[0][(r)-rr] + mvcost[1][(c) - rc]) * error_per_bit + 128 )>>8 ) // estimated cost of a motion vector (r,c)
#define PRE(r,c) (*(d->base_pre) + d->pre + ((r)>>2) * d->pre_stride + ((c)>>2)) // pointer to the predictor block for motion vector (r,c)
#define SP(x) (((x)&3)<<1) // convert motion vector component to offset for svf calc
#define DIST(r,c) vfp->svf( PRE(r,c), d->pre_stride, SP(c),SP(r), z,b->src_stride,&sse) // returns subpixel variance error at (r,c)
#define IFMVCV(r,c,s,e) if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
#define ERR(r,c) (MVC(r,c)+DIST(r,c)) // returns distortion + motion vector cost
#define CHECK_BETTER(v,r,c) IFMVCV(r,c,{if((v = ERR(r,c)) < besterr) { besterr = v; br=r; bc=c; }}, v=INT_MAX;) // checks if (r,c) has a better score than the previous best
#define MIN(x,y) (((x)<(y))?(x):(y))
#define MAX(x,y) (((x)>(y))?(x):(y))

//#define CHECK_BETTER(v,r,c) if((v = ERR(r,c)) < besterr) { besterr = v; br=r; bc=c; }
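/* Sketch of how the macros combine, using assumed values: if a probe lands at
 * column c == 11 in 1/4-pel units, PRE() advances the predictor pointer by
 * 11 >> 2 == 2 whole pixels, SP(11) == (11 & 3) << 1 == 6 selects the matching
 * 1/8-pel horizontal filter phase for vfp->svf, and CHECK_BETTER() only accepts
 * the point if it lies inside [minc,maxc] x [minr,maxr]; otherwise its cost is
 * forced to INT_MAX so it can never become the new best. */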
int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2])
{
    unsigned char *y = *(d->base_pre) + d->pre + (bestmv->row) * d->pre_stride + bestmv->col;
    unsigned char *z = (*(b->base_src) + b->src);

    int rr = ref_mv->row >> 1, rc = ref_mv->col >> 1;
    int br = bestmv->row << 2, bc = bestmv->col << 2;
    int tr = br, tc = bc;
    unsigned int besterr = INT_MAX;
    unsigned int left, right, up, down, diag;
    unsigned int sse;
    unsigned int whichdir;
    unsigned int halfiters = 4;
    unsigned int quarteriters = 4;

    int minc = MAX(x->mv_col_min << 2, (ref_mv->col >> 1) - ((1 << mvlong_width) - 1));
    int maxc = MIN(x->mv_col_max << 2, (ref_mv->col >> 1) + ((1 << mvlong_width) - 1));
    int minr = MAX(x->mv_row_min << 2, (ref_mv->row >> 1) - ((1 << mvlong_width) - 1));
    int maxr = MIN(x->mv_row_max << 2, (ref_mv->row >> 1) + ((1 << mvlong_width) - 1));

    // calculate central point error
    besterr = vfp->vf(y, d->pre_stride, z, b->src_stride, &sse);
    besterr += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);

    // TODO: Each subsequent iteration checks at least one point in common with
    // the last iteration (could be 2 if the diagonal was selected).
    // 1/2-pel refinement
    while (--halfiters)
    {
        CHECK_BETTER(left, tr, tc - 2);
        CHECK_BETTER(right, tr, tc + 2);
        CHECK_BETTER(up, tr - 2, tc);
        CHECK_BETTER(down, tr + 2, tc);

        whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);

        switch (whichdir)
        {
        case 0:
            CHECK_BETTER(diag, tr - 2, tc - 2);
            break;
        case 1:
            CHECK_BETTER(diag, tr - 2, tc + 2);
            break;
        case 2:
            CHECK_BETTER(diag, tr + 2, tc - 2);
            break;
        case 3:
            CHECK_BETTER(diag, tr + 2, tc + 2);
            break;
        }

        // no reason to check the same one again.
        if (tr == br && tc == bc)
            break;

        tr = br;
        tc = bc;
    }

    // TODO: Each subsequent iteration checks at least one point in common with
    // the last iteration (could be 2 if the diagonal was selected).
    // 1/4-pel refinement
    while (--quarteriters)
    {
        CHECK_BETTER(left, tr, tc - 1);
        CHECK_BETTER(right, tr, tc + 1);
        CHECK_BETTER(up, tr - 1, tc);
        CHECK_BETTER(down, tr + 1, tc);

        whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);

        switch (whichdir)
        {
        case 0:
            CHECK_BETTER(diag, tr - 1, tc - 1);
            break;
        case 1:
            CHECK_BETTER(diag, tr - 1, tc + 1);
            break;
        case 2:
            CHECK_BETTER(diag, tr + 1, tc - 1);
            break;
        case 3:
            CHECK_BETTER(diag, tr + 1, tc + 1);
            break;
        }

        // no reason to check the same one again.
        if (tr == br && tc == bc)
            break;

        tr = br;
        tc = bc;
    }

    bestmv->row = br << 1;
    bestmv->col = bc << 1;

    if ((abs(bestmv->col - ref_mv->col) > MAX_FULL_PEL_VAL) ||
        (abs(bestmv->row - ref_mv->row) > MAX_FULL_PEL_VAL))
        return INT_MAX;

    return besterr;
}
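/* Unit bookkeeping for the routine above (worked through for illustration): an
 * integer-pel candidate of (row, col) == (3, -2) enters as *bestmv, the search
 * itself runs on a 1/4-pel grid (br == 12, bc == -8, half-pel probes step by 2
 * and quarter-pel probes by 1), and the refined result is written back in
 * 1/8-pel units via br << 1 / bc << 1, the same units ref_mv already uses. */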
int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2])
{
    int bestmse = INT_MAX;
    MV startmv;
    MV this_mv;
    unsigned char *y = *(d->base_pre) + d->pre + (bestmv->row) * d->pre_stride + bestmv->col;
    unsigned char *z = (*(b->base_src) + b->src);
    int left, right, up, down, diag;
    unsigned int sse;
    int whichdir;

    // Trap uncodable vectors
    if ((abs((bestmv->col << 3) - ref_mv->col) > MAX_FULL_PEL_VAL) || (abs((bestmv->row << 3) - ref_mv->row) > MAX_FULL_PEL_VAL))
    {
        bestmv->row <<= 3;
        bestmv->col <<= 3;
        return INT_MAX;
    }

    // central mv
    bestmv->row <<= 3;
    bestmv->col <<= 3;
    startmv = *bestmv;

    // calculate central point error
    bestmse = vfp->vf(y, d->pre_stride, z, b->src_stride, &sse);
    bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);

    // go left then right and check error
    this_mv.row = startmv.row;
    this_mv.col = ((startmv.col - 8) | 4);
    left = vfp->svf_halfpix_h(y - 1, d->pre_stride, z, b->src_stride, &sse);
    left += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (left < bestmse)
    {
        *bestmv = this_mv;
        bestmse = left;
    }

    this_mv.col += 8;
    right = vfp->svf_halfpix_h(y, d->pre_stride, z, b->src_stride, &sse);
    right += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (right < bestmse)
    {
        *bestmv = this_mv;
        bestmse = right;
    }

    // go up then down and check error
    this_mv.col = startmv.col;
    this_mv.row = ((startmv.row - 8) | 4);
    up = vfp->svf_halfpix_v(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
    up += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (up < bestmse)
    {
        *bestmv = this_mv;
        bestmse = up;
    }

    this_mv.row += 8;
    down = vfp->svf_halfpix_v(y, d->pre_stride, z, b->src_stride, &sse);
    down += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (down < bestmse)
    {
        *bestmv = this_mv;
        bestmse = down;
    }

    // now check 1 more diagonal
    whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
    //for(whichdir =0;whichdir<4;whichdir++)
    //{
    this_mv = startmv;

    switch (whichdir)
    {
    case 0:
        this_mv.col = (this_mv.col - 8) | 4;
        this_mv.row = (this_mv.row - 8) | 4;
        diag = vfp->svf_halfpix_hv(y - 1 - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
        break;
    case 1:
        this_mv.col += 4;
        this_mv.row = (this_mv.row - 8) | 4;
        diag = vfp->svf_halfpix_hv(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
        break;
    case 2:
        this_mv.col = (this_mv.col - 8) | 4;
        this_mv.row += 4;
        diag = vfp->svf_halfpix_hv(y - 1, d->pre_stride, z, b->src_stride, &sse);
        break;
    case 3:
        this_mv.col += 4;
        this_mv.row += 4;
        diag = vfp->svf_halfpix_hv(y, d->pre_stride, z, b->src_stride, &sse);
        break;
    }

    diag += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (diag < bestmse)
    {
        *bestmv = this_mv;
        bestmse = diag;
    }
    //}

    // time to check quarter pels.
    if (bestmv->row < startmv.row)
        y -= d->pre_stride;

    if (bestmv->col < startmv.col)
        y--;

    startmv = *bestmv;

    // go left then right and check error
    this_mv.row = startmv.row;

    if (startmv.col & 7)
    {
        this_mv.col = startmv.col - 2;
        left = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
    }
    else
    {
        this_mv.col = (startmv.col - 8) | 6;
        left = vfp->svf(y - 1, d->pre_stride, 6, this_mv.row & 7, z, b->src_stride, &sse);
    }

    left += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (left < bestmse)
    {
        *bestmv = this_mv;
        bestmse = left;
    }

    this_mv.col += 4;
    right = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
    right += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (right < bestmse)
    {
        *bestmv = this_mv;
        bestmse = right;
    }

    // go up then down and check error
    this_mv.col = startmv.col;

    if (startmv.row & 7)
    {
        this_mv.row = startmv.row - 2;
        up = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
    }
    else
    {
        this_mv.row = (startmv.row - 8) | 6;
        up = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.col & 7, 6, z, b->src_stride, &sse);
    }

    up += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (up < bestmse)
    {
        *bestmv = this_mv;
        bestmse = up;
    }

    this_mv.row += 4;
    down = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
    down += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (down < bestmse)
    {
        *bestmv = this_mv;
        bestmse = down;
    }

    // now check 1 more diagonal
    whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
    // for(whichdir=0;whichdir<4;whichdir++)
    // {
    this_mv = startmv;

    switch (whichdir)
    {
    case 0:

        if (startmv.row & 7)
        {
            this_mv.row = startmv.row - 2;

            if (startmv.col & 7)
            {
                this_mv.col = startmv.col - 2;
                diag = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
            }
            else
            {
                this_mv.col = (startmv.col - 8) | 6;
                diag = vfp->svf(y - 1, d->pre_stride, 6, this_mv.row & 7, z, b->src_stride, &sse);
            }
        }
        else
        {
            this_mv.row = (startmv.row - 8) | 6;

            if (startmv.col & 7)
            {
                this_mv.col = startmv.col - 2;
                diag = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.col & 7, 6, z, b->src_stride, &sse);
            }
            else
            {
                this_mv.col = (startmv.col - 8) | 6;
                diag = vfp->svf(y - d->pre_stride - 1, d->pre_stride, 6, 6, z, b->src_stride, &sse);
            }
        }

        break;
    case 1:
        this_mv.col += 2;

        if (startmv.row & 7)
        {
            this_mv.row = startmv.row - 2;
            diag = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
        }
        else
        {
            this_mv.row = (startmv.row - 8) | 6;
            diag = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.col & 7, 6, z, b->src_stride, &sse);
        }

        break;
    case 2:
        this_mv.row += 2;

        if (startmv.col & 7)
        {
            this_mv.col = startmv.col - 2;
            diag = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
        }
        else
        {
            this_mv.col = (startmv.col - 8) | 6;
            diag = vfp->svf(y - 1, d->pre_stride, 6, this_mv.row & 7, z, b->src_stride, &sse);
        }

        break;
    case 3:
        this_mv.col += 2;
        this_mv.row += 2;
        diag = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
        break;
    }

    diag += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (diag < bestmse)
    {
        *bestmv = this_mv;
        bestmse = diag;
    }
    // }

    return bestmse;
}
int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2])
{
    int bestmse = INT_MAX;
    MV startmv;
    MV this_mv;
    unsigned char *y = *(d->base_pre) + d->pre + (bestmv->row) * d->pre_stride + bestmv->col;
    unsigned char *z = (*(b->base_src) + b->src);
    int left, right, up, down, diag;
    unsigned int sse;
    int whichdir;

    // Trap uncodable vectors
    if ((abs((bestmv->col << 3) - ref_mv->col) > MAX_FULL_PEL_VAL) || (abs((bestmv->row << 3) - ref_mv->row) > MAX_FULL_PEL_VAL))
    {
        bestmv->row <<= 3;
        bestmv->col <<= 3;
        return INT_MAX;
    }

    // central mv
    bestmv->row <<= 3;
    bestmv->col <<= 3;
    startmv = *bestmv;

    // calculate central point error
    bestmse = vfp->vf(y, d->pre_stride, z, b->src_stride, &sse);
    bestmse += mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);

    // go left then right and check error
    this_mv.row = startmv.row;
    this_mv.col = ((startmv.col - 8) | 4);
    left = vfp->svf_halfpix_h(y - 1, d->pre_stride, z, b->src_stride, &sse);
    left += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (left < bestmse)
    {
        *bestmv = this_mv;
        bestmse = left;
    }

    this_mv.col += 8;
    right = vfp->svf_halfpix_h(y, d->pre_stride, z, b->src_stride, &sse);
    right += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (right < bestmse)
    {
        *bestmv = this_mv;
        bestmse = right;
    }

    // go up then down and check error
    this_mv.col = startmv.col;
    this_mv.row = ((startmv.row - 8) | 4);
    up = vfp->svf_halfpix_v(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
    up += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (up < bestmse)
    {
        *bestmv = this_mv;
        bestmse = up;
    }

    this_mv.row += 8;
    down = vfp->svf_halfpix_v(y, d->pre_stride, z, b->src_stride, &sse);
    down += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (down < bestmse)
    {
        *bestmv = this_mv;
        bestmse = down;
    }

    // Somewhat strangely, not doing all the diagonals for half pel is slower than doing them.
#if 0
    // now check 1 more diagonal -
    whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
    this_mv = startmv;

    switch (whichdir)
    {
    case 0:
        this_mv.col = (this_mv.col - 8) | 4;
        this_mv.row = (this_mv.row - 8) | 4;
        diag = vfp->svf(y - 1 - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
        break;
    case 1:
        this_mv.col += 4;
        this_mv.row = (this_mv.row - 8) | 4;
        diag = vfp->svf(y - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);
        break;
    case 2:
        this_mv.col = (this_mv.col - 8) | 4;
        this_mv.row += 4;
        diag = vfp->svf(y - 1, d->pre_stride, 4, 4, z, b->src_stride, &sse);
        break;
    case 3:
        this_mv.col += 4;
        this_mv.row += 4;
        diag = vfp->svf(y, d->pre_stride, 4, 4, z, b->src_stride, &sse);
        break;
    }

    diag += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (diag < bestmse)
    {
        *bestmv = this_mv;
        bestmse = diag;
    }
#else
    this_mv.col = (this_mv.col - 8) | 4;
    this_mv.row = (this_mv.row - 8) | 4;
    diag = vfp->svf_halfpix_hv(y - 1 - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
    diag += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (diag < bestmse)
    {
        *bestmv = this_mv;
        bestmse = diag;
    }

    this_mv.col += 8;
    diag = vfp->svf_halfpix_hv(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
    diag += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (diag < bestmse)
    {
        *bestmv = this_mv;
        bestmse = diag;
    }

    this_mv.col = (this_mv.col - 8) | 4;
    this_mv.row = startmv.row + 4;
    diag = vfp->svf_halfpix_hv(y - 1, d->pre_stride, z, b->src_stride, &sse);
    diag += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (diag < bestmse)
    {
        *bestmv = this_mv;
        bestmse = diag;
    }

    this_mv.col += 8;
    diag = vfp->svf_halfpix_hv(y, d->pre_stride, z, b->src_stride, &sse);
    diag += mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (diag < bestmse)
    {
        *bestmv = this_mv;
        bestmse = diag;
    }
#endif

    return bestmse;
}
// Full-pel, SAD-based variants of the cost/distortion macros; the sub-pixel
// versions above are #undef'd first to avoid redefinition warnings.
#undef MVC
#undef PRE
#undef DIST
#undef ERR
#undef CHECK_BETTER

#define MVC(r,c) (((mvsadcost[0][r-rr] + mvsadcost[1][c-rc]) * error_per_bit + 128 )>>8 ) // estimated cost of a motion vector (r,c)
#define PRE(r,c) (*(d->base_pre) + d->pre + (r) * d->pre_stride + (c)) // pointer to the predictor block for motion vector (r,c)
#define DIST(r,c,v) vfp->sdf( src,src_stride,PRE(r,c),d->pre_stride, v) // returns sad error score.
#define ERR(r,c,v) (MVC(r,c)+DIST(r,c,v)) // returns distortion + motion vector cost
#define CHECK_BETTER(v,r,c) if ((v = ERR(r,c,besterr)) < besterr) { besterr = v; br=r; bc=c; } // checks if (r,c) has a better score than the previous best

static const MV next_chkpts[6][3] =
{
    {{ -2, 0}, { -1, -2}, {1, -2}},
    {{ -1, -2}, {1, -2}, {2, 0}},
    {{1, -2}, {2, 0}, {1, 2}},
    {{2, 0}, {1, 2}, { -1, 2}},
    {{1, 2}, { -1, 2}, { -2, 0}},
    {{ -1, 2}, { -2, 0}, { -1, -2}}
};
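/* Geometry of the hexagon used below, as (row, col) offsets from the current
 * best point (*):
 *
 *              (-2, 0)
 *      (-1,-2)         (-1, 2)
 *                 *
 *      ( 1,-2)         ( 1, 2)
 *              ( 2, 0)
 *
 * When the best point moves along hexagon direction k, three vertices of the
 * new hexagon coincide with points that were already evaluated; next_chkpts[k]
 * lists the three genuinely new ones, so each refinement step needs only three
 * SAD evaluations instead of six. */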
int vp8_hex_search
(
    MACROBLOCK *x,
    BLOCK *b,
    BLOCKD *d,
    MV *ref_mv,
    MV *best_mv,
    int search_param,
    int error_per_bit,
    int *num00,
    const vp8_variance_fn_ptr_t *vfp,
    int *mvsadcost[2],
    int *mvcost[2],
    MV *center_mv
)
{
    MV hex[6] = { { -1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0} };
    MV neighbors[8] = { { -1, -1}, {0, -1}, {1, -1}, { -1, 0}, {1, 0}, { -1, 1}, {0, 1}, {1, 1} };
    int i, j;
    unsigned char *src = (*(b->base_src) + b->src);
    int src_stride = b->src_stride;
    int rr = center_mv->row, rc = center_mv->col;
    int br = ref_mv->row >> 3, bc = ref_mv->col >> 3, tr, tc;
    unsigned int besterr, thiserr = 0x7fffffff;
    int k = -1, tk;

    if (bc < x->mv_col_min) bc = x->mv_col_min;

    if (bc > x->mv_col_max) bc = x->mv_col_max;

    if (br < x->mv_row_min) br = x->mv_row_min;

    if (br > x->mv_row_max) br = x->mv_row_max;

    // convert the prediction centre to full-pel units so it matches the
    // full-pel mvsadcost indexing used by the MVC() macro
    rr >>= 3;
    rc >>= 3;

    besterr = ERR(br, bc, thiserr);

    // first pass: probe the whole hexagon around the starting point
    tr = br;
    tc = bc;

    for (i = 0; i < 6; i++)
    {
        int nr = tr + hex[i].row, nc = tc + hex[i].col;

        if (nc < x->mv_col_min) continue;

        if (nc > x->mv_col_max) continue;

        if (nr < x->mv_row_min) continue;

        if (nr > x->mv_row_max) continue;

        //CHECK_BETTER(thiserr,nr,nc);
        if ((thiserr = ERR(nr, nc, besterr)) < besterr)
        {
            besterr = thiserr;
            br = nr;
            bc = nc;
            k = i;
        }
    }

    if (tr != br || tc != bc)
    {
        // refinement: only the three unvisited hexagon vertices need checking each pass
        for (j = 1; j < 127; j++)
        {
            tr = br;
            tc = bc;
            tk = k;

            for (i = 0; i < 3; i++)
            {
                int nr = tr + next_chkpts[tk][i].row, nc = tc + next_chkpts[tk][i].col;

                if (nc < x->mv_col_min) continue;

                if (nc > x->mv_col_max) continue;

                if (nr < x->mv_row_min) continue;

                if (nr > x->mv_row_max) continue;

                //CHECK_BETTER(thiserr,nr,nc);
                if ((thiserr = ERR(nr, nc, besterr)) < besterr)
                {
                    besterr = thiserr;
                    br = nr;
                    bc = nc; //k=(tk+5+i)%6;}
                    k = tk + 5 + i;

                    if (k >= 12) k -= 12;
                    else if (k >= 6) k -= 6;
                }
            }

            if (tr == br && tc == bc)
                break;
        }
    }

    // check 8 1 away neighbors
    tr = br;
    tc = bc;

    for (i = 0; i < 8; i++)
    {
        int nr = tr + neighbors[i].row, nc = tc + neighbors[i].col;

        if (nc < x->mv_col_min) continue;

        if (nc > x->mv_col_max) continue;

        if (nr < x->mv_row_min) continue;

        if (nr > x->mv_row_max) continue;

        CHECK_BETTER(thiserr, nr, nc);
    }

    best_mv->row = br;
    best_mv->col = bc;

    return vfp->vf(src, src_stride, PRE(br, bc), d->pre_stride, &thiserr) + mv_err_cost(best_mv, center_mv, mvcost, error_per_bit);
}
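/* The routine above produces a full-pel result in three stages: an initial
 * sweep of the whole hexagon, a repeated three-point refinement that follows
 * the best vertex, and a final check of the eight immediate neighbours.  The
 * value returned is the variance at the winning position plus the motion
 * vector cost, and the caller would typically refine the vector further with
 * one of the sub-pixel step functions above. */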
int vp8_diamond_search_sad
(
    MACROBLOCK *x,
    BLOCK *b,
    BLOCKD *d,
    MV *ref_mv,
    MV *best_mv,
    int search_param,
    int error_per_bit,
    int *num00,
    vp8_variance_fn_ptr_t *fn_ptr,
    int *mvcost[2],
    MV *center_mv
)
{
    int i, j, step;

    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
    unsigned char *in_what;
    int in_what_stride = d->pre_stride;
    unsigned char *best_address;

    int tot_steps;
    MV this_mv;

    int bestsad = INT_MAX;
    int best_site = 0;
    int last_site = 0;

    int ref_row = ref_mv->row >> 3;
    int ref_col = ref_mv->col >> 3;
    int this_row_offset;
    int this_col_offset;
    search_site *ss;

    unsigned char *check_here;
    int thissad;
    MV fcenter_mv;

    int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};

    fcenter_mv.row = center_mv->row >> 3;
    fcenter_mv.col = center_mv->col >> 3;

    *num00 = 0;

    best_mv->row = ref_row;
    best_mv->col = ref_col;

    // Work out the start point for the search
    in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
    best_address = in_what;

    // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
    if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
    {
        // Check the starting position
        bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, error_per_bit);
    }

    // search_param determines the length of the initial step and hence the number of iterations:
    // 0 = initial step (MAX_FIRST_STEP) pel; 1 = (MAX_FIRST_STEP/2) pel; 2 = (MAX_FIRST_STEP/4) pel; etc.
    ss = &x->ss[search_param * x->searches_per_step];
    tot_steps = (x->ss_count / x->searches_per_step) - search_param;

    i = 1;

    for (step = 0; step < tot_steps; step++)
    {
        for (j = 0; j < x->searches_per_step; j++)
        {
            // Trap illegal vectors
            this_row_offset = best_mv->row + ss[i].mv.row;
            this_col_offset = best_mv->col + ss[i].mv.col;

            if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
                (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
            {
                check_here = ss[i].offset + best_address;
                thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);

                if (thissad < bestsad)
                {
                    this_mv.row = this_row_offset;
                    this_mv.col = this_col_offset;
                    thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);

                    if (thissad < bestsad)
                    {
                        bestsad = thissad;
                        best_site = i;
                    }
                }
            }

            i++;
        }

        if (best_site != last_site)
        {
            best_mv->row += ss[best_site].mv.row;
            best_mv->col += ss[best_site].mv.col;
            best_address += ss[best_site].offset;
            last_site = best_site;
        }
        else if (best_address == in_what)
            (*num00)++;
    }

    this_mv.row = best_mv->row << 3;
    this_mv.col = best_mv->col << 3;

    if (bestsad == INT_MAX)
        return INT_MAX;

    return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
           + mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
}
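/* Worked sizing example (assuming MAX_FIRST_STEP == 16, as in the earlier
 * illustration): vp8_init_dsmotion_compensation builds 21 sites with 4 searches
 * per step, so tot_steps == 21/4 - search_param == 5 - search_param.  With
 * search_param == 0 the first ring probes 16 pels away, while search_param == 2
 * starts at 4 pels, trading reach for speed; each outer step halves the pattern
 * radius and re-centres on the best point found so far. */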
int vp8_diamond_search_sadx4
(
    MACROBLOCK *x,
    BLOCK *b,
    BLOCKD *d,
    MV *ref_mv,
    MV *best_mv,
    int search_param,
    int error_per_bit,
    int *num00,
    vp8_variance_fn_ptr_t *fn_ptr,
    int *mvcost[2],
    MV *center_mv
)
{
    int i, j, step;

    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
    unsigned char *in_what;
    int in_what_stride = d->pre_stride;
    unsigned char *best_address;

    int tot_steps;
    MV this_mv;

    int bestsad = INT_MAX;
    int best_site = 0;
    int last_site = 0;

    int ref_row = ref_mv->row >> 3;
    int ref_col = ref_mv->col >> 3;
    int this_row_offset;
    int this_col_offset;
    search_site *ss;

    unsigned char *check_here;
    unsigned int thissad;
    MV fcenter_mv;

    int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};

    fcenter_mv.row = center_mv->row >> 3;
    fcenter_mv.col = center_mv->col >> 3;

    *num00 = 0;

    best_mv->row = ref_row;
    best_mv->col = ref_col;

    // Work out the start point for the search
    in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
    best_address = in_what;

    // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
    if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
    {
        // Check the starting position
        bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, error_per_bit);
    }

    // search_param determines the length of the initial step and hence the number of iterations:
    // 0 = initial step (MAX_FIRST_STEP) pel; 1 = (MAX_FIRST_STEP/2) pel; 2 = (MAX_FIRST_STEP/4) pel; etc.
    ss = &x->ss[search_param * x->searches_per_step];
    tot_steps = (x->ss_count / x->searches_per_step) - search_param;

    i = 1;

    for (step = 0; step < tot_steps; step++)
    {
        int all_in = 1, t;

        // To know whether all candidate points of this step are within the bounds,
        // 4 bounds checks are enough instead of checking 4 bounds for each point.
        all_in &= ((best_mv->row + ss[i].mv.row) > x->mv_row_min);
        all_in &= ((best_mv->row + ss[i+1].mv.row) < x->mv_row_max);
        all_in &= ((best_mv->col + ss[i+2].mv.col) > x->mv_col_min);
        all_in &= ((best_mv->col + ss[i+3].mv.col) < x->mv_col_max);

        if (all_in)
        {
            unsigned int sad_array[4];

            for (j = 0; j < x->searches_per_step; j += 4)
            {
                unsigned char *block_offset[4];

                for (t = 0; t < 4; t++)
                    block_offset[t] = ss[i+t].offset + best_address;

                fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);

                for (t = 0; t < 4; t++, i++)
                {
                    if (sad_array[t] < bestsad)
                    {
                        this_mv.row = best_mv->row + ss[i].mv.row;
                        this_mv.col = best_mv->col + ss[i].mv.col;
                        sad_array[t] += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);

                        if (sad_array[t] < bestsad)
                        {
                            bestsad = sad_array[t];
                            best_site = i;
                        }
                    }
                }
            }
        }
        else
        {
            for (j = 0; j < x->searches_per_step; j++)
            {
                // Trap illegal vectors
                this_row_offset = best_mv->row + ss[i].mv.row;
                this_col_offset = best_mv->col + ss[i].mv.col;

                if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
                    (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
                {
                    check_here = ss[i].offset + best_address;
                    thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);

                    if (thissad < bestsad)
                    {
                        this_mv.row = this_row_offset;
                        this_mv.col = this_col_offset;
                        thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);

                        if (thissad < bestsad)
                        {
                            bestsad = thissad;
                            best_site = i;
                        }
                    }
                }

                i++;
            }
        }

        if (best_site != last_site)
        {
            best_mv->row += ss[best_site].mv.row;
            best_mv->col += ss[best_site].mv.col;
            best_address += ss[best_site].offset;
            last_site = best_site;
        }
        else if (best_address == in_what)
            (*num00)++;
    }

    this_mv.row = best_mv->row << 3;
    this_mv.col = best_mv->col << 3;

    if (bestsad == INT_MAX)
        return INT_MAX;

    return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
           + mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
}
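/* The x4 variant batches work for SIMD: when all four candidate sites of a step
 * survive the single clamp test above, one fn_ptr->sdx4df call produces the four
 * SADs that the scalar path would otherwise obtain with four fn_ptr->sdf calls,
 * and only candidates that already beat bestsad pay for the motion-vector cost
 * term. */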
#if !(CONFIG_REALTIME_ONLY)
int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], MV *center_mv)
{
    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
    unsigned char *in_what;
    int in_what_stride = d->pre_stride;
    int mv_stride = d->pre_stride;
    unsigned char *bestaddress;
    MV *best_mv = &d->bmi.mv.as_mv;
    MV this_mv;
    int bestsad = INT_MAX;
    int r, c;

    unsigned char *check_here;
    int thissad;

    int ref_row = ref_mv->row;
    int ref_col = ref_mv->col;

    int row_min = ref_row - distance;
    int row_max = ref_row + distance;
    int col_min = ref_col - distance;
    int col_max = ref_col + distance;

    MV fcenter_mv;
    int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};

    fcenter_mv.row = center_mv->row >> 3;
    fcenter_mv.col = center_mv->col >> 3;

    // Work out the mid point for the search
    in_what = *(d->base_pre) + d->pre;
    bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;

    best_mv->row = ref_row;
    best_mv->col = ref_col;

    // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
    if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
    {
        // Baseline value at the centre
        //bestsad = fn_ptr->sf( what,what_stride,bestaddress,in_what_stride) + (int)sqrt(mv_err_cost(ref_mv,ref_mv, mvcost,error_per_bit*14));
        bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, error_per_bit);
    }

    // Apply further limits to prevent us from using vectors that stretch beyond the UMV border
    if (col_min < x->mv_col_min)
        col_min = x->mv_col_min;

    if (col_max > x->mv_col_max)
        col_max = x->mv_col_max;

    if (row_min < x->mv_row_min)
        row_min = x->mv_row_min;

    if (row_max > x->mv_row_max)
        row_max = x->mv_row_max;

    for (r = row_min; r < row_max; r++)
    {
        this_mv.row = r;
        check_here = r * mv_stride + in_what + col_min;

        for (c = col_min; c < col_max; c++)
        {
            thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);

            this_mv.col = c;
            //thissad += (int)sqrt(mv_err_cost(&this_mv,ref_mv, mvcost,error_per_bit*14));
            //thissad += error_per_bit * mv_bits_sadcost[mv_bits(&this_mv, ref_mv, mvcost)];
            thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit); //mv_bits(error_per_bit, &this_mv, ref_mv, mvsadcost);

            if (thissad < bestsad)
            {
                bestsad = thissad;
                best_mv->row = r;
                best_mv->col = c;
                bestaddress = check_here;
            }

            check_here++;
        }
    }

    this_mv.row = best_mv->row << 3;
    this_mv.col = best_mv->col << 3;

    if (bestsad < INT_MAX)
        return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
               + mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
    else
        return INT_MAX;
}
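/* Example of the window arithmetic above (ref_mv assumed to be the zero vector
 * and distance == 16, both purely for illustration): rows and columns in
 * [-16, 16) are visited after clamping to the UMV border limits, i.e. up to
 * 32 * 32 == 1024 SAD evaluations for one block, which is why the batched
 * variants below exist. */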
int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], MV *center_mv)
{
    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
    unsigned char *in_what;
    int in_what_stride = d->pre_stride;
    int mv_stride = d->pre_stride;
    unsigned char *bestaddress;
    MV *best_mv = &d->bmi.mv.as_mv;
    MV this_mv;
    int bestsad = INT_MAX;
    int r, c;

    unsigned char *check_here;
    unsigned int thissad;

    int ref_row = ref_mv->row;
    int ref_col = ref_mv->col;

    int row_min = ref_row - distance;
    int row_max = ref_row + distance;
    int col_min = ref_col - distance;
    int col_max = ref_col + distance;

    unsigned int sad_array[3];

    MV fcenter_mv;
    int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};

    fcenter_mv.row = center_mv->row >> 3;
    fcenter_mv.col = center_mv->col >> 3;

    // Work out the mid point for the search
    in_what = *(d->base_pre) + d->pre;
    bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;

    best_mv->row = ref_row;
    best_mv->col = ref_col;

    // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
    if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
    {
        // Baseline value at the centre
        bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, error_per_bit);
    }

    // Apply further limits to prevent us from using vectors that stretch beyond the UMV border
    if (col_min < x->mv_col_min)
        col_min = x->mv_col_min;

    if (col_max > x->mv_col_max)
        col_max = x->mv_col_max;

    if (row_min < x->mv_row_min)
        row_min = x->mv_row_min;

    if (row_max > x->mv_row_max)
        row_max = x->mv_row_max;

    for (r = row_min; r < row_max; r++)
    {
        int i;

        this_mv.row = r;
        check_here = r * mv_stride + in_what + col_min;
        c = col_min;

        while ((c + 2) < col_max)
        {
            fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array);

            for (i = 0; i < 3; i++)
            {
                thissad = sad_array[i];

                if (thissad < bestsad)
                {
                    this_mv.col = c;
                    thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);

                    if (thissad < bestsad)
                    {
                        bestsad = thissad;
                        best_mv->row = r;
                        best_mv->col = c;
                        bestaddress = check_here;
                    }
                }

                check_here++;
                c++;
            }
        }

        while (c < col_max)
        {
            thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);

            if (thissad < bestsad)
            {
                this_mv.col = c;
                thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);

                if (thissad < bestsad)
                {
                    bestsad = thissad;
                    best_mv->row = r;
                    best_mv->col = c;
                    bestaddress = check_here;
                }
            }

            check_here++;
            c++;
        }
    }

    this_mv.row = best_mv->row << 3;
    this_mv.col = best_mv->col << 3;

    if (bestsad < INT_MAX)
        return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
               + mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
    else
        return INT_MAX;
}
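/* The x3 variant above and the x8 variant below batch the innermost column
 * loop: fn_ptr->sdx3f (or sdx8f) produces the SADs of 3 (or 8) consecutive
 * candidate columns in one call, and a scalar fn_ptr->sdf loop mops up the
 * remainder of each row.  Only candidates that already beat bestsad pay for
 * the motion-vector cost term. */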
int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], MV *center_mv)
{
    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
    unsigned char *in_what;
    int in_what_stride = d->pre_stride;
    int mv_stride = d->pre_stride;
    unsigned char *bestaddress;
    MV *best_mv = &d->bmi.mv.as_mv;
    MV this_mv;
    int bestsad = INT_MAX;
    int r, c;

    unsigned char *check_here;
    unsigned int thissad;

    int ref_row = ref_mv->row;
    int ref_col = ref_mv->col;

    int row_min = ref_row - distance;
    int row_max = ref_row + distance;
    int col_min = ref_col - distance;
    int col_max = ref_col + distance;

    DECLARE_ALIGNED_ARRAY(16, unsigned short, sad_array8, 8);
    unsigned int sad_array[3];

    MV fcenter_mv;
    int *mvsadcost[2] = {x->mvsadcost[0], x->mvsadcost[1]};

    fcenter_mv.row = center_mv->row >> 3;
    fcenter_mv.col = center_mv->col >> 3;

    // Work out the mid point for the search
    in_what = *(d->base_pre) + d->pre;
    bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;

    best_mv->row = ref_row;
    best_mv->col = ref_col;

    // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
    if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
    {
        // Baseline value at the centre
        bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + mvsad_err_cost(best_mv, &fcenter_mv, mvsadcost, error_per_bit);
    }

    // Apply further limits to prevent us from using vectors that stretch beyond the UMV border
    if (col_min < x->mv_col_min)
        col_min = x->mv_col_min;

    if (col_max > x->mv_col_max)
        col_max = x->mv_col_max;

    if (row_min < x->mv_row_min)
        row_min = x->mv_row_min;

    if (row_max > x->mv_row_max)
        row_max = x->mv_row_max;

    for (r = row_min; r < row_max; r++)
    {
        int i;

        this_mv.row = r;
        check_here = r * mv_stride + in_what + col_min;
        c = col_min;

        while ((c + 7) < col_max)
        {
            fn_ptr->sdx8f(what, what_stride, check_here, in_what_stride, sad_array8);

            for (i = 0; i < 8; i++)
            {
                thissad = (unsigned int)sad_array8[i];

                if (thissad < bestsad)
                {
                    this_mv.col = c;
                    thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);

                    if (thissad < bestsad)
                    {
                        bestsad = thissad;
                        best_mv->row = r;
                        best_mv->col = c;
                        bestaddress = check_here;
                    }
                }

                check_here++;
                c++;
            }
        }

        while ((c + 2) < col_max)
        {
            fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array);

            for (i = 0; i < 3; i++)
            {
                thissad = sad_array[i];

                if (thissad < bestsad)
                {
                    this_mv.col = c;
                    thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);

                    if (thissad < bestsad)
                    {
                        bestsad = thissad;
                        best_mv->row = r;
                        best_mv->col = c;
                        bestaddress = check_here;
                    }
                }

                check_here++;
                c++;
            }
        }

        while (c < col_max)
        {
            thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);

            if (thissad < bestsad)
            {
                this_mv.col = c;
                thissad += mvsad_err_cost(&this_mv, &fcenter_mv, mvsadcost, error_per_bit);

                if (thissad < bestsad)
                {
                    bestsad = thissad;
                    best_mv->row = r;
                    best_mv->col = c;
                    bestaddress = check_here;
                }
            }

            check_here++;
            c++;
        }
    }

    this_mv.row = best_mv->row << 3;
    this_mv.col = best_mv->col << 3;

    if (bestsad < INT_MAX)
        return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
               + mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
    else
        return INT_MAX;
}
#endif /* !(CONFIG_REALTIME_ONLY) */
#ifdef ENTROPY_STATS
void print_mode_context(void)
{
    FILE *f = fopen("modecont.c", "w");
    int i, j;

    fprintf(f, "#include \"entropy.h\"\n");
    fprintf(f, "const int vp8_mode_contexts[6][4] =\n");
    fprintf(f, "{\n");

    for (j = 0; j < 6; j++)
    {
        fprintf(f, "  { // %d \n", j);

        for (i = 0; i < 4; i++)
        {
            int overal_prob;
            int this_prob;
            int count; // = mv_ref_ct[j][i][0]+mv_ref_ct[j][i][1];

            // Overall probability for this mode
            count = mv_mode_cts[i][0] + mv_mode_cts[i][1];

            if (count)
                overal_prob = 256 * mv_mode_cts[i][0] / count;
            else
                overal_prob = 128;

            if (overal_prob == 0)
                overal_prob = 1;

            // Context probability for this mode
            count = mv_ref_ct[j][i][0] + mv_ref_ct[j][i][1];

            if (count)
                this_prob = 256 * mv_ref_ct[j][i][0] / count;
            else
                this_prob = 128;

            if (this_prob == 0)
                this_prob = 1;

            fprintf(f, "%5d, ", this_prob);
            //fprintf(f,"%5d, %5d, %8d,", this_prob, overal_prob, (this_prob << 10)/overal_prob);
            //fprintf(f,"%8d, ", (this_prob << 10)/overal_prob);
        }

        fprintf(f, "  },\n");
    }

    fprintf(f, "};\n");
    fclose(f);
}
#endif
/* MV ref count ENTROPY_STATS stats code */
#ifdef ENTROPY_STATS
void init_mv_ref_counts()
{
    vpx_memset(mv_ref_ct, 0, sizeof(mv_ref_ct));
    vpx_memset(mv_mode_cts, 0, sizeof(mv_mode_cts));
}

void accum_mv_refs(MB_PREDICTION_MODE m, const int ct[4])
{
    if (m == ZEROMV)
    {
        ++mv_ref_ct [ct[0]] [0] [0];
        ++mv_mode_cts[0][0];
    }
    else
    {
        ++mv_ref_ct [ct[0]] [0] [1];
        ++mv_mode_cts[0][1];

        if (m == NEARESTMV)
        {
            ++mv_ref_ct [ct[1]] [1] [0];
            ++mv_mode_cts[1][0];
        }
        else
        {
            ++mv_ref_ct [ct[1]] [1] [1];
            ++mv_mode_cts[1][1];

            if (m == NEARMV)
            {
                ++mv_ref_ct [ct[2]] [2] [0];
                ++mv_mode_cts[2][0];
            }
            else
            {
                ++mv_ref_ct [ct[2]] [2] [1];
                ++mv_mode_cts[2][1];

                if (m == NEWMV)
                {
                    ++mv_ref_ct [ct[3]] [3] [0];
                    ++mv_mode_cts[3][0];
                }
                else
                {
                    ++mv_ref_ct [ct[3]] [3] [1];
                    ++mv_mode_cts[3][1];
                }
            }
        }
    }
}

#endif /* END MV ref count ENTROPY_STATS stats code */