/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
#include "vpx_mem/vpx_mem.h"
#include <stdio.h>
#include <limits.h>
#include <math.h>
static int mv_ref_ct [31] [4] [2];
static int mv_mode_cts [4] [2];

static int mv_bits_sadcost[256];
void vp8cx_init_mv_bits_sadcost()
{
    int i;

    for (i = 0; i < 256; i++)
    {
        mv_bits_sadcost[i] = (int)sqrt(i * 16);
    }
}
int vp8_mv_bit_cost(MV *mv, MV *ref, int *mvcost[2], int Weight)
{
    // MV costing is based on the distribution of vectors in the previous frame and as such will tend to
    // overstate the cost of vectors. In addition, coding a new vector can have a knock-on effect on the
    // cost of subsequent vectors and on the quality of prediction from NEAR and NEAREST for subsequent blocks.
    // The "Weight" parameter allows, to a limited extent, for some account to be taken of these factors.
    return ((mvcost[0][(mv->row - ref->row) >> 1] + mvcost[1][(mv->col - ref->col) >> 1]) * Weight) >> 7;
}
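// Note (based on how the tables are used in this file, not on a spec reference):
// mv and ref are in the 1/8-pel units used throughout this file (full-pel values
// are shifted left by 3 before costing). The >> 1 maps each component difference
// onto the index step of the mvcost tables, and the final >> 7 scales the summed
// table cost by Weight/128, so a Weight of 128 returns the table cost unchanged.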
int vp8_mv_err_cost(MV *mv, MV *ref, int *mvcost[2], int error_per_bit)
{
    //return ((mvcost[0][(mv->row - ref->row)>>1] + mvcost[1][(mv->col - ref->col)>>1] + 128) * error_per_bit) >> 8;
    //return ( (vp8_mv_bit_cost(mv, ref, mvcost, 100) + 128) * error_per_bit) >> 8;
    //i = (vp8_mv_bit_cost(mv, ref, mvcost, 100) * error_per_bit + 128) >> 8;
    return ((mvcost[0][(mv->row - ref->row) >> 1] + mvcost[1][(mv->col - ref->col) >> 1]) * error_per_bit + 128) >> 8;
    //return (vp8_mv_bit_cost(mv, ref, mvcost, 128) * error_per_bit + 128) >> 8;
}
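// error_per_bit acts as the rate-distortion multiplier here: the table cost is
// scaled by it, rounded (+128) and divided by 256 so that the result lands in
// the same units as the SAD / variance distortion it is added to. This is how
// the searches below trade extra motion-vector bits against prediction error.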
static int mv_bits(MV *mv, MV *ref, int *mvcost[2])
{
    // get the estimated number of bits for a motion vector, to be used for costing in SAD based
    // motion estimation
    return ((mvcost[0][(mv->row - ref->row) >> 1] + mvcost[1][(mv->col - ref->col) >> 1]) + 128) >> 8;
}
void vp8_init_dsmotion_compensation(MACROBLOCK *x, int stride)
{
    int Len;
    int search_site_count = 0;

    // Generate offsets for 4 search sites per step.
    Len = MAX_FIRST_STEP;
    x->ss[search_site_count].mv.col = 0;
    x->ss[search_site_count].mv.row = 0;
    x->ss[search_site_count].offset = 0;
    search_site_count++;

    while (Len > 0)
    {
        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = 0;
        x->ss[search_site_count].mv.row = -Len;
        x->ss[search_site_count].offset = -Len * stride;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = 0;
        x->ss[search_site_count].mv.row = Len;
        x->ss[search_site_count].offset = Len * stride;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = -Len;
        x->ss[search_site_count].mv.row = 0;
        x->ss[search_site_count].offset = -Len;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = Len;
        x->ss[search_site_count].mv.row = 0;
        x->ss[search_site_count].offset = Len;
        search_site_count++;

        // Contract.
        Len /= 2;
    }

    x->ss_count = search_site_count;
    x->searches_per_step = 4;
}
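// Each pass of the loop above adds a 4-point diamond (up, down, left, right)
// at the current Len, then halves Len, so the table holds progressively
// finer steps:
//
//                (0,-Len)
//      (-Len,0)     +     (Len,0)
//                (0, Len)
//
// The .offset field caches the equivalent byte offset for a buffer of the
// given stride, so the search loops below can address candidate blocks
// without recomputing row * stride + col.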
void vp8_init3smotion_compensation(MACROBLOCK *x, int stride)
{
    int Len;
    int search_site_count = 0;

    // Generate offsets for 8 search sites per step.
    Len = MAX_FIRST_STEP;
    x->ss[search_site_count].mv.col = 0;
    x->ss[search_site_count].mv.row = 0;
    x->ss[search_site_count].offset = 0;
    search_site_count++;

    while (Len > 0)
    {
        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = 0;
        x->ss[search_site_count].mv.row = -Len;
        x->ss[search_site_count].offset = -Len * stride;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = 0;
        x->ss[search_site_count].mv.row = Len;
        x->ss[search_site_count].offset = Len * stride;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = -Len;
        x->ss[search_site_count].mv.row = 0;
        x->ss[search_site_count].offset = -Len;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = Len;
        x->ss[search_site_count].mv.row = 0;
        x->ss[search_site_count].offset = Len;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = -Len;
        x->ss[search_site_count].mv.row = -Len;
        x->ss[search_site_count].offset = -Len * stride - Len;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = Len;
        x->ss[search_site_count].mv.row = -Len;
        x->ss[search_site_count].offset = -Len * stride + Len;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = -Len;
        x->ss[search_site_count].mv.row = Len;
        x->ss[search_site_count].offset = Len * stride - Len;
        search_site_count++;

        // Compute offsets for search sites.
        x->ss[search_site_count].mv.col = Len;
        x->ss[search_site_count].mv.row = Len;
        x->ss[search_site_count].offset = Len * stride + Len;
        search_site_count++;

        // Contract.
        Len /= 2;
    }

    x->ss_count = search_site_count;
    x->searches_per_step = 8;
}
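// Same idea as above, but with 8 sites per step: the 4 diamond points plus
// the 4 diagonals (+/-Len, +/-Len), giving a square ring around the current
// best position at each scale.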
#define MVC(r,c) (((mvcost[0][(r)-rr] + mvcost[1][(c) - rc]) * error_per_bit + 128 )>>8 ) // estimated cost of a motion vector (r,c)
#define PRE(r,c) (*(d->base_pre) + d->pre + ((r)>>2) * d->pre_stride + ((c)>>2)) // pointer to predictor base of a motion vector
#define SP(x) (((x)&3)<<1) // convert motion vector component to offset for svf calc
#define DIST(r,c) vfp->svf( PRE(r,c), d->pre_stride, SP(c),SP(r), z,b->src_stride,&sse) // returns subpixel variance error
#define IFMVCV(r,c,s,e) if ( c >= minc && c <= maxc && r >= minr && r <= maxr) s else e;
#define ERR(r,c) (MVC(r,c)+DIST(r,c)) // returns distortion + motion vector cost
#define CHECK_BETTER(v,r,c) IFMVCV(r,c,{if((v = ERR(r,c)) < besterr) { besterr = v; br=r; bc=c; }}, v=INT_MAX;)// checks if (r,c) has better score than previous best
#define MIN(x,y) (((x)<(y))?(x):(y))
#define MAX(x,y) (((x)>(y))?(x):(y))

//#define CHECK_BETTER(v,r,c) if((v = ERR(r,c)) < besterr) { besterr = v; br=r; bc=c; }
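// How the macros compose for the sub-pixel search below: CHECK_BETTER(v,r,c)
// first clamps the candidate against the (minr,maxr,minc,maxc) window via
// IFMVCV, then evaluates ERR(r,c) = MVC(r,c) + DIST(r,c), i.e. the
// motion-vector rate term plus the sub-pixel variance of the candidate, and
// finally updates the running best (besterr, br, bc) if the candidate wins.
// r and c are in quarter-pel units; PRE() drops to the full-pel base and
// SP() extracts the sub-pixel phase passed to the svf (sub-pixel variance)
// function.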
int vp8_find_best_sub_pixel_step_iteratively(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2])
{
    unsigned char *y = *(d->base_pre) + d->pre + (bestmv->row) * d->pre_stride + bestmv->col;
    unsigned char *z = (*(b->base_src) + b->src);

    int rr = ref_mv->row >> 1, rc = ref_mv->col >> 1;
    int br = bestmv->row << 2, bc = bestmv->col << 2;
    int tr = br, tc = bc;
    unsigned int besterr = INT_MAX;
    unsigned int left, right, up, down, diag;
    unsigned int sse;
    unsigned int whichdir;
    unsigned int halfiters = 4;
    unsigned int quarteriters = 4;

    int minc = MAX(x->mv_col_min << 2, (ref_mv->col >> 1) - ((1 << mvlong_width) - 1));
    int maxc = MIN(x->mv_col_max << 2, (ref_mv->col >> 1) + ((1 << mvlong_width) - 1));
    int minr = MAX(x->mv_row_min << 2, (ref_mv->row >> 1) - ((1 << mvlong_width) - 1));
    int maxr = MIN(x->mv_row_max << 2, (ref_mv->row >> 1) + ((1 << mvlong_width) - 1));

    // central mv
    bestmv->row <<= 3;
    bestmv->col <<= 3;

    // calculate central point error
    besterr = vfp->vf(y, d->pre_stride, z, b->src_stride, &sse);
    besterr += vp8_mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);

    // TODO: Each subsequent iteration checks at least one point in common with the last iteration; it could be 2 (if the diagonal was selected).
    // 1/2 pel
    while (--halfiters)
    {
        CHECK_BETTER(left, tr, tc - 2);
        CHECK_BETTER(right, tr, tc + 2);
        CHECK_BETTER(up, tr - 2, tc);
        CHECK_BETTER(down, tr + 2, tc);

        whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);

        switch (whichdir)
        {
        case 0:
            CHECK_BETTER(diag, tr - 2, tc - 2);
            break;
        case 1:
            CHECK_BETTER(diag, tr - 2, tc + 2);
            break;
        case 2:
            CHECK_BETTER(diag, tr + 2, tc - 2);
            break;
        case 3:
            CHECK_BETTER(diag, tr + 2, tc + 2);
            break;
        }

        // no reason to check the same one again.
        if (tr == br && tc == bc)
            break;

        tr = br;
        tc = bc;
    }

    // TODO: Each subsequent iteration checks at least one point in common with the last iteration; it could be 2 (if the diagonal was selected).
    // 1/4 pel
    while (--quarteriters)
    {
        CHECK_BETTER(left, tr, tc - 1);
        CHECK_BETTER(right, tr, tc + 1);
        CHECK_BETTER(up, tr - 1, tc);
        CHECK_BETTER(down, tr + 1, tc);

        whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);

        switch (whichdir)
        {
        case 0:
            CHECK_BETTER(diag, tr - 1, tc - 1);
            break;
        case 1:
            CHECK_BETTER(diag, tr - 1, tc + 1);
            break;
        case 2:
            CHECK_BETTER(diag, tr + 1, tc - 1);
            break;
        case 3:
            CHECK_BETTER(diag, tr + 1, tc + 1);
            break;
        }

        // no reason to check the same one again.
        if (tr == br && tc == bc)
            break;

        tr = br;
        tc = bc;
    }

    bestmv->row = br << 1;
    bestmv->col = bc << 1;

    if ((abs(bestmv->col - ref_mv->col) > MAX_FULL_PEL_VAL) || (abs(bestmv->row - ref_mv->row) > MAX_FULL_PEL_VAL))
        return INT_MAX;

    return besterr;
}
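// Precision note: bestmv enters in full-pel units and is promoted to the
// 1/8-pel units used by the cost tables; the search itself runs on a
// quarter-pel grid (br, bc), first in steps of 2 (half pel) and then 1
// (quarter pel), and the result is written back as br << 1 / bc << 1,
// i.e. a 1/8-pel-unit vector with quarter-pel resolution.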
int vp8_find_best_sub_pixel_step(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2])
{
    int bestmse = INT_MAX;
    MV startmv;
    MV this_mv;
    unsigned char *y = *(d->base_pre) + d->pre + (bestmv->row) * d->pre_stride + bestmv->col;
    unsigned char *z = (*(b->base_src) + b->src);
    int left, right, up, down, diag;
    unsigned int sse;
    int whichdir;

    // Trap uncodable vectors
    if ((abs((bestmv->col << 3) - ref_mv->col) > MAX_FULL_PEL_VAL) || (abs((bestmv->row << 3) - ref_mv->row) > MAX_FULL_PEL_VAL))
    {
        bestmv->row <<= 3;
        bestmv->col <<= 3;
        return INT_MAX;
    }

    // central mv
    bestmv->row <<= 3;
    bestmv->col <<= 3;
    startmv = *bestmv;

    // calculate central point error
    bestmse = vfp->vf(y, d->pre_stride, z, b->src_stride, &sse);
    bestmse += vp8_mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);

    // go left then right and check error
    this_mv.row = startmv.row;
    this_mv.col = ((startmv.col - 8) | 4);
    left = vfp->svf_halfpix_h(y - 1, d->pre_stride, z, b->src_stride, &sse);
    left += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    this_mv.col += 8;
    right = vfp->svf_halfpix_h(y, d->pre_stride, z, b->src_stride, &sse);
    right += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    // go up then down and check error
    this_mv.col = startmv.col;
    this_mv.row = ((startmv.row - 8) | 4);
    up = vfp->svf_halfpix_v(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
    up += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    this_mv.row += 8;
    down = vfp->svf_halfpix_v(y, d->pre_stride, z, b->src_stride, &sse);
    down += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    // now check 1 more diagonal
    whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
    //for(whichdir =0;whichdir<4;whichdir++)
    this_mv = startmv;

    switch (whichdir)
    {
    case 0:
        this_mv.col = (this_mv.col - 8) | 4;
        this_mv.row = (this_mv.row - 8) | 4;
        diag = vfp->svf_halfpix_hv(y - 1 - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
        break;
    case 1:
        this_mv.col += 4;
        this_mv.row = (this_mv.row - 8) | 4;
        diag = vfp->svf_halfpix_hv(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
        break;
    case 2:
        this_mv.col = (this_mv.col - 8) | 4;
        this_mv.row += 4;
        diag = vfp->svf_halfpix_hv(y - 1, d->pre_stride, z, b->src_stride, &sse);
        break;
    case 3:
        this_mv.col += 4;
        this_mv.row += 4;
        diag = vfp->svf_halfpix_hv(y, d->pre_stride, z, b->src_stride, &sse);
        break;
    }

    diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    // time to check quarter pels.
    if (bestmv->row < startmv.row)
        y -= d->pre_stride;

    if (bestmv->col < startmv.col)
        y--;

    startmv = *bestmv;

    // go left then right and check error
    this_mv.row = startmv.row;

    if (startmv.col & 7)
    {
        this_mv.col = startmv.col - 2;
        left = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
    }
    else
    {
        this_mv.col = (startmv.col - 8) | 6;
        left = vfp->svf(y - 1, d->pre_stride, 6, this_mv.row & 7, z, b->src_stride, &sse);
    }

    left += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    this_mv.col += 4;
    right = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
    right += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    // go up then down and check error
    this_mv.col = startmv.col;

    if (startmv.row & 7)
    {
        this_mv.row = startmv.row - 2;
        up = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
    }
    else
    {
        this_mv.row = (startmv.row - 8) | 6;
        up = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.col & 7, 6, z, b->src_stride, &sse);
    }

    up += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    this_mv.row += 4;
    down = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
    down += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    // now check 1 more diagonal
    whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);
    // for(whichdir=0;whichdir<4;whichdir++)
    this_mv = startmv;

    switch (whichdir)
    {
    case 0:

        if (startmv.row & 7)
        {
            this_mv.row -= 2;

            if (startmv.col & 7)
            {
                this_mv.col -= 2;
                diag = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
            }
            else
            {
                this_mv.col = (startmv.col - 8) | 6;
                diag = vfp->svf(y - 1, d->pre_stride, 6, this_mv.row & 7, z, b->src_stride, &sse);
            }
        }
        else
        {
            this_mv.row = (startmv.row - 8) | 6;

            if (startmv.col & 7)
            {
                this_mv.col -= 2;
                diag = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.col & 7, 6, z, b->src_stride, &sse);
            }
            else
            {
                this_mv.col = (startmv.col - 8) | 6;
                diag = vfp->svf(y - d->pre_stride - 1, d->pre_stride, 6, 6, z, b->src_stride, &sse);
            }
        }

        break;
    case 1:

        this_mv.col += 2;

        if (startmv.row & 7)
        {
            this_mv.row -= 2;
            diag = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
        }
        else
        {
            this_mv.row = (startmv.row - 8) | 6;
            diag = vfp->svf(y - d->pre_stride, d->pre_stride, this_mv.col & 7, 6, z, b->src_stride, &sse);
        }

        break;
    case 2:

        this_mv.row += 2;

        if (startmv.col & 7)
        {
            this_mv.col -= 2;
            diag = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
        }
        else
        {
            this_mv.col = (startmv.col - 8) | 6;
            diag = vfp->svf(y - 1, d->pre_stride, 6, this_mv.row & 7, z, b->src_stride, &sse);
        }

        break;
    case 3:

        this_mv.col += 2;
        this_mv.row += 2;
        diag = vfp->svf(y, d->pre_stride, this_mv.col & 7, this_mv.row & 7, z, b->src_stride, &sse);
        break;
    }

    diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (diag < bestmse)
    {
        *bestmv = this_mv;
        bestmse = diag;
    }

    return bestmse;
}
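// The half-pel stage above always probes left/right/up/down plus the one
// diagonal picked by whichdir; the quarter-pel stage then repeats the same
// pattern around the winning half-pel position. The (mv & 7) tests decide
// whether a candidate stays inside the current full-pel sample or needs the
// predictor pointer nudged back one row or column with the fixed phase 6
// passed to the sub-pixel variance function.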
int vp8_find_best_half_pixel_step(MACROBLOCK *mb, BLOCK *b, BLOCKD *d, MV *bestmv, MV *ref_mv, int error_per_bit, const vp8_variance_fn_ptr_t *vfp, int *mvcost[2])
{
    int bestmse = INT_MAX;
    MV startmv;
    MV this_mv;
    unsigned char *y = *(d->base_pre) + d->pre + (bestmv->row) * d->pre_stride + bestmv->col;
    unsigned char *z = (*(b->base_src) + b->src);
    int left, right, up, down, diag;
    unsigned int sse;
    int whichdir;

    // Trap uncodable vectors
    if ((abs((bestmv->col << 3) - ref_mv->col) > MAX_FULL_PEL_VAL) || (abs((bestmv->row << 3) - ref_mv->row) > MAX_FULL_PEL_VAL))
    {
        bestmv->row <<= 3;
        bestmv->col <<= 3;
        return INT_MAX;
    }

    // central mv
    bestmv->row <<= 3;
    bestmv->col <<= 3;
    startmv = *bestmv;

    // calculate central point error
    bestmse = vfp->vf(y, d->pre_stride, z, b->src_stride, &sse);
    bestmse += vp8_mv_err_cost(bestmv, ref_mv, mvcost, error_per_bit);

    // go left then right and check error
    this_mv.row = startmv.row;
    this_mv.col = ((startmv.col - 8) | 4);
    left = vfp->svf_halfpix_h(y - 1, d->pre_stride, z, b->src_stride, &sse);
    left += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    this_mv.col += 8;
    right = vfp->svf_halfpix_h(y, d->pre_stride, z, b->src_stride, &sse);
    right += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    // go up then down and check error
    this_mv.col = startmv.col;
    this_mv.row = ((startmv.row - 8) | 4);
    up = vfp->svf_halfpix_v(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
    up += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    this_mv.row += 8;
    down = vfp->svf_halfpix_v(y, d->pre_stride, z, b->src_stride, &sse);
    down += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    // somewhat strangely, not doing all the diagonals for half pel is slower than doing them.

    // now check 1 more diagonal -
    whichdir = (left < right ? 0 : 1) + (up < down ? 0 : 2);

    this_mv.col = (this_mv.col - 8) | 4;
    this_mv.row = (this_mv.row - 8) | 4;
    diag = vfp->svf(y - 1 - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);

    this_mv.row = (this_mv.row - 8) | 4;
    diag = vfp->svf(y - d->pre_stride, d->pre_stride, 4, 4, z, b->src_stride, &sse);

    this_mv.col = (this_mv.col - 8) | 4;

    diag = vfp->svf(y - 1, d->pre_stride, 4, 4, z, b->src_stride, &sse);

    diag = vfp->svf(y, d->pre_stride, 4, 4, z, b->src_stride, &sse);

    diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    this_mv.col = (this_mv.col - 8) | 4;
    this_mv.row = (this_mv.row - 8) | 4;
    diag = vfp->svf_halfpix_hv(y - 1 - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
    diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    this_mv.col += 8;
    diag = vfp->svf_halfpix_hv(y - d->pre_stride, d->pre_stride, z, b->src_stride, &sse);
    diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    this_mv.col = (this_mv.col - 8) | 4;
    this_mv.row = startmv.row + 4;
    diag = vfp->svf_halfpix_hv(y - 1, d->pre_stride, z, b->src_stride, &sse);
    diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    this_mv.col += 8;
    diag = vfp->svf_halfpix_hv(y, d->pre_stride, z, b->src_stride, &sse);
    diag += vp8_mv_err_cost(&this_mv, ref_mv, mvcost, error_per_bit);

    if (diag < bestmse)
    {
        *bestmv = this_mv;
        bestmse = diag;
    }

    return bestmse;
}
#undef MVC
#undef PRE
#undef DIST
#undef ERR
#undef CHECK_BETTER

#define MVC(r,c) (((mvsadcost[0][((r)<<2)-rr] + mvsadcost[1][((c)<<2) - rc]) * error_per_bit + 128 )>>8 ) // estimated cost of a motion vector (r,c)
#define PRE(r,c) (*(d->base_pre) + d->pre + (r) * d->pre_stride + (c)) // pointer to predictor base of a motion vector
#define DIST(r,c,v) vfp->sdf( src,src_stride,PRE(r,c),d->pre_stride, v) // returns sad error score.
#define ERR(r,c,v) (MVC(r,c)+DIST(r,c,v)) // returns distortion + motion vector cost
#define CHECK_BETTER(v,r,c) if ((v = ERR(r,c,besterr)) < besterr) { besterr = v; br=r; bc=c; } // checks if (r,c) has better score than previous best

static const MV next_chkpts[6][3] =
{
    {{ -2, 0}, { -1, -2}, {1, -2}},
    {{ -1, -2}, {1, -2}, {2, 0}},
    {{1, -2}, {2, 0}, {1, 2}},
    {{2, 0}, {1, 2}, { -1, 2}},
    {{1, 2}, { -1, 2}, { -2, 0}},
    {{ -1, 2}, { -2, 0}, { -1, -2}}
};
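// next_chkpts is indexed by the hexagon vertex that won the previous
// iteration: once the search centre moves to vertex k, only the three
// hexagon points that were not already evaluated around the old centre need
// to be checked, which is what the inner loop of the hex refinement below
// iterates over.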
int vp8_hex_search
(
    MACROBLOCK *x,
    BLOCK *b,
    BLOCKD *d,
    MV *ref_mv,
    MV *best_mv,
    int search_param,
    int error_per_bit,
    int *num00,
    const vp8_variance_fn_ptr_t *vfp,
    int *mvsadcost[2],
    int *mvcost[2],
    MV *center_mv
)
{
    MV hex[6] = { { -1, -2}, {1, -2}, {2, 0}, {1, 2}, { -1, 2}, { -2, 0} } ;
    MV neighbors[8] = { { -1, -1}, {0, -1}, {1, -1}, { -1, 0}, {1, 0}, { -1, 1}, {0, 1}, {1, 1} } ;
    int i, j;
    int k = -1, tk;
    unsigned char *src = (*(b->base_src) + b->src);
    int src_stride = b->src_stride;
    int rr = center_mv->row, rc = center_mv->col;
    int br = ref_mv->row >> 3, bc = ref_mv->col >> 3, tr, tc;
    unsigned int besterr, thiserr = 0x7fffffff;

    if (bc < x->mv_col_min) bc = x->mv_col_min;

    if (bc > x->mv_col_max) bc = x->mv_col_max;

    if (br < x->mv_row_min) br = x->mv_row_min;

    if (br > x->mv_row_max) br = x->mv_row_max;

    besterr = ERR(br, bc, thiserr);

    // hex search: check the six hexagon points around the starting position
    tr = br;
    tc = bc;

    for (i = 0; i < 6; i++)
    {
        int nr = tr + hex[i].row, nc = tc + hex[i].col;

        if (nc < x->mv_col_min) continue;

        if (nc > x->mv_col_max) continue;

        if (nr < x->mv_row_min) continue;

        if (nr > x->mv_row_max) continue;

        //CHECK_BETTER(thiserr,nr,nc);
        if ((thiserr = ERR(nr, nc, besterr)) < besterr)
        {
            besterr = thiserr;
            br = nr;
            bc = nc;
            k = i;
        }
    }

    if (tr == br && tc == bc)
        goto cal_neighbor;

    for (j = 1; j < 127; j++)
    {
        tr = br;
        tc = bc;
        tk = k;

        for (i = 0; i < 3; i++)
        {
            int nr = tr + next_chkpts[tk][i].row, nc = tc + next_chkpts[tk][i].col;

            if (nc < x->mv_col_min) continue;

            if (nc > x->mv_col_max) continue;

            if (nr < x->mv_row_min) continue;

            if (nr > x->mv_row_max) continue;

            //CHECK_BETTER(thiserr,nr,nc);
            if ((thiserr = ERR(nr, nc, besterr)) < besterr)
            {
                besterr = thiserr;
                br = nr;
                bc = nc; //k=(tk+5+i)%6;}
                k = tk + 5 + i;

                if (k >= 12) k -= 12;
                else if (k >= 6) k -= 6;
            }
        }

        if (tr == br && tc == bc)
            break;
    }

    // check 8 1 away neighbors
cal_neighbor:
    tr = br;
    tc = bc;

    for (i = 0; i < 8; i++)
    {
        int nr = tr + neighbors[i].row, nc = tc + neighbors[i].col;

        if (nc < x->mv_col_min) continue;

        if (nc > x->mv_col_max) continue;

        if (nr < x->mv_row_min) continue;

        if (nr > x->mv_row_max) continue;

        CHECK_BETTER(thiserr, nr, nc);
    }

    best_mv->row = br;
    best_mv->col = bc;

    return vfp->vf(src, src_stride, PRE(br, bc), d->pre_stride, &thiserr) + vp8_mv_err_cost(best_mv, center_mv, mvcost, error_per_bit) ;
}
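// The hex search runs in three stages: the six hexagon vertices around the
// starting point, then repeated 3-point refinements (next_chkpts) that walk
// the centre towards a local SAD minimum, and finally the eight immediate
// neighbours of the best position. The returned score is re-computed with
// the variance function plus the full MV rate term so it is comparable with
// the other searches in this file.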
int vp8_diamond_search_sad
(
    MACROBLOCK *x,
    BLOCK *b,
    BLOCKD *d,
    MV *ref_mv,
    MV *best_mv,
    int search_param,
    int error_per_bit,
    int *num00,
    vp8_variance_fn_ptr_t *fn_ptr,
    int *mvsadcost[2],
    int *mvcost[2],
    MV *center_mv
)
{
    int i, j, step;

    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
    unsigned char *in_what;
    int in_what_stride = d->pre_stride;
    unsigned char *best_address;

    int tot_steps;
    MV this_mv;

    int bestsad = INT_MAX;
    int best_site = 0;
    int last_site = 0;

    int ref_row = ref_mv->row >> 3;
    int ref_col = ref_mv->col >> 3;
    int this_row_offset;
    int this_col_offset;
    search_site *ss;

    unsigned char *check_here;
    int thissad;

    // Work out the start point for the search
    in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
    best_address = in_what;

    // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
    if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
    {
        // Check the starting position
        bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
    }

    // search_param determines the length of the initial step and hence the number of iterations
    // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
    ss = &x->ss[search_param * x->searches_per_step];
    tot_steps = (x->ss_count / x->searches_per_step) - search_param;

    i = 1;
    best_mv->row = ref_row;
    best_mv->col = ref_col;

    for (step = 0; step < tot_steps; step++)
    {
        for (j = 0; j < x->searches_per_step; j++)
        {
            // Trap illegal vectors
            this_row_offset = best_mv->row + ss[i].mv.row;
            this_col_offset = best_mv->col + ss[i].mv.col;

            if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
                (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
            {
                check_here = ss[i].offset + best_address;
                thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);

                if (thissad < bestsad)
                {
                    this_mv.row = this_row_offset << 3;
                    this_mv.col = this_col_offset << 3;
                    thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);

                    if (thissad < bestsad)
                    {
                        bestsad = thissad;
                        best_site = i;
                    }
                }
            }

            i++;
        }

        if (best_site != last_site)
        {
            best_mv->row += ss[best_site].mv.row;
            best_mv->col += ss[best_site].mv.col;
            best_address += ss[best_site].offset;
            last_site = best_site;
        }
        else if (best_address == in_what)
            (*num00)++;
    }

    this_mv.row = best_mv->row << 3;
    this_mv.col = best_mv->col << 3;

    if (bestsad == INT_MAX)
        return INT_MAX;

    return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
           + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
}
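// search_param skips the coarsest rings of the precomputed site table, so a
// larger value starts the diamond refinement with a smaller first step and
// fewer total steps. num00 is bumped whenever a step neither moves the centre
// nor leaves the original starting address, which callers can use to skip
// redundant re-searches at finer initial steps.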
int vp8_diamond_search_sadx4
(
    MACROBLOCK *x,
    BLOCK *b,
    BLOCKD *d,
    MV *ref_mv,
    MV *best_mv,
    int search_param,
    int error_per_bit,
    int *num00,
    vp8_variance_fn_ptr_t *fn_ptr,
    int *mvsadcost[2],
    int *mvcost[2],
    MV *center_mv
)
{
    int i, j, step;

    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
    unsigned char *in_what;
    int in_what_stride = d->pre_stride;
    unsigned char *best_address;

    int tot_steps;
    MV this_mv;

    int bestsad = INT_MAX;
    int best_site = 0;
    int last_site = 0;

    int ref_row = ref_mv->row >> 3;
    int ref_col = ref_mv->col >> 3;
    int this_row_offset;
    int this_col_offset;
    search_site *ss;

    unsigned char *check_here;
    unsigned int thissad;

    // Work out the start point for the search
    in_what = (unsigned char *)(*(d->base_pre) + d->pre + (ref_row * (d->pre_stride)) + ref_col);
    best_address = in_what;

    // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
    if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
    {
        // Check the starting position
        bestsad = fn_ptr->sdf(what, what_stride, in_what, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
    }

    // search_param determines the length of the initial step and hence the number of iterations
    // 0 = initial step (MAX_FIRST_STEP) pel : 1 = (MAX_FIRST_STEP/2) pel, 2 = (MAX_FIRST_STEP/4) pel... etc.
    ss = &x->ss[search_param * x->searches_per_step];
    tot_steps = (x->ss_count / x->searches_per_step) - search_param;

    i = 1;
    best_mv->row = ref_row;
    best_mv->col = ref_col;

    for (step = 0; step < tot_steps; step++)
    {
        int all_in = 1, t;

        // To know if all neighbor points are within the bounds, 4 bounds checks are enough instead of
        // checking 4 bounds for each point.
        all_in &= ((best_mv->row + ss[i].mv.row) > x->mv_row_min);
        all_in &= ((best_mv->row + ss[i+1].mv.row) < x->mv_row_max);
        all_in &= ((best_mv->col + ss[i+2].mv.col) > x->mv_col_min);
        all_in &= ((best_mv->col + ss[i+3].mv.col) < x->mv_col_max);

        if (all_in)
        {
            unsigned int sad_array[4];

            for (j = 0; j < x->searches_per_step; j += 4)
            {
                unsigned char *block_offset[4];

                for (t = 0; t < 4; t++)
                    block_offset[t] = ss[i+t].offset + best_address;

                fn_ptr->sdx4df(what, what_stride, block_offset, in_what_stride, sad_array);

                for (t = 0; t < 4; t++, i++)
                {
                    if (sad_array[t] < bestsad)
                    {
                        this_mv.row = (best_mv->row + ss[i].mv.row) << 3;
                        this_mv.col = (best_mv->col + ss[i].mv.col) << 3;
                        sad_array[t] += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);

                        if (sad_array[t] < bestsad)
                        {
                            bestsad = sad_array[t];
                            best_site = i;
                        }
                    }
                }
            }
        }
        else
        {
            for (j = 0; j < x->searches_per_step; j++)
            {
                // Trap illegal vectors
                this_row_offset = best_mv->row + ss[i].mv.row;
                this_col_offset = best_mv->col + ss[i].mv.col;

                if ((this_col_offset > x->mv_col_min) && (this_col_offset < x->mv_col_max) &&
                    (this_row_offset > x->mv_row_min) && (this_row_offset < x->mv_row_max))
                {
                    check_here = ss[i].offset + best_address;
                    thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);

                    if (thissad < bestsad)
                    {
                        this_mv.row = this_row_offset << 3;
                        this_mv.col = this_col_offset << 3;
                        thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);

                        if (thissad < bestsad)
                        {
                            bestsad = thissad;
                            best_site = i;
                        }
                    }
                }

                i++;
            }
        }

        if (best_site != last_site)
        {
            best_mv->row += ss[best_site].mv.row;
            best_mv->col += ss[best_site].mv.col;
            best_address += ss[best_site].offset;
            last_site = best_site;
        }
        else if (best_address == in_what)
            (*num00)++;
    }

    this_mv.row = best_mv->row << 3;
    this_mv.col = best_mv->col << 3;

    if (bestsad == INT_MAX)
        return INT_MAX;

    return fn_ptr->vf(what, what_stride, best_address, in_what_stride, (unsigned int *)(&thissad))
           + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
}
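// The sadx4 variant matches vp8_diamond_search_sad except that, when the
// whole ring of candidate sites is known to be inside the search bounds, it
// hands 4 candidate pointers at a time to fn_ptr->sdx4df so the SADs can be
// computed in one batched (typically SIMD) call instead of one sdf call per
// site.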
#if !(CONFIG_REALTIME_ONLY)
int vp8_full_search_sad(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], int *mvsadcost[2], MV *center_mv)
{
    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
    unsigned char *in_what;
    int in_what_stride = d->pre_stride;
    int mv_stride = d->pre_stride;
    unsigned char *bestaddress;
    MV *best_mv = &d->bmi.mv.as_mv;
    MV this_mv;
    int bestsad = INT_MAX;
    int r, c;

    unsigned char *check_here;
    int thissad;

    int ref_row = ref_mv->row >> 3;
    int ref_col = ref_mv->col >> 3;

    int row_min = ref_row - distance;
    int row_max = ref_row + distance;
    int col_min = ref_col - distance;
    int col_max = ref_col + distance;

    // Work out the mid point for the search
    in_what = *(d->base_pre) + d->pre;
    bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;

    best_mv->row = ref_row;
    best_mv->col = ref_col;

    // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
    if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
    {
        // Baseline value at the centre
        //bestsad = fn_ptr->sf( what,what_stride,bestaddress,in_what_stride) + (int)sqrt(vp8_mv_err_cost(ref_mv,ref_mv, mvcost,error_per_bit*14));
        bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
    }

    // Apply further limits to prevent us searching with vectors that stretch beyond the UMV border
    if (col_min < x->mv_col_min)
        col_min = x->mv_col_min;

    if (col_max > x->mv_col_max)
        col_max = x->mv_col_max;

    if (row_min < x->mv_row_min)
        row_min = x->mv_row_min;

    if (row_max > x->mv_row_max)
        row_max = x->mv_row_max;

    for (r = row_min; r < row_max; r++)
    {
        this_mv.row = r << 3;
        check_here = r * mv_stride + in_what + col_min;

        for (c = col_min; c < col_max; c++)
        {
            thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);

            this_mv.col = c << 3;
            //thissad += (int)sqrt(vp8_mv_err_cost(&this_mv,ref_mv, mvcost,error_per_bit*14));
            //thissad += error_per_bit * mv_bits_sadcost[mv_bits(&this_mv, ref_mv, mvcost)];
            thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit); //mv_bits(error_per_bit, &this_mv, ref_mv, mvsadcost);

            if (thissad < bestsad)
            {
                bestsad = thissad;
                best_mv->row = r;
                best_mv->col = c;
                bestaddress = check_here;
            }

            check_here++;
        }
    }

    this_mv.row = best_mv->row << 3;
    this_mv.col = best_mv->col << 3;

    if (bestsad < INT_MAX)
        return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
               + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
    else
        return INT_MAX;
}
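// Exhaustive search over a (2 * distance) square window centred on ref_mv,
// clamped to the UMV border limits. Candidates are costed with the SAD-based
// mvsadcost tables during the scan; only the single winning position is
// re-scored with the variance function and the full mvcost tables.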
int vp8_full_search_sadx3(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], int *mvsadcost[2], MV *center_mv)
{
    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
    unsigned char *in_what;
    int in_what_stride = d->pre_stride;
    int mv_stride = d->pre_stride;
    unsigned char *bestaddress;
    MV *best_mv = &d->bmi.mv.as_mv;
    MV this_mv;
    int bestsad = INT_MAX;
    int r, c;

    unsigned char *check_here;
    unsigned int thissad;

    int ref_row = ref_mv->row >> 3;
    int ref_col = ref_mv->col >> 3;

    int row_min = ref_row - distance;
    int row_max = ref_row + distance;
    int col_min = ref_col - distance;
    int col_max = ref_col + distance;

    unsigned int sad_array[3];

    // Work out the mid point for the search
    in_what = *(d->base_pre) + d->pre;
    bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;

    best_mv->row = ref_row;
    best_mv->col = ref_col;

    // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
    if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
    {
        // Baseline value at the centre
        bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
    }

    // Apply further limits to prevent us searching with vectors that stretch beyond the UMV border
    if (col_min < x->mv_col_min)
        col_min = x->mv_col_min;

    if (col_max > x->mv_col_max)
        col_max = x->mv_col_max;

    if (row_min < x->mv_row_min)
        row_min = x->mv_row_min;

    if (row_max > x->mv_row_max)
        row_max = x->mv_row_max;

    for (r = row_min; r < row_max; r++)
    {
        this_mv.row = r << 3;
        check_here = r * mv_stride + in_what + col_min;
        c = col_min;

        while ((c + 2) < col_max)
        {
            int i;

            fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array);

            for (i = 0; i < 3; i++)
            {
                thissad = sad_array[i];

                if (thissad < bestsad)
                {
                    this_mv.col = c << 3;
                    thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);

                    if (thissad < bestsad)
                    {
                        bestsad = thissad;
                        best_mv->row = r;
                        best_mv->col = c;
                        bestaddress = check_here;
                    }
                }

                check_here++;
                c++;
            }
        }

        while (c < col_max)
        {
            thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);

            if (thissad < bestsad)
            {
                this_mv.col = c << 3;
                thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);

                if (thissad < bestsad)
                {
                    bestsad = thissad;
                    best_mv->row = r;
                    best_mv->col = c;
                    bestaddress = check_here;
                }
            }

            check_here++;
            c++;
        }
    }

    this_mv.row = best_mv->row << 3;
    this_mv.col = best_mv->col << 3;

    if (bestsad < INT_MAX)
        return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
               + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
    else
        return INT_MAX;
}
int vp8_full_search_sadx8(MACROBLOCK *x, BLOCK *b, BLOCKD *d, MV *ref_mv, int error_per_bit, int distance, vp8_variance_fn_ptr_t *fn_ptr, int *mvcost[2], int *mvsadcost[2], MV *center_mv)
{
    unsigned char *what = (*(b->base_src) + b->src);
    int what_stride = b->src_stride;
    unsigned char *in_what;
    int in_what_stride = d->pre_stride;
    int mv_stride = d->pre_stride;
    unsigned char *bestaddress;
    MV *best_mv = &d->bmi.mv.as_mv;
    MV this_mv;
    int bestsad = INT_MAX;
    int r, c;

    unsigned char *check_here;
    unsigned int thissad;

    int ref_row = ref_mv->row >> 3;
    int ref_col = ref_mv->col >> 3;

    int row_min = ref_row - distance;
    int row_max = ref_row + distance;
    int col_min = ref_col - distance;
    int col_max = ref_col + distance;

    unsigned short sad_array8[8];
    unsigned int sad_array[3];

    // Work out the mid point for the search
    in_what = *(d->base_pre) + d->pre;
    bestaddress = in_what + (ref_row * d->pre_stride) + ref_col;

    best_mv->row = ref_row;
    best_mv->col = ref_col;

    // We need to check that the starting point for the search (as indicated by ref_mv) is within the buffer limits
    if ((ref_col > x->mv_col_min) && (ref_col < x->mv_col_max) &&
        (ref_row > x->mv_row_min) && (ref_row < x->mv_row_max))
    {
        // Baseline value at the centre
        bestsad = fn_ptr->sdf(what, what_stride, bestaddress, in_what_stride, 0x7fffffff) + vp8_mv_err_cost(ref_mv, center_mv, mvsadcost, error_per_bit);
    }

    // Apply further limits to prevent us searching with vectors that stretch beyond the UMV border
    if (col_min < x->mv_col_min)
        col_min = x->mv_col_min;

    if (col_max > x->mv_col_max)
        col_max = x->mv_col_max;

    if (row_min < x->mv_row_min)
        row_min = x->mv_row_min;

    if (row_max > x->mv_row_max)
        row_max = x->mv_row_max;

    for (r = row_min; r < row_max; r++)
    {
        this_mv.row = r << 3;
        check_here = r * mv_stride + in_what + col_min;
        c = col_min;

        while ((c + 7) < col_max)
        {
            int i;

            fn_ptr->sdx8f(what, what_stride, check_here, in_what_stride, sad_array8);

            for (i = 0; i < 8; i++)
            {
                thissad = (unsigned int)sad_array8[i];

                if (thissad < bestsad)
                {
                    this_mv.col = c << 3;
                    thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);

                    if (thissad < bestsad)
                    {
                        bestsad = thissad;
                        best_mv->row = r;
                        best_mv->col = c;
                        bestaddress = check_here;
                    }
                }

                check_here++;
                c++;
            }
        }

        while ((c + 2) < col_max)
        {
            int i;

            fn_ptr->sdx3f(what, what_stride, check_here, in_what_stride, sad_array);

            for (i = 0; i < 3; i++)
            {
                thissad = sad_array[i];

                if (thissad < bestsad)
                {
                    this_mv.col = c << 3;
                    thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);

                    if (thissad < bestsad)
                    {
                        bestsad = thissad;
                        best_mv->row = r;
                        best_mv->col = c;
                        bestaddress = check_here;
                    }
                }

                check_here++;
                c++;
            }
        }

        while (c < col_max)
        {
            thissad = fn_ptr->sdf(what, what_stride, check_here, in_what_stride, bestsad);

            if (thissad < bestsad)
            {
                this_mv.col = c << 3;
                thissad += vp8_mv_err_cost(&this_mv, center_mv, mvsadcost, error_per_bit);

                if (thissad < bestsad)
                {
                    bestsad = thissad;
                    best_mv->row = r;
                    best_mv->col = c;
                    bestaddress = check_here;
                }
            }

            check_here++;
            c++;
        }
    }

    this_mv.row = best_mv->row << 3;
    this_mv.col = best_mv->col << 3;

    if (bestsad < INT_MAX)
        return fn_ptr->vf(what, what_stride, bestaddress, in_what_stride, (unsigned int *)(&thissad))
               + vp8_mv_err_cost(&this_mv, center_mv, mvcost, error_per_bit);
    else
        return INT_MAX;
}
#endif /* !(CONFIG_REALTIME_ONLY) */
#ifdef ENTROPY_STATS
void print_mode_context(void)
{
    FILE *f = fopen("modecont.c", "w");
    int i, j;

    fprintf(f, "#include \"entropy.h\"\n");
    fprintf(f, "const int vp8_mode_contexts[6][4] =\n");
    fprintf(f, "{\n");

    for (j = 0; j < 6; j++)
    {
        fprintf(f, "  { // %d \n", j);

        for (i = 0; i < 4; i++)
        {
            int overal_prob;
            int this_prob;
            int count; // = mv_ref_ct[j][i][0]+mv_ref_ct[j][i][1];

            // Overall probs
            count = mv_mode_cts[i][0] + mv_mode_cts[i][1];

            if (count)
                overal_prob = 256 * mv_mode_cts[i][0] / count;
            else
                overal_prob = 128;

            if (overal_prob == 0)
                overal_prob = 1;

            // Context probs
            count = mv_ref_ct[j][i][0] + mv_ref_ct[j][i][1];

            if (count)
                this_prob = 256 * mv_ref_ct[j][i][0] / count;
            else
                this_prob = 128;

            if (this_prob == 0)
                this_prob = 1;

            fprintf(f, "%5d, ", this_prob);
            //fprintf(f,"%5d, %5d, %8d,", this_prob, overal_prob, (this_prob << 10)/overal_prob);
            //fprintf(f,"%8d, ", (this_prob << 10)/overal_prob);
        }

        fprintf(f, "  },\n");
    }

    fprintf(f, "};\n");
    fclose(f);
}
#endif
/* MV ref count ENTROPY_STATS stats code */
#ifdef ENTROPY_STATS
void init_mv_ref_counts()
{
    vpx_memset(mv_ref_ct, 0, sizeof(mv_ref_ct));
    vpx_memset(mv_mode_cts, 0, sizeof(mv_mode_cts));
}

void accum_mv_refs(MB_PREDICTION_MODE m, const int ct[4])
{
    if (m == ZEROMV)
    {
        ++mv_ref_ct [ct[0]] [0] [0];
        ++mv_mode_cts[0][0];
    }
    else
    {
        ++mv_ref_ct [ct[0]] [0] [1];
        ++mv_mode_cts[0][1];

        if (m == NEARESTMV)
        {
            ++mv_ref_ct [ct[1]] [1] [0];
            ++mv_mode_cts[1][0];
        }
        else
        {
            ++mv_ref_ct [ct[1]] [1] [1];
            ++mv_mode_cts[1][1];

            if (m == NEARMV)
            {
                ++mv_ref_ct [ct[2]] [2] [0];
                ++mv_mode_cts[2][0];
            }
            else
            {
                ++mv_ref_ct [ct[2]] [2] [1];
                ++mv_mode_cts[2][1];

                if (m == NEWMV)
                {
                    ++mv_ref_ct [ct[3]] [3] [0];
                    ++mv_mode_cts[3][0];
                }
                else
                {
                    ++mv_ref_ct [ct[3]] [3] [1];
                    ++mv_mode_cts[3][1];
                }
            }
        }
    }
}

#endif/* END MV ref count ENTROPY_STATS stats code */