/*****************************************************************************
 * me.c: motion estimation
 *****************************************************************************
 * Copyright (C) 2003-2017 x264 project
 *
 * Authors: Loren Merritt <lorenm@u.washington.edu>
 *          Laurent Aimar <fenrir@via.ecp.fr>
 *          Fiona Glaser <fiona@x264.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02111, USA.
 *
 * This program is also available under a commercial proprietary license.
 * For more information, contact us at licensing@x264.com.
 *****************************************************************************/

#include "common/common.h"
#include "macroblock.h"
#include "me.h"

/* presets selected from good points on the speed-vs-quality curve of several test videos
 * subpel_iters[i_subpel_refine] = { refine_hpel, refine_qpel, me_hpel, me_qpel }
 * where me_* are the number of EPZS iterations run on all candidate block types,
 * and refine_* are run only on the winner.
 * the subme=8,9 values are much higher because any amount of satd search makes
 * up its time by reducing the number of qpel-rd iterations. */
static const uint8_t subpel_iterations[][4] =
   {{0,0,0,0},
    {1,1,0,0},
    {0,1,1,0},
    {0,2,1,0},
    {0,2,1,1},
    {0,2,1,2},
    {0,0,2,2},
    {0,0,2,2},
    {0,0,4,10},
    {0,0,4,10},
    {0,0,4,10},
    {0,0,4,10}};

/* (x-1)%6 */
static const uint8_t mod6m1[8] = {5,0,1,2,3,4,5,0};
/* radius 2 hexagon. repeated entries are to avoid having to compute mod6 every time. */
static const int8_t hex2[8][2] = {{-1,-2}, {-2,0}, {-1,2}, {1,2}, {2,0}, {1,-2}, {-1,-2}, {-2,0}};
static const int8_t square1[9][2] = {{0,0}, {0,-1}, {0,1}, {-1,0}, {1,0}, {-1,-1}, {-1,1}, {1,-1}, {1,1}};

static void refine_subpel( x264_t *h, x264_me_t *m, int hpel_iters, int qpel_iters, int *p_halfpel_thresh, int b_refine_qpel );

#define BITS_MVD( mx, my )\
    (p_cost_mvx[(mx)<<2] + p_cost_mvy[(my)<<2])
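
/* p_cost_mvx/y are built as (m->p_cost_mv - m->mvp[]), i.e. they are indexed
 * by MV components in quarterpel units relative to the MVP, so fullpel
 * coordinates must be scaled by 4 (<<2) before the lookup. */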

#define COST_MV( mx, my )\
do\
{\
    int cost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE,\
                   &p_fref_w[(my)*stride+(mx)], stride )\
             + BITS_MVD(mx,my);\
    COPY3_IF_LT( bcost, cost, bmx, mx, bmy, my );\
} while( 0 )

#define COST_MV_HPEL( mx, my, cost )\
do\
{\
    intptr_t stride2 = 16;\
    pixel *src = h->mc.get_ref( pix, &stride2, m->p_fref, stride, mx, my, bw, bh, &m->weight[0] );\
    cost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE, src, stride2 )\
         + p_cost_mvx[ mx ] + p_cost_mvy[ my ];\
} while( 0 )

#define COST_MV_X3_DIR( m0x, m0y, m1x, m1y, m2x, m2y, costs )\
{\
    pixel *pix_base = p_fref_w + bmx + bmy*stride;\
    h->pixf.fpelcmp_x3[i_pixel]( p_fenc,\
        pix_base + (m0x) + (m0y)*stride,\
        pix_base + (m1x) + (m1y)*stride,\
        pix_base + (m2x) + (m2y)*stride,\
        stride, costs );\
    (costs)[0] += BITS_MVD( bmx+(m0x), bmy+(m0y) );\
    (costs)[1] += BITS_MVD( bmx+(m1x), bmy+(m1y) );\
    (costs)[2] += BITS_MVD( bmx+(m2x), bmy+(m2y) );\
}

#define COST_MV_X4_DIR( m0x, m0y, m1x, m1y, m2x, m2y, m3x, m3y, costs )\
{\
    pixel *pix_base = p_fref_w + bmx + bmy*stride;\
    h->pixf.fpelcmp_x4[i_pixel]( p_fenc,\
        pix_base + (m0x) + (m0y)*stride,\
        pix_base + (m1x) + (m1y)*stride,\
        pix_base + (m2x) + (m2y)*stride,\
        pix_base + (m3x) + (m3y)*stride,\
        stride, costs );\
    (costs)[0] += BITS_MVD( bmx+(m0x), bmy+(m0y) );\
    (costs)[1] += BITS_MVD( bmx+(m1x), bmy+(m1y) );\
    (costs)[2] += BITS_MVD( bmx+(m2x), bmy+(m2y) );\
    (costs)[3] += BITS_MVD( bmx+(m3x), bmy+(m3y) );\
}

#define COST_MV_X4( m0x, m0y, m1x, m1y, m2x, m2y, m3x, m3y )\
{\
    pixel *pix_base = p_fref_w + omx + omy*stride;\
    h->pixf.fpelcmp_x4[i_pixel]( p_fenc,\
        pix_base + (m0x) + (m0y)*stride,\
        pix_base + (m1x) + (m1y)*stride,\
        pix_base + (m2x) + (m2y)*stride,\
        pix_base + (m3x) + (m3y)*stride,\
        stride, costs );\
    costs[0] += BITS_MVD( omx+(m0x), omy+(m0y) );\
    costs[1] += BITS_MVD( omx+(m1x), omy+(m1y) );\
    costs[2] += BITS_MVD( omx+(m2x), omy+(m2y) );\
    costs[3] += BITS_MVD( omx+(m3x), omy+(m3y) );\
    COPY3_IF_LT( bcost, costs[0], bmx, omx+(m0x), bmy, omy+(m0y) );\
    COPY3_IF_LT( bcost, costs[1], bmx, omx+(m1x), bmy, omy+(m1y) );\
    COPY3_IF_LT( bcost, costs[2], bmx, omx+(m2x), bmy, omy+(m2y) );\
    COPY3_IF_LT( bcost, costs[3], bmx, omx+(m3x), bmy, omy+(m3y) );\
}

#define COST_MV_X3_ABS( m0x, m0y, m1x, m1y, m2x, m2y )\
{\
    h->pixf.fpelcmp_x3[i_pixel]( p_fenc,\
        p_fref_w + (m0x) + (m0y)*stride,\
        p_fref_w + (m1x) + (m1y)*stride,\
        p_fref_w + (m2x) + (m2y)*stride,\
        stride, costs );\
    costs[0] += p_cost_mvx[(m0x)<<2]; /* no cost_mvy */\
    costs[1] += p_cost_mvx[(m1x)<<2];\
    costs[2] += p_cost_mvx[(m2x)<<2];\
    COPY3_IF_LT( bcost, costs[0], bmx, m0x, bmy, m0y );\
    COPY3_IF_LT( bcost, costs[1], bmx, m1x, bmy, m1y );\
    COPY3_IF_LT( bcost, costs[2], bmx, m2x, bmy, m2y );\
}

/*  1  */
/* 101 */
/*  1  */
#define DIA1_ITER( mx, my )\
{\
    omx = mx; omy = my;\
    COST_MV_X4( 0,-1, 0,1, -1,0, 1,0 );\
}

#define CROSS( start, x_max, y_max )\
{\
    int i = start;\
    if( (x_max) <= X264_MIN(mv_x_max-omx, omx-mv_x_min) )\
        for( ; i < (x_max)-2; i+=4 )\
            COST_MV_X4( i,0, -i,0, i+2,0, -i-2,0 );\
    for( ; i < (x_max); i+=2 )\
    {\
        if( omx+i <= mv_x_max )\
            COST_MV( omx+i, omy );\
        if( omx-i >= mv_x_min )\
            COST_MV( omx-i, omy );\
    }\
    i = start;\
    if( (y_max) <= X264_MIN(mv_y_max-omy, omy-mv_y_min) )\
        for( ; i < (y_max)-2; i+=4 )\
            COST_MV_X4( 0,i, 0,-i, 0,i+2, 0,-i-2 );\
    for( ; i < (y_max); i+=2 )\
    {\
        if( omy+i <= mv_y_max )\
            COST_MV( omx, omy+i );\
        if( omy-i >= mv_y_min )\
            COST_MV( omx, omy-i );\
    }\
}

#define FPEL(mv) (((mv)+2)>>2) /* Convert subpel MV to fullpel with rounding... */
#define SPEL(mv) ((mv)<<2)     /* ... and the reverse. */
#define SPELx2(mv) (SPEL(mv)&0xFFFCFFFC) /* for two packed MVs */
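
/* Example: a predictor component of 7 (1.75 pixels in qpel units) rounds to
 * FPEL(7) = (7+2)>>2 = 2 fullpel, and SPEL(2) = 8 converts it back; the +2
 * bias rounds to the nearest fullpel position instead of truncating. */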

void x264_me_search_ref( x264_t *h, x264_me_t *m, int16_t (*mvc)[2], int i_mvc, int *p_halfpel_thresh )
{
    const int bw = x264_pixel_size[m->i_pixel].w;
    const int bh = x264_pixel_size[m->i_pixel].h;
    const int i_pixel = m->i_pixel;
    const int stride = m->i_stride[0];
    int i_me_range = h->param.analyse.i_me_range;
    int bmx, bmy, bcost = COST_MAX;
    int bpred_cost = COST_MAX;
    int omx, omy, pmx, pmy;
    pixel *p_fenc = m->p_fenc[0];
    pixel *p_fref_w = m->p_fref_w;
    ALIGNED_ARRAY_32( pixel, pix,[16*16] );
    ALIGNED_ARRAY_8( int16_t, mvc_temp,[16],[2] );

    ALIGNED_ARRAY_16( int, costs,[16] );

    int mv_x_min = h->mb.mv_limit_fpel[0][0];
    int mv_y_min = h->mb.mv_limit_fpel[0][1];
    int mv_x_max = h->mb.mv_limit_fpel[1][0];
    int mv_y_max = h->mb.mv_limit_fpel[1][1];
/* Special version of pack to allow shortcuts in CHECK_MVRANGE */
#define pack16to32_mask2(mx,my) ((mx<<16)|(my&0x7FFF))
    uint32_t mv_min = pack16to32_mask2( -mv_x_min, -mv_y_min );
    uint32_t mv_max = pack16to32_mask2( mv_x_max, mv_y_max )|0x8000;
    uint32_t pmv, bpred_mv = 0;

#define CHECK_MVRANGE(mx,my) (!(((pack16to32_mask2(mx,my) + mv_min) | (mv_max - pack16to32_mask2(mx,my))) & 0x80004000))
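
/* CHECK_MVRANGE packs both components into one word and tests all four bounds
 * branchlessly: (mv + mv_min) goes negative in a field iff that component is
 * below its minimum, and (mv_max - mv) iff it is above its maximum.  Either
 * case sets bit 31 (x, 16-bit field) or bit 14 (y, 15-bit field), which is
 * exactly what the 0x80004000 mask detects. */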

    const uint16_t *p_cost_mvx = m->p_cost_mv - m->mvp[0];
    const uint16_t *p_cost_mvy = m->p_cost_mv - m->mvp[1];

    /* Try extra predictors if provided.  If subme >= 3, check subpel predictors,
     * otherwise round them to fullpel. */
    if( h->mb.i_subpel_refine >= 3 )
    {
        /* Calculate and check the MVP first */
        int bpred_mx = x264_clip3( m->mvp[0], SPEL(mv_x_min), SPEL(mv_x_max) );
        int bpred_my = x264_clip3( m->mvp[1], SPEL(mv_y_min), SPEL(mv_y_max) );
        pmv = pack16to32_mask( bpred_mx, bpred_my );
        pmx = FPEL( bpred_mx );
        pmy = FPEL( bpred_my );

        COST_MV_HPEL( bpred_mx, bpred_my, bpred_cost );
        int pmv_cost = bpred_cost;

        if( i_mvc > 0 )
        {
            /* Clip MV candidates and eliminate those equal to zero and pmv. */
            int valid_mvcs = x264_predictor_clip( mvc_temp+2, mvc, i_mvc, h->mb.mv_limit_fpel, pmv );
            if( valid_mvcs > 0 )
            {
                int i = 1, cost;
                /* We stuff pmv here to branchlessly pick between pmv and the various
                 * MV candidates.  [0] gets skipped in order to maintain alignment for
                 * x264_predictor_clip. */
                M32( mvc_temp[1] ) = pmv;
                bpred_cost <<= 4;
                do
                {
                    int mx = mvc_temp[i+1][0];
                    int my = mvc_temp[i+1][1];
                    COST_MV_HPEL( mx, my, cost );
                    COPY1_IF_LT( bpred_cost, (cost << 4) + i );
                } while( ++i <= valid_mvcs );
                bpred_mx = mvc_temp[(bpred_cost&15)+1][0];
                bpred_my = mvc_temp[(bpred_cost&15)+1][1];
                bpred_cost >>= 4;
            }
        }

        /* Round the best predictor back to fullpel and get the cost, since this is where
         * we'll be starting the fullpel motion search. */
        bmx = FPEL( bpred_mx );
        bmy = FPEL( bpred_my );
        bpred_mv = pack16to32_mask(bpred_mx, bpred_my);
        if( bpred_mv&0x00030003 ) /* Only test if the tested predictor is actually subpel... */
            COST_MV( bmx, bmy );
        else /* Otherwise just copy the cost (we already know it) */
            bcost = bpred_cost;

        /* Test the zero vector if it hasn't been tested yet. */
        if( pmv )
        {
            if( bmx|bmy ) COST_MV( 0, 0 );
        }
        /* If a subpel mv candidate was better than the zero vector, the previous
         * fullpel check won't have gotten it even if the pmv was zero.  So handle
         * that possibility here. */
        else
        {
            COPY3_IF_LT( bcost, pmv_cost, bmx, 0, bmy, 0 );
        }
    }
    else
    {
        /* Calculate and check the fullpel MVP first */
        bmx = pmx = x264_clip3( FPEL(m->mvp[0]), mv_x_min, mv_x_max );
        bmy = pmy = x264_clip3( FPEL(m->mvp[1]), mv_y_min, mv_y_max );
        pmv = pack16to32_mask( bmx, bmy );

        /* Because we are rounding the predicted motion vector to fullpel, there will be
         * an extra MV cost in 15 out of 16 cases.  However, when the predicted MV is
         * chosen as the best predictor, it is often the case that the subpel search will
         * result in a vector at or next to the predicted motion vector.  Therefore, we omit
         * the cost of the MV from the rounded MVP to avoid unfairly biasing against use of
         * the predicted motion vector.
         *
         * Disclaimer: this is a post-hoc rationalization for why this hack works. */
        bcost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE, &p_fref_w[bmy*stride+bmx], stride );

        if( i_mvc > 0 )
        {
            /* Like in subme>=3, except we also round the candidates to fullpel. */
            int valid_mvcs = x264_predictor_roundclip( mvc_temp+2, mvc, i_mvc, h->mb.mv_limit_fpel, pmv );
            if( valid_mvcs > 0 )
            {
                int i = 1, cost;
                M32( mvc_temp[1] ) = pmv;
                bcost <<= 4;
                do
                {
                    int mx = mvc_temp[i+1][0];
                    int my = mvc_temp[i+1][1];
                    cost = h->pixf.fpelcmp[i_pixel]( p_fenc, FENC_STRIDE, &p_fref_w[my*stride+mx], stride ) + BITS_MVD( mx, my );
                    COPY1_IF_LT( bcost, (cost << 4) + i );
                } while( ++i <= valid_mvcs );
                bmx = mvc_temp[(bcost&15)+1][0];
                bmy = mvc_temp[(bcost&15)+1][1];
                bcost >>= 4;
            }
        }

        /* Same as above, except the condition is simpler. */
        if( pmv )
            COST_MV( 0, 0 );
    }

    switch( h->mb.i_me_method )
    {
        case X264_ME_DIA:
        {
            /* diamond search, radius 1 */
            bcost <<= 4;
            int i = i_me_range;
            do
            {
                COST_MV_X4_DIR( 0,-1, 0,1, -1,0, 1,0, costs );
                COPY1_IF_LT( bcost, (costs[0]<<4)+1 );
                COPY1_IF_LT( bcost, (costs[1]<<4)+3 );
                COPY1_IF_LT( bcost, (costs[2]<<4)+4 );
                COPY1_IF_LT( bcost, (costs[3]<<4)+12 );
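                /* The +1/+3/+4/+12 codes pack the winning direction into the low
                 * 4 bits of bcost: bits 0-1 hold -dy and bits 2-3 hold -dx as
                 * 2-bit signed fields (the (0,-1) candidate is code 1, the (1,0)
                 * candidate code 12 = -1 in bits 2-3).  The arithmetic shifts
                 * below sign-extend and subtract them; a zero low nibble means no
                 * neighbor beat the center, so the search terminates. */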
                if( !(bcost&15) )
                    break;
                bmx -= (bcost<<28)>>30;
                bmy -= (bcost<<30)>>30;
                bcost &= ~15;
            } while( --i && CHECK_MVRANGE(bmx, bmy) );
            bcost >>= 4;
            break;
        }

        case X264_ME_HEX:
        {
    me_hex2:
            /* hexagon search, radius 2 */
#if 0
            for( int i = 0; i < i_me_range/2; i++ )
            {
                omx = bmx; omy = bmy;
                COST_MV( omx-2, omy );
                COST_MV( omx-1, omy+2 );
                COST_MV( omx+1, omy+2 );
                COST_MV( omx+2, omy );
                COST_MV( omx+1, omy-2 );
                COST_MV( omx-1, omy-2 );
                if( bmx == omx && bmy == omy )
                    break;
                if( !CHECK_MVRANGE(bmx, bmy) )
                    break;
            }
#else
            /* equivalent to the above, but eliminates duplicate candidates */

            /* hexagon */
            COST_MV_X3_DIR( -2,0, -1, 2,  1, 2, costs   );
            COST_MV_X3_DIR(  2,0,  1,-2, -1,-2, costs+4 ); /* +4 for 16-byte alignment */
            bcost <<= 3;
            COPY1_IF_LT( bcost, (costs[0]<<3)+2 );
            COPY1_IF_LT( bcost, (costs[1]<<3)+3 );
            COPY1_IF_LT( bcost, (costs[2]<<3)+4 );
            COPY1_IF_LT( bcost, (costs[4]<<3)+5 );
            COPY1_IF_LT( bcost, (costs[5]<<3)+6 );
            COPY1_IF_LT( bcost, (costs[6]<<3)+7 );
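
            /* The low 3 bits of bcost now hold (hex2 index)+2 for the winning
             * point, or 0 if the center stayed best; hex2[]'s duplicated last
             * two entries let dir+0..dir+2 below index three consecutive points
             * without a mod-6 wrap on every access. */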
            if( bcost&7 )
            {
                int dir = (bcost&7)-2;
                bmx += hex2[dir+1][0];
                bmy += hex2[dir+1][1];

                /* half hexagon, not overlapping the previous iteration */
                for( int i = (i_me_range>>1) - 1; i > 0 && CHECK_MVRANGE(bmx, bmy); i-- )
                {
                    COST_MV_X3_DIR( hex2[dir+0][0], hex2[dir+0][1],
                                    hex2[dir+1][0], hex2[dir+1][1],
                                    hex2[dir+2][0], hex2[dir+2][1],
                                    costs );
                    bcost &= ~7;
                    COPY1_IF_LT( bcost, (costs[0]<<3)+1 );
                    COPY1_IF_LT( bcost, (costs[1]<<3)+2 );
                    COPY1_IF_LT( bcost, (costs[2]<<3)+3 );
                    if( !(bcost&7) )
                        break;
                    dir += (bcost&7)-2;
                    dir = mod6m1[dir+1];
                    bmx += hex2[dir+1][0];
                    bmy += hex2[dir+1][1];
                }
            }
            bcost >>= 3;
#endif
            /* square refine */
            bcost <<= 4;
            COST_MV_X4_DIR(  0,-1,  0,1, -1,0, 1,0, costs );
            COPY1_IF_LT( bcost, (costs[0]<<4)+1 );
            COPY1_IF_LT( bcost, (costs[1]<<4)+2 );
            COPY1_IF_LT( bcost, (costs[2]<<4)+3 );
            COPY1_IF_LT( bcost, (costs[3]<<4)+4 );
            COST_MV_X4_DIR( -1,-1, -1,1, 1,-1, 1,1, costs );
            COPY1_IF_LT( bcost, (costs[0]<<4)+5 );
            COPY1_IF_LT( bcost, (costs[1]<<4)+6 );
            COPY1_IF_LT( bcost, (costs[2]<<4)+7 );
            COPY1_IF_LT( bcost, (costs[3]<<4)+8 );
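            /* square1[0] is {0,0}, so if none of the 8 neighbors won, bcost&15
             * stays 0 and the updates below leave the best MV unchanged. */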
            bmx += square1[bcost&15][0];
            bmy += square1[bcost&15][1];
            bcost >>= 4;
            break;
        }

        case X264_ME_UMH:
        {
            /* Uneven-cross Multi-Hexagon-grid Search
             * as in JM, except with different early termination */

            static const uint8_t x264_pixel_size_shift[7] = { 0, 1, 1, 2, 3, 3, 4 };

            int ucost1, ucost2;
            int cross_start = 1;

            /* refine predictors */
            ucost1 = bcost;
            DIA1_ITER( pmx, pmy );
            if( pmx | pmy )
                DIA1_ITER( 0, 0 );

            if( i_pixel == PIXEL_4x4 )
                goto me_hex2;

            ucost2 = bcost;
            if( (bmx | bmy) && ((bmx-pmx) | (bmy-pmy)) )
                DIA1_ITER( bmx, bmy );
            if( bcost == ucost2 )
                cross_start = 3;
            omx = bmx; omy = bmy;

            /* early termination */
#define SAD_THRESH(v) ( bcost < ( v >> x264_pixel_size_shift[i_pixel] ) )
            if( bcost == ucost2 && SAD_THRESH(2000) )
            {
                COST_MV_X4(  0,-2, -1,-1, 1,-1, -2,0 );
                COST_MV_X4(  2, 0, -1, 1, 1, 1,  0,2 );
                if( bcost == ucost1 && SAD_THRESH(500) )
                    break;
                if( bcost == ucost2 )
                {
                    int range = (i_me_range>>1) | 1;
                    CROSS( 3, range, range );
                    COST_MV_X4( -1,-2, 1,-2, -2,-1, 2,-1 );
                    COST_MV_X4( -2, 1, 2, 1, -1, 2, 1, 2 );
                    if( bcost == ucost2 )
                        break;
                    cross_start = range + 2;
                }
            }

            /* adaptive search range */
            if( i_mvc )
            {
                /* range multipliers based on casual inspection of some statistics of
                 * average distance between current predictor and final mv found by ESA.
                 * these have not been tuned much by actual encoding. */
                static const uint8_t range_mul[4][4] =
                {
                    { 3, 3, 4, 4 },
                    { 3, 4, 4, 4 },
                    { 4, 4, 4, 5 },
                    { 4, 4, 5, 6 },
                };
                int mvd;
                int sad_ctx, mvd_ctx;
                int denom = 1;

                if( i_mvc == 1 )
                {
                    if( i_pixel == PIXEL_16x16 )
                        /* mvc is probably the same as mvp, so the difference isn't meaningful.
                         * but prediction usually isn't too bad, so just use medium range */
                        mvd = 25;
                    else
                        mvd = abs( m->mvp[0] - mvc[0][0] )
                            + abs( m->mvp[1] - mvc[0][1] );
                }
                else
                {
                    /* calculate the degree of agreement between predictors. */
                    /* in 16x16, mvc includes all the neighbors used to make mvp,
                     * so don't count mvp separately. */
                    denom = i_mvc - 1;
                    mvd = 0;
                    if( i_pixel != PIXEL_16x16 )
                    {
                        mvd = abs( m->mvp[0] - mvc[0][0] )
                            + abs( m->mvp[1] - mvc[0][1] );
                        denom++;
                    }
                    mvd += x264_predictor_difference( mvc, i_mvc );
                }

                sad_ctx = SAD_THRESH(1000) ? 0
                        : SAD_THRESH(2000) ? 1
                        : SAD_THRESH(4000) ? 2 : 3;
                mvd_ctx = mvd < 10*denom ? 0
                        : mvd < 20*denom ? 1
                        : mvd < 40*denom ? 2 : 3;
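
                /* e.g. with the default i_me_range of 16 this scales the range to
                 * between 16*3>>2 = 12 (agreeing predictors, low SAD) and
                 * 16*6>>2 = 24 (disagreeing predictors, high SAD). */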
                i_me_range = i_me_range * range_mul[mvd_ctx][sad_ctx] >> 2;
            }

            /* FIXME if the above DIA2/OCT2/CROSS found a new mv, it has not updated omx/omy.
             * we are still centered on the same place as the DIA2.  is this desirable? */
            CROSS( cross_start, i_me_range, i_me_range>>1 );

            COST_MV_X4( -2,-2, -2,2, 2,-2, 2,2 );

            /* hexagon grid */
            omx = bmx; omy = bmy;
            const uint16_t *p_cost_omvx = p_cost_mvx + omx*4;
            const uint16_t *p_cost_omvy = p_cost_mvy + omy*4;
            int i = 1;
            do
            {
                static const int8_t hex4[16][2] = {
                    { 0,-4}, { 0, 4}, {-2,-3}, { 2,-3},
                    {-4,-2}, { 4,-2}, {-4,-1}, { 4,-1},
                    {-4, 0}, { 4, 0}, {-4, 1}, { 4, 1},
                    {-4, 2}, { 4, 2}, {-2, 3}, { 2, 3},
                };

                if( 4*i > X264_MIN4( mv_x_max-omx, omx-mv_x_min,
                                     mv_y_max-omy, omy-mv_y_min ) )
                {
                    for( int j = 0; j < 16; j++ )
                    {
                        int mx = omx + hex4[j][0]*i;
                        int my = omy + hex4[j][1]*i;
                        if( CHECK_MVRANGE(mx, my) )
                            COST_MV( mx, my );
                    }
                }
                else
                {
                    int dir = 0;
                    pixel *pix_base = p_fref_w + omx + (omy-4*i)*stride;
                    int dy = i*stride;
#define SADS(k,x0,y0,x1,y1,x2,y2,x3,y3)\
                    h->pixf.fpelcmp_x4[i_pixel]( p_fenc,\
                            pix_base x0*i+(y0-2*k+4)*dy,\
                            pix_base x1*i+(y1-2*k+4)*dy,\
                            pix_base x2*i+(y2-2*k+4)*dy,\
                            pix_base x3*i+(y3-2*k+4)*dy,\
                            stride, costs+4*k );\
                    pix_base += 2*dy;
#define ADD_MVCOST(k,x,y) costs[k] += p_cost_omvx[x*4*i] + p_cost_omvy[y*4*i]
#define MIN_MV(k,x,y)     COPY2_IF_LT( bcost, costs[k], dir, x*16+(y&15) )
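                    /* MIN_MV packs the winning offset into dir as x*16 + (y&15):
                     * x lands in the bits above 4 and y in a 4-bit two's-complement
                     * field, decoded below by (dir>>4) and ((dir<<28)>>28).  E.g.
                     * (-2,-3) gives -32+13 = -19; -19>>4 = -2 and sign-extending
                     * the low nibble (13) recovers -3. */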
                    SADS( 0, +0,-4, +0,+4, -2,-3, +2,-3 );
                    SADS( 1, -4,-2, +4,-2, -4,-1, +4,-1 );
                    SADS( 2, -4,+0, +4,+0, -4,+1, +4,+1 );
                    SADS( 3, -4,+2, +4,+2, -2,+3, +2,+3 );
                    ADD_MVCOST(  0, 0,-4 );
                    ADD_MVCOST(  1, 0, 4 );
                    ADD_MVCOST(  2,-2,-3 );
                    ADD_MVCOST(  3, 2,-3 );
                    ADD_MVCOST(  4,-4,-2 );
                    ADD_MVCOST(  5, 4,-2 );
                    ADD_MVCOST(  6,-4,-1 );
                    ADD_MVCOST(  7, 4,-1 );
                    ADD_MVCOST(  8,-4, 0 );
                    ADD_MVCOST(  9, 4, 0 );
                    ADD_MVCOST( 10,-4, 1 );
                    ADD_MVCOST( 11, 4, 1 );
                    ADD_MVCOST( 12,-4, 2 );
                    ADD_MVCOST( 13, 4, 2 );
                    ADD_MVCOST( 14,-2, 3 );
                    ADD_MVCOST( 15, 2, 3 );
                    MIN_MV(  0, 0,-4 );
                    MIN_MV(  1, 0, 4 );
                    MIN_MV(  2,-2,-3 );
                    MIN_MV(  3, 2,-3 );
                    MIN_MV(  4,-4,-2 );
                    MIN_MV(  5, 4,-2 );
                    MIN_MV(  6,-4,-1 );
                    MIN_MV(  7, 4,-1 );
                    MIN_MV(  8,-4, 0 );
                    MIN_MV(  9, 4, 0 );
                    MIN_MV( 10,-4, 1 );
                    MIN_MV( 11, 4, 1 );
                    MIN_MV( 12,-4, 2 );
                    MIN_MV( 13, 4, 2 );
                    MIN_MV( 14,-2, 3 );
                    MIN_MV( 15, 2, 3 );
#undef SADS
#undef ADD_MVCOST
#undef MIN_MV
                    if( dir )
                    {
                        bmx = omx + i*(dir>>4);
                        bmy = omy + i*((dir<<28)>>28);
                    }
                }
            } while( ++i <= i_me_range>>2 );
            if( bmy <= mv_y_max && bmy >= mv_y_min && bmx <= mv_x_max && bmx >= mv_x_min )
                goto me_hex2;
            break;
        }

        case X264_ME_ESA:
        case X264_ME_TESA:
        {
            const int min_x = X264_MAX( bmx - i_me_range, mv_x_min );
            const int min_y = X264_MAX( bmy - i_me_range, mv_y_min );
            const int max_x = X264_MIN( bmx + i_me_range, mv_x_max );
            const int max_y = X264_MIN( bmy + i_me_range, mv_y_max );
            /* SEA is fastest in multiples of 4 */
            const int width = (max_x - min_x + 3) & ~3;
#if 0
            /* plain old exhaustive search */
            for( int my = min_y; my <= max_y; my++ )
                for( int mx = min_x; mx < min_x + width; mx++ )
                    COST_MV( mx, my );
#else
            /* successive elimination by comparing DC before a full SAD,
             * because sum(abs(diff)) >= abs(diff(sum)). */
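            /* i.e. |DC(enc) - DC(ref)| is a lower bound on a block's SAD, so
             * h->pixf.ads[] can reject most positions from the precomputed sums
             * alone; a real SAD is computed only for the survivors whose x
             * offsets it returns in xs[]. */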
            uint16_t *sums_base = m->integral;
            ALIGNED_16( static pixel zero[8*FENC_STRIDE] ) = {0};
            ALIGNED_ARRAY_16( int, enc_dc,[4] );
            int sad_size = i_pixel <= PIXEL_8x8 ? PIXEL_8x8 : PIXEL_4x4;
            int delta = x264_pixel_size[sad_size].w;
            int16_t *xs = h->scratch_buffer;
            int xn;
            uint16_t *cost_fpel_mvx = h->cost_mv_fpel[h->mb.i_qp][-m->mvp[0]&3] + (-m->mvp[0]>>2);

            h->pixf.sad_x4[sad_size]( zero, p_fenc, p_fenc+delta,
                p_fenc+delta*FENC_STRIDE, p_fenc+delta+delta*FENC_STRIDE,
                FENC_STRIDE, enc_dc );
            if( delta == 4 )
                sums_base += stride * (h->fenc->i_lines[0] + PADV*2);
            if( i_pixel == PIXEL_16x16 || i_pixel == PIXEL_8x16 || i_pixel == PIXEL_4x8 )
                delta *= stride;
            if( i_pixel == PIXEL_8x16 || i_pixel == PIXEL_4x8 )
                enc_dc[1] = enc_dc[2];

            if( h->mb.i_me_method == X264_ME_TESA )
            {
                // ADS threshold, then SAD threshold, then keep the best few SADs, then SATD
                mvsad_t *mvsads = (mvsad_t *)(xs + ((width+31)&~31) + 4);
                int nmvsad = 0, limit;
                int sad_thresh = i_me_range <= 16 ? 10 : i_me_range <= 24 ? 11 : 12;
                int bsad = h->pixf.sad[i_pixel]( p_fenc, FENC_STRIDE, p_fref_w+bmy*stride+bmx, stride )
                         + BITS_MVD( bmx, bmy );
                for( int my = min_y; my <= max_y; my++ )
                {
                    int i;
                    int ycost = p_cost_mvy[my<<2];
                    if( bsad <= ycost )
                        continue;
                    bsad -= ycost;
                    xn = h->pixf.ads[i_pixel]( enc_dc, sums_base + min_x + my * stride, delta,
                                               cost_fpel_mvx+min_x, xs, width, bsad * 17 >> 4 );
                    for( i = 0; i < xn-2; i += 3 )
                    {
                        pixel *ref = p_fref_w+min_x+my*stride;
                        ALIGNED_ARRAY_16( int, sads,[4] ); /* padded to [4] for asm */
                        h->pixf.sad_x3[i_pixel]( p_fenc, ref+xs[i], ref+xs[i+1], ref+xs[i+2], stride, sads );
                        for( int j = 0; j < 3; j++ )
                        {
                            int sad = sads[j] + cost_fpel_mvx[xs[i+j]];
                            if( sad < bsad*sad_thresh>>3 )
                            {
                                COPY1_IF_LT( bsad, sad );
                                mvsads[nmvsad].sad = sad + ycost;
                                mvsads[nmvsad].mv[0] = min_x+xs[i+j];
                                mvsads[nmvsad].mv[1] = my;
                                nmvsad++;
                            }
                        }
                    }
                    for( ; i < xn; i++ )
                    {
                        int mx = min_x+xs[i];
                        int sad = h->pixf.sad[i_pixel]( p_fenc, FENC_STRIDE, p_fref_w+mx+my*stride, stride )
                                + cost_fpel_mvx[xs[i]];
                        if( sad < bsad*sad_thresh>>3 )
                        {
                            COPY1_IF_LT( bsad, sad );
                            mvsads[nmvsad].sad = sad + ycost;
                            mvsads[nmvsad].mv[0] = mx;
                            mvsads[nmvsad].mv[1] = my;
                            nmvsad++;
                        }
                    }
                    bsad += ycost;
                }

                limit = i_me_range >> 1;
                sad_thresh = bsad*sad_thresh>>3;
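
                /* Compact the candidate list in place: each pass halves the
                 * threshold toward bsad, copies entries down, and advances i only
                 * when an entry survives; the unsigned (sad - (sad_thresh+1)) >> 31
                 * below is 1 exactly when sad <= sad_thresh. */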
                while( nmvsad > limit*2 && sad_thresh > bsad )
                {
                    int i = 0;
                    // halve the range if the domain is too large... eh, close enough
                    sad_thresh = (sad_thresh + bsad) >> 1;
                    while( i < nmvsad && mvsads[i].sad <= sad_thresh )
                        i++;
                    for( int j = i; j < nmvsad; j++ )
                    {
                        uint32_t sad;
                        if( WORD_SIZE == 8 && sizeof(mvsad_t) == 8 )
                        {
                            uint64_t mvsad = M64( &mvsads[i] ) = M64( &mvsads[j] );
#if WORDS_BIGENDIAN
                            mvsad >>= 32;
#endif
                            sad = mvsad;
                        }
                        else
                        {
                            sad = mvsads[j].sad;
                            CP32( mvsads[i].mv, mvsads[j].mv );
                            mvsads[i].sad = sad;
                        }
                        i += (sad - (sad_thresh+1)) >> 31;
                    }
                    nmvsad = i;
                }
                while( nmvsad > limit )
                {
                    int bi = 0;
                    for( int i = 1; i < nmvsad; i++ )
                        if( mvsads[i].sad > mvsads[bi].sad )
                            bi = i;
                    nmvsad--;
                    if( sizeof( mvsad_t ) == sizeof( uint64_t ) )
                        CP64( &mvsads[bi], &mvsads[nmvsad] );
                    else
                        mvsads[bi] = mvsads[nmvsad];
                }
                for( int i = 0; i < nmvsad; i++ )
                    COST_MV( mvsads[i].mv[0], mvsads[i].mv[1] );
            }
            else
            {
                // just ADS and SAD
                for( int my = min_y; my <= max_y; my++ )
                {
                    int i;
                    int ycost = p_cost_mvy[my<<2];
                    if( bcost <= ycost )
                        continue;
                    bcost -= ycost;
                    xn = h->pixf.ads[i_pixel]( enc_dc, sums_base + min_x + my * stride, delta,
                                               cost_fpel_mvx+min_x, xs, width, bcost );
                    for( i = 0; i < xn-2; i += 3 )
                        COST_MV_X3_ABS( min_x+xs[i],my, min_x+xs[i+1],my, min_x+xs[i+2],my );
                    bcost += ycost;
                    for( ; i < xn; i++ )
                        COST_MV( min_x+xs[i], my );
                }
            }
#endif
        }
        break;
    }

    /* -> qpel mv */
    uint32_t bmv = pack16to32_mask(bmx,bmy);
    uint32_t bmv_spel = SPELx2(bmv);
    if( h->mb.i_subpel_refine < 3 )
    {
        m->cost_mv = p_cost_mvx[bmx<<2] + p_cost_mvy[bmy<<2];
        m->cost = bcost;
        /* compute the real cost */
        if( bmv == pmv ) m->cost += m->cost_mv;
        M32( m->mv ) = bmv_spel;
    }
    else
    {
        M32(m->mv) = bpred_cost < bcost ? bpred_mv : bmv_spel;
        m->cost = X264_MIN( bpred_cost, bcost );
    }

    /* subpel refine */
    if( h->mb.i_subpel_refine >= 2 )
    {
        int hpel = subpel_iterations[h->mb.i_subpel_refine][2];
        int qpel = subpel_iterations[h->mb.i_subpel_refine][3];
        refine_subpel( h, m, hpel, qpel, p_halfpel_thresh, 0 );
    }
}
#undef COST_MV

void x264_me_refine_qpel( x264_t *h, x264_me_t *m )
{
    int hpel = subpel_iterations[h->mb.i_subpel_refine][0];
    int qpel = subpel_iterations[h->mb.i_subpel_refine][1];

    if( m->i_pixel <= PIXEL_8x8 )
        m->cost -= m->i_ref_cost;

    refine_subpel( h, m, hpel, qpel, NULL, 1 );
}

void x264_me_refine_qpel_refdupe( x264_t *h, x264_me_t *m, int *p_halfpel_thresh )
{
    refine_subpel( h, m, 0, X264_MIN( 2, subpel_iterations[h->mb.i_subpel_refine][3] ), p_halfpel_thresh, 0 );
}

#define COST_MV_SAD( mx, my ) \
{ \
    intptr_t stride = 16; \
    pixel *src = h->mc.get_ref( pix, &stride, m->p_fref, m->i_stride[0], mx, my, bw, bh, &m->weight[0] ); \
    int cost = h->pixf.fpelcmp[i_pixel]( m->p_fenc[0], FENC_STRIDE, src, stride ) \
             + p_cost_mvx[ mx ] + p_cost_mvy[ my ]; \
    COPY3_IF_LT( bcost, cost, bmx, mx, bmy, my ); \
}

#define COST_MV_SATD( mx, my, dir ) \
if( b_refine_qpel || (dir^1) != odir ) \
{ \
    intptr_t stride = 16; \
    pixel *src = h->mc.get_ref( pix, &stride, &m->p_fref[0], m->i_stride[0], mx, my, bw, bh, &m->weight[0] ); \
    int cost = h->pixf.mbcmp_unaligned[i_pixel]( m->p_fenc[0], FENC_STRIDE, src, stride ) \
             + p_cost_mvx[ mx ] + p_cost_mvy[ my ]; \
    if( b_chroma_me && cost < bcost ) \
    { \
        if( CHROMA444 ) \
        { \
            stride = 16; \
            src = h->mc.get_ref( pix, &stride, &m->p_fref[4], m->i_stride[1], mx, my, bw, bh, &m->weight[1] ); \
            cost += h->pixf.mbcmp_unaligned[i_pixel]( m->p_fenc[1], FENC_STRIDE, src, stride ); \
            if( cost < bcost ) \
            { \
                stride = 16; \
                src = h->mc.get_ref( pix, &stride, &m->p_fref[8], m->i_stride[2], mx, my, bw, bh, &m->weight[2] ); \
                cost += h->pixf.mbcmp_unaligned[i_pixel]( m->p_fenc[2], FENC_STRIDE, src, stride ); \
            } \
        } \
        else \
        { \
            h->mc.mc_chroma( pix, pix+8, 16, m->p_fref[4], m->i_stride[1], \
                             mx, 2*(my+mvy_offset)>>chroma_v_shift, bw>>1, bh>>chroma_v_shift ); \
            if( m->weight[1].weightfn ) \
                m->weight[1].weightfn[bw>>3]( pix, 16, pix, 16, &m->weight[1], bh>>chroma_v_shift ); \
            cost += h->pixf.mbcmp[chromapix]( m->p_fenc[1], FENC_STRIDE, pix, 16 ); \
            if( cost < bcost ) \
            { \
                if( m->weight[2].weightfn ) \
                    m->weight[2].weightfn[bw>>3]( pix+8, 16, pix+8, 16, &m->weight[2], bh>>chroma_v_shift ); \
                cost += h->pixf.mbcmp[chromapix]( m->p_fenc[2], FENC_STRIDE, pix+8, 16 ); \
            } \
        } \
    } \
    COPY4_IF_LT( bcost, cost, bmx, mx, bmy, my, bdir, dir ); \
}

static void refine_subpel( x264_t *h, x264_me_t *m, int hpel_iters, int qpel_iters, int *p_halfpel_thresh, int b_refine_qpel )
{
    const int bw = x264_pixel_size[m->i_pixel].w;
    const int bh = x264_pixel_size[m->i_pixel].h;
    const uint16_t *p_cost_mvx = m->p_cost_mv - m->mvp[0];
    const uint16_t *p_cost_mvy = m->p_cost_mv - m->mvp[1];
    const int i_pixel = m->i_pixel;
    const int b_chroma_me = h->mb.b_chroma_me && (i_pixel <= PIXEL_8x8 || CHROMA444);
    int chromapix = h->luma2chroma_pixel[i_pixel];
    int chroma_v_shift = CHROMA_V_SHIFT;
    int mvy_offset = chroma_v_shift & MB_INTERLACED & m->i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;

    ALIGNED_ARRAY_32( pixel, pix,[64*18] ); // really 17x17x2, but round up for alignment
    ALIGNED_ARRAY_16( int, costs,[4] );

    int bmx = m->mv[0];
    int bmy = m->mv[1];
    int bcost = m->cost;
    int odir = -1, bdir;

    /* halfpel diamond search */
    if( hpel_iters )
    {
        /* try the subpel component of the predicted mv */
        if( h->mb.i_subpel_refine < 3 )
        {
            int mx = x264_clip3( m->mvp[0], h->mb.mv_min_spel[0]+2, h->mb.mv_max_spel[0]-2 );
            int my = x264_clip3( m->mvp[1], h->mb.mv_min_spel[1]+2, h->mb.mv_max_spel[1]-2 );
            if( (mx-bmx)|(my-bmy) )
                COST_MV_SAD( mx, my );
        }

        bcost <<= 6;
        for( int i = hpel_iters; i > 0; i-- )
        {
            int omx = bmx, omy = bmy;
            intptr_t stride = 64; // candidates are either all hpel or all qpel, so one stride is enough
            pixel *src0, *src1, *src2, *src3;
            src0 = h->mc.get_ref( pix,    &stride, m->p_fref, m->i_stride[0], omx, omy-2, bw, bh+1, &m->weight[0] );
            src2 = h->mc.get_ref( pix+32, &stride, m->p_fref, m->i_stride[0], omx-2, omy, bw+4, bh, &m->weight[0] );
            src1 = src0 + stride;
            src3 = src2 + 1;
            h->pixf.fpelcmp_x4[i_pixel]( m->p_fenc[0], src0, src1, src2, src3, stride, costs );
            costs[0] += p_cost_mvx[omx  ] + p_cost_mvy[omy-2];
            costs[1] += p_cost_mvx[omx  ] + p_cost_mvy[omy+2];
            costs[2] += p_cost_mvx[omx-2] + p_cost_mvy[omy  ];
            costs[3] += p_cost_mvx[omx+2] + p_cost_mvy[omy  ];
            COPY1_IF_LT( bcost, (costs[0]<<6)+2 );
            COPY1_IF_LT( bcost, (costs[1]<<6)+6 );
            COPY1_IF_LT( bcost, (costs[2]<<6)+16 );
            COPY1_IF_LT( bcost, (costs[3]<<6)+48 );
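            /* Same packed-direction trick as the fullpel searches, now in 6 bits:
             * codes 2/6/16/48 place -dy in bits 0-2 and -dx in bits 3-5 as 3-bit
             * signed fields (each step is +/-2 qpel, i.e. half a pixel), which the
             * arithmetic shifts below extract. */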
            if( !(bcost&63) )
                break;
            bmx -= (bcost<<26)>>29;
            bmy -= (bcost<<29)>>29;
            bcost &= ~63;
        }
        bcost >>= 6;
    }

    if( !b_refine_qpel && (h->pixf.mbcmp_unaligned[0] != h->pixf.fpelcmp[0] || b_chroma_me) )
    {
        bcost = COST_MAX;
        COST_MV_SATD( bmx, bmy, -1 );
    }

    /* early termination when examining multiple reference frames */
    if( p_halfpel_thresh )
    {
        if( (bcost*7)>>3 > *p_halfpel_thresh )
        {
            m->cost = bcost;
            m->mv[0] = bmx;
            m->mv[1] = bmy;
            // don't need cost_mv
            return;
        }
        else if( bcost < *p_halfpel_thresh )
            *p_halfpel_thresh = bcost;
    }

    /* quarterpel diamond search */
    if( h->mb.i_subpel_refine != 1 )
    {
        bdir = -1;
        for( int i = qpel_iters; i > 0; i-- )
        {
            if( bmy <= h->mb.mv_min_spel[1] || bmy >= h->mb.mv_max_spel[1] || bmx <= h->mb.mv_min_spel[0] || bmx >= h->mb.mv_max_spel[0] )
                break;
            odir = bdir;
            int omx = bmx, omy = bmy;
            COST_MV_SATD( omx, omy - 1, 0 );
            COST_MV_SATD( omx, omy + 1, 1 );
            COST_MV_SATD( omx - 1, omy, 2 );
            COST_MV_SATD( omx + 1, omy, 3 );
            if( (bmx == omx) & (bmy == omy) )
                break;
        }
    }
    /* Special simplified case for subme=1 */
    else if( bmy > h->mb.mv_min_spel[1] && bmy < h->mb.mv_max_spel[1] && bmx > h->mb.mv_min_spel[0] && bmx < h->mb.mv_max_spel[0] )
    {
        int omx = bmx, omy = bmy;
        /* We have to use mc_luma because all strides must be the same to use fpelcmp_x4 */
        h->mc.mc_luma( pix   , 64, m->p_fref, m->i_stride[0], omx, omy-1, bw, bh, &m->weight[0] );
        h->mc.mc_luma( pix+16, 64, m->p_fref, m->i_stride[0], omx, omy+1, bw, bh, &m->weight[0] );
        h->mc.mc_luma( pix+32, 64, m->p_fref, m->i_stride[0], omx-1, omy, bw, bh, &m->weight[0] );
        h->mc.mc_luma( pix+48, 64, m->p_fref, m->i_stride[0], omx+1, omy, bw, bh, &m->weight[0] );
        h->pixf.fpelcmp_x4[i_pixel]( m->p_fenc[0], pix, pix+16, pix+32, pix+48, 64, costs );
        costs[0] += p_cost_mvx[omx  ] + p_cost_mvy[omy-1];
        costs[1] += p_cost_mvx[omx  ] + p_cost_mvy[omy+1];
        costs[2] += p_cost_mvx[omx-1] + p_cost_mvy[omy  ];
        costs[3] += p_cost_mvx[omx+1] + p_cost_mvy[omy  ];
        bcost <<= 4;
        COPY1_IF_LT( bcost, (costs[0]<<4)+1 );
        COPY1_IF_LT( bcost, (costs[1]<<4)+3 );
        COPY1_IF_LT( bcost, (costs[2]<<4)+4 );
        COPY1_IF_LT( bcost, (costs[3]<<4)+12 );
        bmx -= (bcost<<28)>>30;
        bmy -= (bcost<<30)>>30;
        bcost >>= 4;
    }

    m->cost = bcost;
    m->mv[0] = bmx;
    m->mv[1] = bmy;
    m->cost_mv = p_cost_mvx[bmx] + p_cost_mvy[bmy];
}

#define BIME_CACHE( dx, dy, list )\
{\
    x264_me_t *m = m##list;\
    int i = 4 + 3*dx + dy;\
    int mvx = bm##list##x+dx;\
    int mvy = bm##list##y+dy;\
    stride[0][list][i] = bw;\
    src[0][list][i] = h->mc.get_ref( pixy_buf[list][i], &stride[0][list][i], &m->p_fref[0],\
                                     m->i_stride[0], mvx, mvy, bw, bh, x264_weight_none );\
    if( rd )\
    {\
        if( CHROMA444 )\
        {\
            stride[1][list][i] = bw;\
            src[1][list][i] = h->mc.get_ref( pixu_buf[list][i], &stride[1][list][i], &m->p_fref[4],\
                                             m->i_stride[1], mvx, mvy, bw, bh, x264_weight_none );\
            stride[2][list][i] = bw;\
            src[2][list][i] = h->mc.get_ref( pixv_buf[list][i], &stride[2][list][i], &m->p_fref[8],\
                                             m->i_stride[2], mvx, mvy, bw, bh, x264_weight_none );\
        }\
        else\
            h->mc.mc_chroma( pixu_buf[list][i], pixv_buf[list][i], 8, m->p_fref[4], m->i_stride[1],\
                             mvx, 2*(mvy+mv##list##y_offset)>>chroma_v_shift, bw>>1, bh>>chroma_v_shift );\
    }\
}

#define SATD_THRESH(cost) (cost+(cost>>4))
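
/* SATD_THRESH(cost) = cost + cost/16 ~= 1.06*cost: full RD costing below is
 * only run on candidates whose SATD comes within ~6% of the best seen so far. */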

/* Don't unroll the BIME_CACHE loop.  I couldn't find any way to force this
 * other than making its iteration count not a compile-time constant. */
int x264_iter_kludge = 0;

static void ALWAYS_INLINE x264_me_refine_bidir( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight, int i8, int i_lambda2, int rd )
{
    int x = i8&1;
    int y = i8>>1;
    int s8 = X264_SCAN8_0 + 2*x + 16*y;
    int16_t *cache0_mv = h->mb.cache.mv[0][s8];
    int16_t *cache1_mv = h->mb.cache.mv[1][s8];
    const int i_pixel = m0->i_pixel;
    const int bw = x264_pixel_size[i_pixel].w;
    const int bh = x264_pixel_size[i_pixel].h;
    ALIGNED_ARRAY_32( pixel, pixy_buf,[2],[9][16*16] );
    ALIGNED_ARRAY_32( pixel, pixu_buf,[2],[9][16*16] );
    ALIGNED_ARRAY_32( pixel, pixv_buf,[2],[9][16*16] );
    pixel *src[3][2][9];
    int chromapix = h->luma2chroma_pixel[i_pixel];
    int chroma_v_shift = CHROMA_V_SHIFT;
    int chroma_x = (8 >> CHROMA_H_SHIFT) * x;
    int chroma_y = (8 >> chroma_v_shift) * y;
    pixel *pix  = &h->mb.pic.p_fdec[0][8*x + 8*y*FDEC_STRIDE];
    pixel *pixu = &h->mb.pic.p_fdec[1][chroma_x + chroma_y*FDEC_STRIDE];
    pixel *pixv = &h->mb.pic.p_fdec[2][chroma_x + chroma_y*FDEC_STRIDE];
    int ref0 = h->mb.cache.ref[0][s8];
    int ref1 = h->mb.cache.ref[1][s8];
    const int mv0y_offset = chroma_v_shift & MB_INTERLACED & ref0 ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
    const int mv1y_offset = chroma_v_shift & MB_INTERLACED & ref1 ? (h->mb.i_mb_y & 1)*4 - 2 : 0;
    intptr_t stride[3][2][9];
    int bm0x = m0->mv[0];
    int bm0y = m0->mv[1];
    int bm1x = m1->mv[0];
    int bm1y = m1->mv[1];
    int bcost = COST_MAX;
    int mc_list0 = 1, mc_list1 = 1;
    uint64_t bcostrd = COST_MAX64;
    uint16_t amvd;
    /* each byte of visited represents 8 possible m1y positions, so a 4D array isn't needed */
    ALIGNED_ARRAY_64( uint8_t, visited,[8],[8][8] );
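    /* A pair ((m0x,m0y),(m1x,m1y)) maps to bit (m1y&7) of
     * visited[m0x&7][m0y&7][m1x&7].  The mod-8 hash can alias distinct
     * candidates, but a false "already visited" merely skips one candidate,
     * so the set doesn't need to be exact. */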
    /* all permutations of an offset in up to 2 of the dimensions */
    ALIGNED_4( static const int8_t dia4d[33][4] ) =
    {
        {0,0,0,0},
        {0,0,0,1}, {0,0,0,-1}, {0,0,1,0}, {0,0,-1,0},
        {0,1,0,0}, {0,-1,0,0}, {1,0,0,0}, {-1,0,0,0},
        {0,0,1,1}, {0,0,-1,-1},{0,1,1,0}, {0,-1,-1,0},
        {1,1,0,0}, {-1,-1,0,0},{1,0,0,1}, {-1,0,0,-1},
        {0,1,0,1}, {0,-1,0,-1},{1,0,1,0}, {-1,0,-1,0},
        {0,0,-1,1},{0,0,1,-1}, {0,-1,1,0},{0,1,-1,0},
        {-1,1,0,0},{1,-1,0,0}, {1,0,0,-1},{-1,0,0,1},
        {0,-1,0,1},{0,1,0,-1}, {-1,0,1,0},{1,0,-1,0},
    };

    if( bm0y < h->mb.mv_min_spel[1] + 8 || bm1y < h->mb.mv_min_spel[1] + 8 ||
        bm0y > h->mb.mv_max_spel[1] - 8 || bm1y > h->mb.mv_max_spel[1] - 8 ||
        bm0x < h->mb.mv_min_spel[0] + 8 || bm1x < h->mb.mv_min_spel[0] + 8 ||
        bm0x > h->mb.mv_max_spel[0] - 8 || bm1x > h->mb.mv_max_spel[0] - 8 )
        return;

    if( rd && m0->i_pixel != PIXEL_16x16 && i8 != 0 )
    {
        x264_mb_predict_mv( h, 0, i8<<2, bw>>2, m0->mvp );
        x264_mb_predict_mv( h, 1, i8<<2, bw>>2, m1->mvp );
    }

    const uint16_t *p_cost_m0x = m0->p_cost_mv - m0->mvp[0];
    const uint16_t *p_cost_m0y = m0->p_cost_mv - m0->mvp[1];
    const uint16_t *p_cost_m1x = m1->p_cost_mv - m1->mvp[0];
    const uint16_t *p_cost_m1y = m1->p_cost_mv - m1->mvp[1];

    h->mc.memzero_aligned( visited, sizeof(uint8_t[8][8][8]) );

    for( int pass = 0; pass < 8; pass++ )
    {
        int bestj = 0;
        /* check all mv pairs that differ in at most 2 components from the current mvs. */
        /* doesn't do chroma ME. this probably doesn't matter, as the gains
         * from bidir ME are the same with and without chroma ME. */

        if( mc_list0 )
            for( int j = x264_iter_kludge; j < 9; j++ )
                BIME_CACHE( square1[j][0], square1[j][1], 0 );

        if( mc_list1 )
            for( int j = x264_iter_kludge; j < 9; j++ )
                BIME_CACHE( square1[j][0], square1[j][1], 1 );

        for( int j = !!pass; j < 33; j++ )
        {
            int m0x = dia4d[j][0] + bm0x;
            int m0y = dia4d[j][1] + bm0y;
            int m1x = dia4d[j][2] + bm1x;
            int m1y = dia4d[j][3] + bm1y;
            if( !pass || !((visited[(m0x)&7][(m0y)&7][(m1x)&7] & (1<<((m1y)&7)))) )
            {
                int i0 = 4 + 3*dia4d[j][0] + dia4d[j][1];
                int i1 = 4 + 3*dia4d[j][2] + dia4d[j][3];
                visited[(m0x)&7][(m0y)&7][(m1x)&7] |= (1<<((m1y)&7));
                h->mc.avg[i_pixel]( pix, FDEC_STRIDE, src[0][0][i0], stride[0][0][i0], src[0][1][i1], stride[0][1][i1], i_weight );
                int cost = h->pixf.mbcmp[i_pixel]( m0->p_fenc[0], FENC_STRIDE, pix, FDEC_STRIDE )
                         + p_cost_m0x[m0x] + p_cost_m0y[m0y] + p_cost_m1x[m1x] + p_cost_m1y[m1y];
                if( rd )
                {
                    if( cost < SATD_THRESH(bcost) )
                    {
                        bcost = X264_MIN( cost, bcost );
                        M32( cache0_mv ) = pack16to32_mask(m0x,m0y);
                        M32( cache1_mv ) = pack16to32_mask(m1x,m1y);
                        if( CHROMA444 )
                        {
                            h->mc.avg[i_pixel]( pixu, FDEC_STRIDE, src[1][0][i0], stride[1][0][i0], src[1][1][i1], stride[1][1][i1], i_weight );
                            h->mc.avg[i_pixel]( pixv, FDEC_STRIDE, src[2][0][i0], stride[2][0][i0], src[2][1][i1], stride[2][1][i1], i_weight );
                        }
                        else
                        {
                            h->mc.avg[chromapix]( pixu, FDEC_STRIDE, pixu_buf[0][i0], 8, pixu_buf[1][i1], 8, i_weight );
                            h->mc.avg[chromapix]( pixv, FDEC_STRIDE, pixv_buf[0][i0], 8, pixv_buf[1][i1], 8, i_weight );
                        }
                        uint64_t costrd = x264_rd_cost_part( h, i_lambda2, i8*4, m0->i_pixel );
                        COPY2_IF_LT( bcostrd, costrd, bestj, j );
                    }
                }
                else
                    COPY2_IF_LT( bcost, cost, bestj, j );
            }
        }

        if( !bestj )
            break;

        bm0x += dia4d[bestj][0];
        bm0y += dia4d[bestj][1];
        bm1x += dia4d[bestj][2];
        bm1y += dia4d[bestj][3];

        mc_list0 = M16( &dia4d[bestj][0] );
        mc_list1 = M16( &dia4d[bestj][2] );
    }

    if( rd )
    {
        x264_macroblock_cache_mv ( h, 2*x, 2*y, bw>>2, bh>>2, 0, pack16to32_mask(bm0x, bm0y) );
        amvd = pack8to16( X264_MIN(abs(bm0x - m0->mvp[0]),33), X264_MIN(abs(bm0y - m0->mvp[1]),33) );
        x264_macroblock_cache_mvd( h, 2*x, 2*y, bw>>2, bh>>2, 0, amvd );

        x264_macroblock_cache_mv ( h, 2*x, 2*y, bw>>2, bh>>2, 1, pack16to32_mask(bm1x, bm1y) );
        amvd = pack8to16( X264_MIN(abs(bm1x - m1->mvp[0]),33), X264_MIN(abs(bm1y - m1->mvp[1]),33) );
        x264_macroblock_cache_mvd( h, 2*x, 2*y, bw>>2, bh>>2, 1, amvd );
    }

    m0->mv[0] = bm0x;
    m0->mv[1] = bm0y;
    m1->mv[0] = bm1x;
    m1->mv[1] = bm1y;
}

void x264_me_refine_bidir_satd( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight )
{
    x264_me_refine_bidir( h, m0, m1, i_weight, 0, 0, 0 );
}

void x264_me_refine_bidir_rd( x264_t *h, x264_me_t *m0, x264_me_t *m1, int i_weight, int i8, int i_lambda2 )
{
    /* Motion compensation is done as part of bidir_rd; don't repeat
     * it in encoding. */
    h->mb.b_skip_mc = 1;
    x264_me_refine_bidir( h, m0, m1, i_weight, i8, i_lambda2, 1 );
    h->mb.b_skip_mc = 0;
}

#undef COST_MV_SATD
#define COST_MV_SATD( mx, my, dst, avoid_mvp ) \
{ \
    if( !avoid_mvp || !(mx == pmx && my == pmy) ) \
    { \
        h->mc.mc_luma( pix, FDEC_STRIDE, m->p_fref, m->i_stride[0], mx, my, bw, bh, &m->weight[0] ); \
        dst = h->pixf.mbcmp[i_pixel]( m->p_fenc[0], FENC_STRIDE, pix, FDEC_STRIDE ) \
            + p_cost_mvx[mx] + p_cost_mvy[my]; \
        COPY1_IF_LT( bsatd, dst ); \
    } \
    else \
        dst = COST_MAX; \
}

#define COST_MV_RD( mx, my, satd, do_dir, mdir ) \
{ \
    if( satd <= SATD_THRESH(bsatd) ) \
    { \
        uint64_t cost; \
        M32( cache_mv ) = pack16to32_mask(mx,my); \
        if( CHROMA444 ) \
        { \
            h->mc.mc_luma( pixu, FDEC_STRIDE, &m->p_fref[4], m->i_stride[1], mx, my, bw, bh, &m->weight[1] ); \
            h->mc.mc_luma( pixv, FDEC_STRIDE, &m->p_fref[8], m->i_stride[2], mx, my, bw, bh, &m->weight[2] ); \
        } \
        else if( m->i_pixel <= PIXEL_8x8 ) \
        { \
            h->mc.mc_chroma( pixu, pixv, FDEC_STRIDE, m->p_fref[4], m->i_stride[1], \
                             mx, 2*(my+mvy_offset)>>chroma_v_shift, bw>>1, bh>>chroma_v_shift ); \
            if( m->weight[1].weightfn ) \
                m->weight[1].weightfn[bw>>3]( pixu, FDEC_STRIDE, pixu, FDEC_STRIDE, &m->weight[1], bh>>chroma_v_shift ); \
            if( m->weight[2].weightfn ) \
                m->weight[2].weightfn[bw>>3]( pixv, FDEC_STRIDE, pixv, FDEC_STRIDE, &m->weight[2], bh>>chroma_v_shift ); \
        } \
        cost = x264_rd_cost_part( h, i_lambda2, i4, m->i_pixel ); \
        COPY4_IF_LT( bcost, cost, bmx, mx, bmy, my, dir, do_dir?mdir:dir ); \
    } \
}

void x264_me_refine_qpel_rd( x264_t *h, x264_me_t *m, int i_lambda2, int i4, int i_list )
{
    int16_t *cache_mv = h->mb.cache.mv[i_list][x264_scan8[i4]];
    const uint16_t *p_cost_mvx, *p_cost_mvy;
    const int bw = x264_pixel_size[m->i_pixel].w;
    const int bh = x264_pixel_size[m->i_pixel].h;
    const int i_pixel = m->i_pixel;
    int chroma_v_shift = CHROMA_V_SHIFT;
    int mvy_offset = chroma_v_shift & MB_INTERLACED & m->i_ref ? (h->mb.i_mb_y & 1)*4 - 2 : 0;

    uint64_t bcost = COST_MAX64;
    int bmx = m->mv[0];
    int bmy = m->mv[1];
    int omx, omy, pmx, pmy;
    int satd, bsatd;
    int dir = -2;
    int i8 = i4>>2;
    uint16_t amvd;

    pixel *pix  = &h->mb.pic.p_fdec[0][block_idx_xy_fdec[i4]];
    pixel *pixu, *pixv;
    if( CHROMA444 )
    {
        pixu = &h->mb.pic.p_fdec[1][block_idx_xy_fdec[i4]];
        pixv = &h->mb.pic.p_fdec[2][block_idx_xy_fdec[i4]];
    }
    else
    {
        pixu = &h->mb.pic.p_fdec[1][(i8>>1)*(8*FDEC_STRIDE>>chroma_v_shift)+(i8&1)*4];
        pixv = &h->mb.pic.p_fdec[2][(i8>>1)*(8*FDEC_STRIDE>>chroma_v_shift)+(i8&1)*4];
    }

    h->mb.b_skip_mc = 1;

    if( m->i_pixel != PIXEL_16x16 && i4 != 0 )
        x264_mb_predict_mv( h, i_list, i4, bw>>2, m->mvp );
    pmx = m->mvp[0];
    pmy = m->mvp[1];
    p_cost_mvx = m->p_cost_mv - pmx;
    p_cost_mvy = m->p_cost_mv - pmy;
    COST_MV_SATD( bmx, bmy, bsatd, 0 );
    if( m->i_pixel != PIXEL_16x16 )
        COST_MV_RD( bmx, bmy, 0, 0, 0 )
    else
        bcost = m->cost;

    /* check the predicted mv */
    if( (bmx != pmx || bmy != pmy)
        && pmx >= h->mb.mv_min_spel[0] && pmx <= h->mb.mv_max_spel[0]
        && pmy >= h->mb.mv_min_spel[1] && pmy <= h->mb.mv_max_spel[1] )
    {
        COST_MV_SATD( pmx, pmy, satd, 0 );
        COST_MV_RD  ( pmx, pmy, satd, 0, 0 );
        /* The hex motion search is guaranteed to not repeat the center candidate,
         * so if pmv is chosen, set the "MV to avoid checking" to bmv instead. */
        if( bmx == pmx && bmy == pmy )
        {
            pmx = m->mv[0];
            pmy = m->mv[1];
        }
    }

    if( bmy < h->mb.mv_min_spel[1] + 3 || bmy > h->mb.mv_max_spel[1] - 3 ||
        bmx < h->mb.mv_min_spel[0] + 3 || bmx > h->mb.mv_max_spel[0] - 3 )
    {
        h->mb.b_skip_mc = 0;
        return;
    }

    /* subpel hex search, same pattern as ME HEX. */
    dir = -2;
    omx = bmx;
    omy = bmy;
    for( int j = 0; j < 6; j++ )
    {
        COST_MV_SATD( omx + hex2[j+1][0], omy + hex2[j+1][1], satd, 1 );
        COST_MV_RD  ( omx + hex2[j+1][0], omy + hex2[j+1][1], satd, 1, j );
    }

    if( dir != -2 )
    {
        /* half hexagon, not overlapping the previous iteration */
        for( int i = 1; i < 10; i++ )
        {
            const int odir = mod6m1[dir+1];
            if( bmy < h->mb.mv_min_spel[1] + 3 ||
                bmy > h->mb.mv_max_spel[1] - 3 )
                break;
            dir = -2;
            omx = bmx;
            omy = bmy;
            for( int j = 0; j < 3; j++ )
            {
                COST_MV_SATD( omx + hex2[odir+j][0], omy + hex2[odir+j][1], satd, 1 );
                COST_MV_RD  ( omx + hex2[odir+j][0], omy + hex2[odir+j][1], satd, 1, odir-1+j );
            }
            if( dir == -2 )
                break;
        }
    }

    /* square refine, same pattern as ME HEX. */
    omx = bmx;
    omy = bmy;
    for( int i = 0; i < 8; i++ )
    {
        COST_MV_SATD( omx + square1[i+1][0], omy + square1[i+1][1], satd, 1 );
        COST_MV_RD  ( omx + square1[i+1][0], omy + square1[i+1][1], satd, 0, 0 );
    }

    m->cost = bcost;
    m->mv[0] = bmx;
    m->mv[1] = bmy;
    x264_macroblock_cache_mv ( h, block_idx_x[i4], block_idx_y[i4], bw>>2, bh>>2, i_list, pack16to32_mask(bmx, bmy) );
    amvd = pack8to16( X264_MIN(abs(bmx - m->mvp[0]),66), X264_MIN(abs(bmy - m->mvp[1]),66) );
    x264_macroblock_cache_mvd( h, block_idx_x[i4], block_idx_y[i4], bw>>2, bh>>2, i_list, amvd );
    h->mb.b_skip_mc = 0;
}