Merge "vp8_rd_pick_best_mbsegmentation code restructure"
[libvpx.git] / vp8 / common / reconintra.c
/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "vpx_ports/config.h"
#include "recon.h"
#include "reconintra.h"
#include "vpx_mem/vpx_mem.h"

/* For skip_recon_mb(), add vp8_build_intra_predictors_mby_s(MACROBLOCKD *x) and
 * vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x).
 */
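
/* The _s variants below build the prediction directly into the destination
 * frame buffers (x->dst.y_buffer / u_buffer / v_buffer, stepping by the frame
 * stride), whereas the non-_s versions write into the per-macroblock
 * predictor buffer x->predictor with fixed row strides of 16 (Y) and 8 (U/V).
 */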

void vp8_recon_intra_mbuv(const vp8_recon_rtcd_vtable_t *rtcd, MACROBLOCKD *x)
{
    int i;

    /* Add the residual to the predictor for the eight chroma blocks
     * (indices 16..23), two 4x4 blocks per call. */
    for (i = 16; i < 24; i += 2)
    {
        BLOCKD *b = &x->block[i];
        RECON_INVOKE(rtcd, recon2)(b->predictor, b->diff, *(b->base_dst) + b->dst, b->dst_stride);
    }
}
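
/* Build the 16x16 intra luma prediction for the current macroblock into
 * x->predictor, using the Y mode in x->mode_info_context->mbmi.mode and the
 * reconstructed pixels above and to the left of the macroblock.
 */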
void vp8_build_intra_predictors_mby(MACROBLOCKD *x)
{
    unsigned char *yabove_row = x->dst.y_buffer - x->dst.y_stride;
    unsigned char yleft_col[16];
    unsigned char ytop_left = yabove_row[-1];
    unsigned char *ypred_ptr = x->predictor;
    int r, c, i;

    for (i = 0; i < 16; i++)
    {
        yleft_col[i] = x->dst.y_buffer[i * x->dst.y_stride - 1];
    }

    /* for Y */
    switch (x->mode_info_context->mbmi.mode)
    {
    case DC_PRED:
    {
        int expected_dc;
        int i;
        int shift;
        int average = 0;

        if (x->up_available || x->left_available)
        {
            if (x->up_available)
            {
                for (i = 0; i < 16; i++)
                {
                    average += yabove_row[i];
                }
            }

            if (x->left_available)
            {
                for (i = 0; i < 16; i++)
                {
                    average += yleft_col[i];
                }
            }

            /* Rounded mean of the available border pixels (16 or 32 of them). */
            shift = 3 + x->up_available + x->left_available;
            expected_dc = (average + (1 << (shift - 1))) >> shift;
        }
        else
        {
            expected_dc = 128;
        }

        vpx_memset(ypred_ptr, expected_dc, 256);
    }
    break;
    case V_PRED:
    {
        /* Copy the row above into every row of the prediction. */
        for (r = 0; r < 16; r++)
        {
            ((int *)ypred_ptr)[0] = ((int *)yabove_row)[0];
            ((int *)ypred_ptr)[1] = ((int *)yabove_row)[1];
            ((int *)ypred_ptr)[2] = ((int *)yabove_row)[2];
            ((int *)ypred_ptr)[3] = ((int *)yabove_row)[3];
            ypred_ptr += 16;
        }
    }
    break;
    case H_PRED:
    {
        /* Fill each row with the pixel to its left. */
        for (r = 0; r < 16; r++)
        {
            vpx_memset(ypred_ptr, yleft_col[r], 16);
            ypred_ptr += 16;
        }
    }
    break;
    case TM_PRED:
    {
        /* TrueMotion: left + above - top-left, clamped to [0, 255]. */
        for (r = 0; r < 16; r++)
        {
            for (c = 0; c < 16; c++)
            {
                int pred = yleft_col[r] + yabove_row[c] - ytop_left;

                if (pred < 0)
                    pred = 0;

                if (pred > 255)
                    pred = 255;

                ypred_ptr[c] = pred;
            }

            ypred_ptr += 16;
        }
    }
    break;
    case B_PRED:
    case NEARESTMV:
    case NEARMV:
    case ZEROMV:
    case NEWMV:
    case SPLITMV:
    case MB_MODE_COUNT:
        break;
    }
}
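
/* Same prediction as vp8_build_intra_predictors_mby(), but written straight
 * into the destination frame buffer (x->dst.y_buffer, advancing by the frame
 * stride) instead of x->predictor; used for the skip_recon_mb() path.
 */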
void vp8_build_intra_predictors_mby_s(MACROBLOCKD *x)
{
    unsigned char *yabove_row = x->dst.y_buffer - x->dst.y_stride;
    unsigned char yleft_col[16];
    unsigned char ytop_left = yabove_row[-1];
    unsigned char *ypred_ptr = x->predictor;
    int r, c, i;
    int y_stride = x->dst.y_stride;

    ypred_ptr = x->dst.y_buffer; /*x->predictor;*/

    for (i = 0; i < 16; i++)
    {
        yleft_col[i] = x->dst.y_buffer[i * x->dst.y_stride - 1];
    }

    /* for Y */
    switch (x->mode_info_context->mbmi.mode)
    {
    case DC_PRED:
    {
        int expected_dc;
        int i;
        int shift;
        int average = 0;

        if (x->up_available || x->left_available)
        {
            if (x->up_available)
            {
                for (i = 0; i < 16; i++)
                {
                    average += yabove_row[i];
                }
            }

            if (x->left_available)
            {
                for (i = 0; i < 16; i++)
                {
                    average += yleft_col[i];
                }
            }

            shift = 3 + x->up_available + x->left_available;
            expected_dc = (average + (1 << (shift - 1))) >> shift;
        }
        else
        {
            expected_dc = 128;
        }

        /*vpx_memset(ypred_ptr, expected_dc, 256);*/
        for (r = 0; r < 16; r++)
        {
            vpx_memset(ypred_ptr, expected_dc, 16);
            ypred_ptr += y_stride; /*16;*/
        }
    }
    break;
    case V_PRED:
    {
        for (r = 0; r < 16; r++)
        {
            ((int *)ypred_ptr)[0] = ((int *)yabove_row)[0];
            ((int *)ypred_ptr)[1] = ((int *)yabove_row)[1];
            ((int *)ypred_ptr)[2] = ((int *)yabove_row)[2];
            ((int *)ypred_ptr)[3] = ((int *)yabove_row)[3];
            ypred_ptr += y_stride; /*16;*/
        }
    }
    break;
    case H_PRED:
    {
        for (r = 0; r < 16; r++)
        {
            vpx_memset(ypred_ptr, yleft_col[r], 16);
            ypred_ptr += y_stride; /*16;*/
        }
    }
    break;
    case TM_PRED:
    {
        for (r = 0; r < 16; r++)
        {
            for (c = 0; c < 16; c++)
            {
                int pred = yleft_col[r] + yabove_row[c] - ytop_left;

                if (pred < 0)
                    pred = 0;

                if (pred > 255)
                    pred = 255;

                ypred_ptr[c] = pred;
            }

            ypred_ptr += y_stride; /*16;*/
        }
    }
    break;
    case B_PRED:
    case NEARESTMV:
    case NEARMV:
    case ZEROMV:
    case NEWMV:
    case SPLITMV:
    case MB_MODE_COUNT:
        break;
    }
}
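
/* Build the 8x8 U and V intra predictions for the macroblock into
 * x->predictor at offsets 256 (U) and 320 (V), using the chroma mode in
 * x->mode_info_context->mbmi.uv_mode.
 */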
void vp8_build_intra_predictors_mbuv(MACROBLOCKD *x)
{
    unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride;
    unsigned char uleft_col[16];
    unsigned char utop_left = uabove_row[-1];
    unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride;
    unsigned char vleft_col[20];
    unsigned char vtop_left = vabove_row[-1];
    unsigned char *upred_ptr = &x->predictor[256];
    unsigned char *vpred_ptr = &x->predictor[320];
    int i, j;

    for (i = 0; i < 8; i++)
    {
        uleft_col[i] = x->dst.u_buffer[i * x->dst.uv_stride - 1];
        vleft_col[i] = x->dst.v_buffer[i * x->dst.uv_stride - 1];
    }

    switch (x->mode_info_context->mbmi.uv_mode)
    {
    case DC_PRED:
    {
        int expected_udc;
        int expected_vdc;
        int i;
        int shift;
        int Uaverage = 0;
        int Vaverage = 0;

        if (x->up_available)
        {
            for (i = 0; i < 8; i++)
            {
                Uaverage += uabove_row[i];
                Vaverage += vabove_row[i];
            }
        }

        if (x->left_available)
        {
            for (i = 0; i < 8; i++)
            {
                Uaverage += uleft_col[i];
                Vaverage += vleft_col[i];
            }
        }

        if (!x->up_available && !x->left_available)
        {
            expected_udc = 128;
            expected_vdc = 128;
        }
        else
        {
            shift = 2 + x->up_available + x->left_available;
            expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
            expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
        }

        vpx_memset(upred_ptr, expected_udc, 64);
        vpx_memset(vpred_ptr, expected_vdc, 64);
    }
    break;
    case V_PRED:
    {
        int i;

        for (i = 0; i < 8; i++)
        {
            vpx_memcpy(upred_ptr, uabove_row, 8);
            vpx_memcpy(vpred_ptr, vabove_row, 8);
            upred_ptr += 8;
            vpred_ptr += 8;
        }
    }
    break;
    case H_PRED:
    {
        int i;

        for (i = 0; i < 8; i++)
        {
            vpx_memset(upred_ptr, uleft_col[i], 8);
            vpx_memset(vpred_ptr, vleft_col[i], 8);
            upred_ptr += 8;
            vpred_ptr += 8;
        }
    }
    break;
    case TM_PRED:
    {
        int i;

        for (i = 0; i < 8; i++)
        {
            for (j = 0; j < 8; j++)
            {
                int predu = uleft_col[i] + uabove_row[j] - utop_left;
                int predv = vleft_col[i] + vabove_row[j] - vtop_left;

                if (predu < 0)
                    predu = 0;

                if (predu > 255)
                    predu = 255;

                if (predv < 0)
                    predv = 0;

                if (predv > 255)
                    predv = 255;

                upred_ptr[j] = predu;
                vpred_ptr[j] = predv;
            }

            upred_ptr += 8;
            vpred_ptr += 8;
        }
    }
    break;
    case B_PRED:
    case NEARESTMV:
    case NEARMV:
    case ZEROMV:
    case NEWMV:
    case SPLITMV:
    case MB_MODE_COUNT:
        break;
    }
}
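
/* Same prediction as vp8_build_intra_predictors_mbuv(), but written directly
 * into x->dst.u_buffer and x->dst.v_buffer, advancing by uv_stride; used for
 * the skip_recon_mb() path together with vp8_build_intra_predictors_mby_s().
 */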
void vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x)
{
    unsigned char *uabove_row = x->dst.u_buffer - x->dst.uv_stride;
    unsigned char uleft_col[16];
    unsigned char utop_left = uabove_row[-1];
    unsigned char *vabove_row = x->dst.v_buffer - x->dst.uv_stride;
    unsigned char vleft_col[20];
    unsigned char vtop_left = vabove_row[-1];
    unsigned char *upred_ptr = x->dst.u_buffer; /*&x->predictor[256];*/
    unsigned char *vpred_ptr = x->dst.v_buffer; /*&x->predictor[320];*/
    int uv_stride = x->dst.uv_stride;
    int i, j;

    for (i = 0; i < 8; i++)
    {
        uleft_col[i] = x->dst.u_buffer[i * x->dst.uv_stride - 1];
        vleft_col[i] = x->dst.v_buffer[i * x->dst.uv_stride - 1];
    }

    switch (x->mode_info_context->mbmi.uv_mode)
    {
    case DC_PRED:
    {
        int expected_udc;
        int expected_vdc;
        int i;
        int shift;
        int Uaverage = 0;
        int Vaverage = 0;

        if (x->up_available)
        {
            for (i = 0; i < 8; i++)
            {
                Uaverage += uabove_row[i];
                Vaverage += vabove_row[i];
            }
        }

        if (x->left_available)
        {
            for (i = 0; i < 8; i++)
            {
                Uaverage += uleft_col[i];
                Vaverage += vleft_col[i];
            }
        }

        if (!x->up_available && !x->left_available)
        {
            expected_udc = 128;
            expected_vdc = 128;
        }
        else
        {
            shift = 2 + x->up_available + x->left_available;
            expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
            expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
        }

        /*vpx_memset(upred_ptr, expected_udc, 64);*/
        /*vpx_memset(vpred_ptr, expected_vdc, 64);*/
        for (i = 0; i < 8; i++)
        {
            vpx_memset(upred_ptr, expected_udc, 8);
            vpx_memset(vpred_ptr, expected_vdc, 8);
            upred_ptr += uv_stride; /*8;*/
            vpred_ptr += uv_stride; /*8;*/
        }
    }
    break;
    case V_PRED:
    {
        int i;

        for (i = 0; i < 8; i++)
        {
            vpx_memcpy(upred_ptr, uabove_row, 8);
            vpx_memcpy(vpred_ptr, vabove_row, 8);
            upred_ptr += uv_stride; /*8;*/
            vpred_ptr += uv_stride; /*8;*/
        }
    }
    break;
    case H_PRED:
    {
        int i;

        for (i = 0; i < 8; i++)
        {
            vpx_memset(upred_ptr, uleft_col[i], 8);
            vpx_memset(vpred_ptr, vleft_col[i], 8);
            upred_ptr += uv_stride; /*8;*/
            vpred_ptr += uv_stride; /*8;*/
        }
    }
    break;
    case TM_PRED:
    {
        int i;

        for (i = 0; i < 8; i++)
        {
            for (j = 0; j < 8; j++)
            {
                int predu = uleft_col[i] + uabove_row[j] - utop_left;
                int predv = vleft_col[i] + vabove_row[j] - vtop_left;

                if (predu < 0)
                    predu = 0;

                if (predu > 255)
                    predu = 255;

                if (predv < 0)
                    predv = 0;

                if (predv > 255)
                    predv = 255;

                upred_ptr[j] = predu;
                vpred_ptr[j] = predv;
            }

            upred_ptr += uv_stride; /*8;*/
            vpred_ptr += uv_stride; /*8;*/
        }
    }
    break;
    case B_PRED:
    case NEARESTMV:
    case NEARMV:
    case ZEROMV:
    case NEWMV:
    case SPLITMV:
    case MB_MODE_COUNT:
        break;
    }
}