/* libvpx: vp8/decoder/reconintra_mt.c
 * (blob b9d2b370364130c710939d13bb25445d1c7f88dc,
 *  from commit "Define RDCOST only once")
 */
1 /*
2 * Copyright (c) 2010 The WebM project authors. All Rights Reserved.
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
12 #include "vpx_ports/config.h"
13 #include "vp8/common/recon.h"
14 #include "vp8/common/reconintra.h"
15 #include "vpx_mem/vpx_mem.h"
16 #include "onyxd_int.h"
/* For skip_recon_mb(), add vp8_build_intra_predictors_mby_s(MACROBLOCKD *x) and
 * vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x).
 */
22 void vp8mt_build_intra_predictors_mby(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
24 unsigned char *yabove_row; /* = x->dst.y_buffer - x->dst.y_stride; */
25 unsigned char *yleft_col;
26 unsigned char yleft_buf[16];
27 unsigned char ytop_left; /* = yabove_row[-1]; */
28 unsigned char *ypred_ptr = x->predictor;
29 int r, c, i;
31 if (pbi->common.filter_level)
33 yabove_row = pbi->mt_yabove_row[mb_row] + mb_col*16 +32;
34 yleft_col = pbi->mt_yleft_col[mb_row];
35 } else
37 yabove_row = x->dst.y_buffer - x->dst.y_stride;
39 for (i = 0; i < 16; i++)
40 yleft_buf[i] = x->dst.y_buffer [i* x->dst.y_stride -1];
41 yleft_col = yleft_buf;
44 ytop_left = yabove_row[-1];
46 /* for Y */
47 switch (x->mode_info_context->mbmi.mode)
49 case DC_PRED:
51 int expected_dc;
52 int i;
53 int shift;
54 int average = 0;
57 if (x->up_available || x->left_available)
59 if (x->up_available)
61 for (i = 0; i < 16; i++)
63 average += yabove_row[i];
67 if (x->left_available)
70 for (i = 0; i < 16; i++)
72 average += yleft_col[i];
79 shift = 3 + x->up_available + x->left_available;
80 expected_dc = (average + (1 << (shift - 1))) >> shift;
82 else
84 expected_dc = 128;
87 vpx_memset(ypred_ptr, expected_dc, 256);
89 break;
90 case V_PRED:
93 for (r = 0; r < 16; r++)
96 ((int *)ypred_ptr)[0] = ((int *)yabove_row)[0];
97 ((int *)ypred_ptr)[1] = ((int *)yabove_row)[1];
98 ((int *)ypred_ptr)[2] = ((int *)yabove_row)[2];
99 ((int *)ypred_ptr)[3] = ((int *)yabove_row)[3];
100 ypred_ptr += 16;
103 break;
104 case H_PRED:
107 for (r = 0; r < 16; r++)
110 vpx_memset(ypred_ptr, yleft_col[r], 16);
111 ypred_ptr += 16;
115 break;
116 case TM_PRED:
119 for (r = 0; r < 16; r++)
121 for (c = 0; c < 16; c++)
123 int pred = yleft_col[r] + yabove_row[ c] - ytop_left;
125 if (pred < 0)
126 pred = 0;
128 if (pred > 255)
129 pred = 255;
131 ypred_ptr[c] = pred;
134 ypred_ptr += 16;
138 break;
139 case B_PRED:
140 case NEARESTMV:
141 case NEARMV:
142 case ZEROMV:
143 case NEWMV:
144 case SPLITMV:
145 case MB_MODE_COUNT:
146 break;
150 void vp8mt_build_intra_predictors_mby_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
152 unsigned char *yabove_row; /* = x->dst.y_buffer - x->dst.y_stride; */
153 unsigned char *yleft_col;
154 unsigned char yleft_buf[16];
155 unsigned char ytop_left; /* = yabove_row[-1]; */
156 unsigned char *ypred_ptr = x->predictor;
157 int r, c, i;
159 int y_stride = x->dst.y_stride;
160 ypred_ptr = x->dst.y_buffer; /*x->predictor;*/
162 if (pbi->common.filter_level)
164 yabove_row = pbi->mt_yabove_row[mb_row] + mb_col*16 +32;
165 yleft_col = pbi->mt_yleft_col[mb_row];
166 } else
168 yabove_row = x->dst.y_buffer - x->dst.y_stride;
170 for (i = 0; i < 16; i++)
171 yleft_buf[i] = x->dst.y_buffer [i* x->dst.y_stride -1];
172 yleft_col = yleft_buf;
175 ytop_left = yabove_row[-1];
177 /* for Y */
178 switch (x->mode_info_context->mbmi.mode)
180 case DC_PRED:
182 int expected_dc;
183 int i;
184 int shift;
185 int average = 0;
188 if (x->up_available || x->left_available)
190 if (x->up_available)
192 for (i = 0; i < 16; i++)
194 average += yabove_row[i];
198 if (x->left_available)
201 for (i = 0; i < 16; i++)
203 average += yleft_col[i];
210 shift = 3 + x->up_available + x->left_available;
211 expected_dc = (average + (1 << (shift - 1))) >> shift;
213 else
215 expected_dc = 128;
218 /*vpx_memset(ypred_ptr, expected_dc, 256);*/
219 for (r = 0; r < 16; r++)
221 vpx_memset(ypred_ptr, expected_dc, 16);
222 ypred_ptr += y_stride; /*16;*/
225 break;
226 case V_PRED:
229 for (r = 0; r < 16; r++)
232 ((int *)ypred_ptr)[0] = ((int *)yabove_row)[0];
233 ((int *)ypred_ptr)[1] = ((int *)yabove_row)[1];
234 ((int *)ypred_ptr)[2] = ((int *)yabove_row)[2];
235 ((int *)ypred_ptr)[3] = ((int *)yabove_row)[3];
236 ypred_ptr += y_stride; /*16;*/
239 break;
240 case H_PRED:
243 for (r = 0; r < 16; r++)
246 vpx_memset(ypred_ptr, yleft_col[r], 16);
247 ypred_ptr += y_stride; /*16;*/
251 break;
252 case TM_PRED:
255 for (r = 0; r < 16; r++)
257 for (c = 0; c < 16; c++)
259 int pred = yleft_col[r] + yabove_row[ c] - ytop_left;
261 if (pred < 0)
262 pred = 0;
264 if (pred > 255)
265 pred = 255;
267 ypred_ptr[c] = pred;
270 ypred_ptr += y_stride; /*16;*/
274 break;
275 case B_PRED:
276 case NEARESTMV:
277 case NEARMV:
278 case ZEROMV:
279 case NEWMV:
280 case SPLITMV:
281 case MB_MODE_COUNT:
282 break;
286 void vp8mt_build_intra_predictors_mbuv(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
288 unsigned char *uabove_row; /* = x->dst.u_buffer - x->dst.uv_stride; */
289 unsigned char *uleft_col; /*[16];*/
290 unsigned char uleft_buf[8];
291 unsigned char utop_left; /* = uabove_row[-1]; */
292 unsigned char *vabove_row; /* = x->dst.v_buffer - x->dst.uv_stride; */
293 unsigned char *vleft_col; /*[20];*/
294 unsigned char vleft_buf[8];
295 unsigned char vtop_left; /* = vabove_row[-1]; */
296 unsigned char *upred_ptr = &x->predictor[256];
297 unsigned char *vpred_ptr = &x->predictor[320];
298 int i, j;
300 if (pbi->common.filter_level)
302 uabove_row = pbi->mt_uabove_row[mb_row] + mb_col*8 +16;
303 vabove_row = pbi->mt_vabove_row[mb_row] + mb_col*8 +16;
304 uleft_col = pbi->mt_uleft_col[mb_row];
305 vleft_col = pbi->mt_vleft_col[mb_row];
306 } else
308 uabove_row = x->dst.u_buffer - x->dst.uv_stride;
309 vabove_row = x->dst.v_buffer - x->dst.uv_stride;
311 for (i = 0; i < 8; i++)
313 uleft_buf[i] = x->dst.u_buffer [i* x->dst.uv_stride -1];
314 vleft_buf[i] = x->dst.v_buffer [i* x->dst.uv_stride -1];
316 uleft_col = uleft_buf;
317 vleft_col = vleft_buf;
319 utop_left = uabove_row[-1];
320 vtop_left = vabove_row[-1];
322 switch (x->mode_info_context->mbmi.uv_mode)
324 case DC_PRED:
326 int expected_udc;
327 int expected_vdc;
328 int i;
329 int shift;
330 int Uaverage = 0;
331 int Vaverage = 0;
333 if (x->up_available)
335 for (i = 0; i < 8; i++)
337 Uaverage += uabove_row[i];
338 Vaverage += vabove_row[i];
342 if (x->left_available)
344 for (i = 0; i < 8; i++)
346 Uaverage += uleft_col[i];
347 Vaverage += vleft_col[i];
351 if (!x->up_available && !x->left_available)
353 expected_udc = 128;
354 expected_vdc = 128;
356 else
358 shift = 2 + x->up_available + x->left_available;
359 expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
360 expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
364 vpx_memset(upred_ptr, expected_udc, 64);
365 vpx_memset(vpred_ptr, expected_vdc, 64);
369 break;
370 case V_PRED:
372 int i;
374 for (i = 0; i < 8; i++)
376 vpx_memcpy(upred_ptr, uabove_row, 8);
377 vpx_memcpy(vpred_ptr, vabove_row, 8);
378 upred_ptr += 8;
379 vpred_ptr += 8;
383 break;
384 case H_PRED:
386 int i;
388 for (i = 0; i < 8; i++)
390 vpx_memset(upred_ptr, uleft_col[i], 8);
391 vpx_memset(vpred_ptr, vleft_col[i], 8);
392 upred_ptr += 8;
393 vpred_ptr += 8;
397 break;
398 case TM_PRED:
400 int i;
402 for (i = 0; i < 8; i++)
404 for (j = 0; j < 8; j++)
406 int predu = uleft_col[i] + uabove_row[j] - utop_left;
407 int predv = vleft_col[i] + vabove_row[j] - vtop_left;
409 if (predu < 0)
410 predu = 0;
412 if (predu > 255)
413 predu = 255;
415 if (predv < 0)
416 predv = 0;
418 if (predv > 255)
419 predv = 255;
421 upred_ptr[j] = predu;
422 vpred_ptr[j] = predv;
425 upred_ptr += 8;
426 vpred_ptr += 8;
430 break;
431 case B_PRED:
432 case NEARESTMV:
433 case NEARMV:
434 case ZEROMV:
435 case NEWMV:
436 case SPLITMV:
437 case MB_MODE_COUNT:
438 break;
442 void vp8mt_build_intra_predictors_mbuv_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
444 unsigned char *uabove_row; /* = x->dst.u_buffer - x->dst.uv_stride; */
445 unsigned char *uleft_col; /*[16];*/
446 unsigned char uleft_buf[8];
447 unsigned char utop_left; /* = uabove_row[-1]; */
448 unsigned char *vabove_row; /* = x->dst.v_buffer - x->dst.uv_stride; */
449 unsigned char *vleft_col; /*[20];*/
450 unsigned char vleft_buf[8];
451 unsigned char vtop_left; /* = vabove_row[-1]; */
452 unsigned char *upred_ptr = x->dst.u_buffer; /*&x->predictor[256];*/
453 unsigned char *vpred_ptr = x->dst.v_buffer; /*&x->predictor[320];*/
454 int uv_stride = x->dst.uv_stride;
455 int i, j;
457 if (pbi->common.filter_level)
459 uabove_row = pbi->mt_uabove_row[mb_row] + mb_col*8 +16;
460 vabove_row = pbi->mt_vabove_row[mb_row] + mb_col*8 +16;
461 uleft_col = pbi->mt_uleft_col[mb_row];
462 vleft_col = pbi->mt_vleft_col[mb_row];
463 } else
465 uabove_row = x->dst.u_buffer - x->dst.uv_stride;
466 vabove_row = x->dst.v_buffer - x->dst.uv_stride;
468 for (i = 0; i < 8; i++)
470 uleft_buf[i] = x->dst.u_buffer [i* x->dst.uv_stride -1];
471 vleft_buf[i] = x->dst.v_buffer [i* x->dst.uv_stride -1];
473 uleft_col = uleft_buf;
474 vleft_col = vleft_buf;
476 utop_left = uabove_row[-1];
477 vtop_left = vabove_row[-1];
479 switch (x->mode_info_context->mbmi.uv_mode)
481 case DC_PRED:
483 int expected_udc;
484 int expected_vdc;
485 int i;
486 int shift;
487 int Uaverage = 0;
488 int Vaverage = 0;
490 if (x->up_available)
492 for (i = 0; i < 8; i++)
494 Uaverage += uabove_row[i];
495 Vaverage += vabove_row[i];
499 if (x->left_available)
501 for (i = 0; i < 8; i++)
503 Uaverage += uleft_col[i];
504 Vaverage += vleft_col[i];
508 if (!x->up_available && !x->left_available)
510 expected_udc = 128;
511 expected_vdc = 128;
513 else
515 shift = 2 + x->up_available + x->left_available;
516 expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
517 expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
521 /*vpx_memset(upred_ptr,expected_udc,64);
522 vpx_memset(vpred_ptr,expected_vdc,64);*/
523 for (i = 0; i < 8; i++)
525 vpx_memset(upred_ptr, expected_udc, 8);
526 vpx_memset(vpred_ptr, expected_vdc, 8);
527 upred_ptr += uv_stride; /*8;*/
528 vpred_ptr += uv_stride; /*8;*/
531 break;
532 case V_PRED:
534 int i;
536 for (i = 0; i < 8; i++)
538 vpx_memcpy(upred_ptr, uabove_row, 8);
539 vpx_memcpy(vpred_ptr, vabove_row, 8);
540 upred_ptr += uv_stride; /*8;*/
541 vpred_ptr += uv_stride; /*8;*/
545 break;
546 case H_PRED:
548 int i;
550 for (i = 0; i < 8; i++)
552 vpx_memset(upred_ptr, uleft_col[i], 8);
553 vpx_memset(vpred_ptr, vleft_col[i], 8);
554 upred_ptr += uv_stride; /*8;*/
555 vpred_ptr += uv_stride; /*8;*/
559 break;
560 case TM_PRED:
562 int i;
564 for (i = 0; i < 8; i++)
566 for (j = 0; j < 8; j++)
568 int predu = uleft_col[i] + uabove_row[j] - utop_left;
569 int predv = vleft_col[i] + vabove_row[j] - vtop_left;
571 if (predu < 0)
572 predu = 0;
574 if (predu > 255)
575 predu = 255;
577 if (predv < 0)
578 predv = 0;
580 if (predv > 255)
581 predv = 255;
583 upred_ptr[j] = predu;
584 vpred_ptr[j] = predv;
587 upred_ptr += uv_stride; /*8;*/
588 vpred_ptr += uv_stride; /*8;*/
592 break;
593 case B_PRED:
594 case NEARESTMV:
595 case NEARMV:
596 case ZEROMV:
597 case NEWMV:
598 case SPLITMV:
599 case MB_MODE_COUNT:
600 break;
605 void vp8mt_predict_intra4x4(VP8D_COMP *pbi,
606 MACROBLOCKD *xd,
607 int b_mode,
608 unsigned char *predictor,
609 int mb_row,
610 int mb_col,
611 int num)
613 int i, r, c;
615 unsigned char *Above; /* = *(x->base_dst) + x->dst - x->dst_stride; */
616 unsigned char Left[4];
617 unsigned char top_left; /* = Above[-1]; */
619 BLOCKD *x = &xd->block[num];
621 /*Caution: For some b_mode, it needs 8 pixels (4 above + 4 above-right).*/
622 if (num < 4 && pbi->common.filter_level)
623 Above = pbi->mt_yabove_row[mb_row] + mb_col*16 + num*4 + 32;
624 else
625 Above = *(x->base_dst) + x->dst - x->dst_stride;
627 if (num%4==0 && pbi->common.filter_level)
629 for (i=0; i<4; i++)
630 Left[i] = pbi->mt_yleft_col[mb_row][num + i];
631 }else
633 Left[0] = (*(x->base_dst))[x->dst - 1];
634 Left[1] = (*(x->base_dst))[x->dst - 1 + x->dst_stride];
635 Left[2] = (*(x->base_dst))[x->dst - 1 + 2 * x->dst_stride];
636 Left[3] = (*(x->base_dst))[x->dst - 1 + 3 * x->dst_stride];
639 if ((num==4 || num==8 || num==12) && pbi->common.filter_level)
640 top_left = pbi->mt_yleft_col[mb_row][num-1];
641 else
642 top_left = Above[-1];
644 switch (b_mode)
646 case B_DC_PRED:
648 int expected_dc = 0;
650 for (i = 0; i < 4; i++)
652 expected_dc += Above[i];
653 expected_dc += Left[i];
656 expected_dc = (expected_dc + 4) >> 3;
658 for (r = 0; r < 4; r++)
660 for (c = 0; c < 4; c++)
662 predictor[c] = expected_dc;
665 predictor += 16;
668 break;
669 case B_TM_PRED:
671 /* prediction similar to true_motion prediction */
672 for (r = 0; r < 4; r++)
674 for (c = 0; c < 4; c++)
676 int pred = Above[c] - top_left + Left[r];
678 if (pred < 0)
679 pred = 0;
681 if (pred > 255)
682 pred = 255;
684 predictor[c] = pred;
687 predictor += 16;
690 break;
692 case B_VE_PRED:
695 unsigned int ap[4];
696 ap[0] = (top_left + 2 * Above[0] + Above[1] + 2) >> 2;
697 ap[1] = (Above[0] + 2 * Above[1] + Above[2] + 2) >> 2;
698 ap[2] = (Above[1] + 2 * Above[2] + Above[3] + 2) >> 2;
699 ap[3] = (Above[2] + 2 * Above[3] + Above[4] + 2) >> 2;
701 for (r = 0; r < 4; r++)
703 for (c = 0; c < 4; c++)
706 predictor[c] = ap[c];
709 predictor += 16;
713 break;
716 case B_HE_PRED:
719 unsigned int lp[4];
720 lp[0] = (top_left + 2 * Left[0] + Left[1] + 2) >> 2;
721 lp[1] = (Left[0] + 2 * Left[1] + Left[2] + 2) >> 2;
722 lp[2] = (Left[1] + 2 * Left[2] + Left[3] + 2) >> 2;
723 lp[3] = (Left[2] + 2 * Left[3] + Left[3] + 2) >> 2;
725 for (r = 0; r < 4; r++)
727 for (c = 0; c < 4; c++)
729 predictor[c] = lp[r];
732 predictor += 16;
735 break;
736 case B_LD_PRED:
738 unsigned char *ptr = Above;
739 predictor[0 * 16 + 0] = (ptr[0] + ptr[1] * 2 + ptr[2] + 2) >> 2;
740 predictor[0 * 16 + 1] =
741 predictor[1 * 16 + 0] = (ptr[1] + ptr[2] * 2 + ptr[3] + 2) >> 2;
742 predictor[0 * 16 + 2] =
743 predictor[1 * 16 + 1] =
744 predictor[2 * 16 + 0] = (ptr[2] + ptr[3] * 2 + ptr[4] + 2) >> 2;
745 predictor[0 * 16 + 3] =
746 predictor[1 * 16 + 2] =
747 predictor[2 * 16 + 1] =
748 predictor[3 * 16 + 0] = (ptr[3] + ptr[4] * 2 + ptr[5] + 2) >> 2;
749 predictor[1 * 16 + 3] =
750 predictor[2 * 16 + 2] =
751 predictor[3 * 16 + 1] = (ptr[4] + ptr[5] * 2 + ptr[6] + 2) >> 2;
752 predictor[2 * 16 + 3] =
753 predictor[3 * 16 + 2] = (ptr[5] + ptr[6] * 2 + ptr[7] + 2) >> 2;
754 predictor[3 * 16 + 3] = (ptr[6] + ptr[7] * 2 + ptr[7] + 2) >> 2;
757 break;
758 case B_RD_PRED:
761 unsigned char pp[9];
763 pp[0] = Left[3];
764 pp[1] = Left[2];
765 pp[2] = Left[1];
766 pp[3] = Left[0];
767 pp[4] = top_left;
768 pp[5] = Above[0];
769 pp[6] = Above[1];
770 pp[7] = Above[2];
771 pp[8] = Above[3];
773 predictor[3 * 16 + 0] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
774 predictor[3 * 16 + 1] =
775 predictor[2 * 16 + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
776 predictor[3 * 16 + 2] =
777 predictor[2 * 16 + 1] =
778 predictor[1 * 16 + 0] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
779 predictor[3 * 16 + 3] =
780 predictor[2 * 16 + 2] =
781 predictor[1 * 16 + 1] =
782 predictor[0 * 16 + 0] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
783 predictor[2 * 16 + 3] =
784 predictor[1 * 16 + 2] =
785 predictor[0 * 16 + 1] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
786 predictor[1 * 16 + 3] =
787 predictor[0 * 16 + 2] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
788 predictor[0 * 16 + 3] = (pp[6] + pp[7] * 2 + pp[8] + 2) >> 2;
791 break;
792 case B_VR_PRED:
795 unsigned char pp[9];
797 pp[0] = Left[3];
798 pp[1] = Left[2];
799 pp[2] = Left[1];
800 pp[3] = Left[0];
801 pp[4] = top_left;
802 pp[5] = Above[0];
803 pp[6] = Above[1];
804 pp[7] = Above[2];
805 pp[8] = Above[3];
808 predictor[3 * 16 + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
809 predictor[2 * 16 + 0] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
810 predictor[3 * 16 + 1] =
811 predictor[1 * 16 + 0] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
812 predictor[2 * 16 + 1] =
813 predictor[0 * 16 + 0] = (pp[4] + pp[5] + 1) >> 1;
814 predictor[3 * 16 + 2] =
815 predictor[1 * 16 + 1] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
816 predictor[2 * 16 + 2] =
817 predictor[0 * 16 + 1] = (pp[5] + pp[6] + 1) >> 1;
818 predictor[3 * 16 + 3] =
819 predictor[1 * 16 + 2] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
820 predictor[2 * 16 + 3] =
821 predictor[0 * 16 + 2] = (pp[6] + pp[7] + 1) >> 1;
822 predictor[1 * 16 + 3] = (pp[6] + pp[7] * 2 + pp[8] + 2) >> 2;
823 predictor[0 * 16 + 3] = (pp[7] + pp[8] + 1) >> 1;
826 break;
827 case B_VL_PRED:
830 unsigned char *pp = Above;
832 predictor[0 * 16 + 0] = (pp[0] + pp[1] + 1) >> 1;
833 predictor[1 * 16 + 0] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
834 predictor[2 * 16 + 0] =
835 predictor[0 * 16 + 1] = (pp[1] + pp[2] + 1) >> 1;
836 predictor[1 * 16 + 1] =
837 predictor[3 * 16 + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
838 predictor[2 * 16 + 1] =
839 predictor[0 * 16 + 2] = (pp[2] + pp[3] + 1) >> 1;
840 predictor[3 * 16 + 1] =
841 predictor[1 * 16 + 2] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
842 predictor[0 * 16 + 3] =
843 predictor[2 * 16 + 2] = (pp[3] + pp[4] + 1) >> 1;
844 predictor[1 * 16 + 3] =
845 predictor[3 * 16 + 2] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
846 predictor[2 * 16 + 3] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
847 predictor[3 * 16 + 3] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
849 break;
851 case B_HD_PRED:
853 unsigned char pp[9];
854 pp[0] = Left[3];
855 pp[1] = Left[2];
856 pp[2] = Left[1];
857 pp[3] = Left[0];
858 pp[4] = top_left;
859 pp[5] = Above[0];
860 pp[6] = Above[1];
861 pp[7] = Above[2];
862 pp[8] = Above[3];
865 predictor[3 * 16 + 0] = (pp[0] + pp[1] + 1) >> 1;
866 predictor[3 * 16 + 1] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
867 predictor[2 * 16 + 0] =
868 predictor[3 * 16 + 2] = (pp[1] + pp[2] + 1) >> 1;
869 predictor[2 * 16 + 1] =
870 predictor[3 * 16 + 3] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
871 predictor[2 * 16 + 2] =
872 predictor[1 * 16 + 0] = (pp[2] + pp[3] + 1) >> 1;
873 predictor[2 * 16 + 3] =
874 predictor[1 * 16 + 1] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
875 predictor[1 * 16 + 2] =
876 predictor[0 * 16 + 0] = (pp[3] + pp[4] + 1) >> 1;
877 predictor[1 * 16 + 3] =
878 predictor[0 * 16 + 1] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
879 predictor[0 * 16 + 2] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
880 predictor[0 * 16 + 3] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
882 break;
885 case B_HU_PRED:
887 unsigned char *pp = Left;
888 predictor[0 * 16 + 0] = (pp[0] + pp[1] + 1) >> 1;
889 predictor[0 * 16 + 1] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
890 predictor[0 * 16 + 2] =
891 predictor[1 * 16 + 0] = (pp[1] + pp[2] + 1) >> 1;
892 predictor[0 * 16 + 3] =
893 predictor[1 * 16 + 1] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
894 predictor[1 * 16 + 2] =
895 predictor[2 * 16 + 0] = (pp[2] + pp[3] + 1) >> 1;
896 predictor[1 * 16 + 3] =
897 predictor[2 * 16 + 1] = (pp[2] + pp[3] * 2 + pp[3] + 2) >> 2;
898 predictor[2 * 16 + 2] =
899 predictor[2 * 16 + 3] =
900 predictor[3 * 16 + 0] =
901 predictor[3 * 16 + 1] =
902 predictor[3 * 16 + 2] =
903 predictor[3 * 16 + 3] = pp[3];
905 break;
911 /* copy 4 bytes from the above right down so that the 4x4 prediction modes using pixels above and
912 * to the right prediction have filled in pixels to use.
914 void vp8mt_intra_prediction_down_copy(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
916 unsigned char *above_right; /* = *(x->block[0].base_dst) + x->block[0].dst - x->block[0].dst_stride + 16; */
917 unsigned int *src_ptr;
918 unsigned int *dst_ptr0;
919 unsigned int *dst_ptr1;
920 unsigned int *dst_ptr2;
922 if (pbi->common.filter_level)
923 above_right = pbi->mt_yabove_row[mb_row] + mb_col*16 + 32 +16;
924 else
925 above_right = *(x->block[0].base_dst) + x->block[0].dst - x->block[0].dst_stride + 16;
927 src_ptr = (unsigned int *)above_right;
928 /*dst_ptr0 = (unsigned int *)(above_right + 4 * x->block[0].dst_stride);
929 dst_ptr1 = (unsigned int *)(above_right + 8 * x->block[0].dst_stride);
930 dst_ptr2 = (unsigned int *)(above_right + 12 * x->block[0].dst_stride);*/
931 dst_ptr0 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 3 * x->block[0].dst_stride);
932 dst_ptr1 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 7 * x->block[0].dst_stride);
933 dst_ptr2 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 11 * x->block[0].dst_stride);
934 *dst_ptr0 = *src_ptr;
935 *dst_ptr1 = *src_ptr;
936 *dst_ptr2 = *src_ptr;