/*
 *  Copyright (c) 2010 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "vpx_ports/config.h"
#include "vp8/common/recon.h"
#include "vp8/common/reconintra.h"
#include "vpx_mem/vpx_mem.h"
#include "onyxd_int.h"

/* For skip_recon_mb(), add vp8_build_intra_predictors_mby_s(MACROBLOCKD *x) and
 * vp8_build_intra_predictors_mbuv_s(MACROBLOCKD *x).
 */
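
/* Note on boundary pixels: when the loop filter is enabled
 * (pbi->common.filter_level != 0), the reconstruction rows in x->dst may
 * already have been filtered, so the above-row and left-column pixels are
 * read from the pre-filter copies saved in pbi->mt_yabove_row /
 * pbi->mt_yleft_col (and their u/v equivalents). Otherwise they are read
 * straight from the destination frame buffer.
 */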

void vp8mt_build_intra_predictors_mby(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
{
    unsigned char *yabove_row;    /* = x->dst.y_buffer - x->dst.y_stride; */
    unsigned char *yleft_col;
    unsigned char yleft_buf[16];
    unsigned char ytop_left;      /* = yabove_row[-1]; */
    unsigned char *ypred_ptr = x->predictor;
    int r, c, i;

    if (pbi->common.filter_level)
    {
        yabove_row = pbi->mt_yabove_row[mb_row] + mb_col * 16 + 32;
        yleft_col  = pbi->mt_yleft_col[mb_row];
    }
    else
    {
        yabove_row = x->dst.y_buffer - x->dst.y_stride;

        /* Copy the left-neighbour column out of the frame buffer. */
        for (i = 0; i < 16; i++)
            yleft_buf[i] = x->dst.y_buffer[i * x->dst.y_stride - 1];

        yleft_col = yleft_buf;
    }

    ytop_left = yabove_row[-1];

    switch (x->mode_info_context->mbmi.mode)
    {
    case DC_PRED:
    {
        int expected_dc;
        int shift;
        int average = 0;

        if (x->up_available || x->left_available)
        {
            if (x->up_available)
            {
                for (i = 0; i < 16; i++)
                    average += yabove_row[i];
            }

            if (x->left_available)
            {
                for (i = 0; i < 16; i++)
                    average += yleft_col[i];
            }

            /* Divide the sum of the 16 or 32 boundary pixels, with rounding:
             * shift = 3 + up_available + left_available.
             */
            shift = 3 + x->up_available + x->left_available;
            expected_dc = (average + (1 << (shift - 1))) >> shift;
        }
        else
        {
            expected_dc = 128;
        }

        vpx_memset(ypred_ptr, expected_dc, 256);
    }
    break;
    case V_PRED:
    {
        for (r = 0; r < 16; r++)
        {
            /* Replicate the above row; copy 16 bytes as four ints. */
            ((int *)ypred_ptr)[0] = ((int *)yabove_row)[0];
            ((int *)ypred_ptr)[1] = ((int *)yabove_row)[1];
            ((int *)ypred_ptr)[2] = ((int *)yabove_row)[2];
            ((int *)ypred_ptr)[3] = ((int *)yabove_row)[3];
            ypred_ptr += 16;
        }
    }
    break;
    case H_PRED:
    {
        for (r = 0; r < 16; r++)
        {
            vpx_memset(ypred_ptr, yleft_col[r], 16);
            ypred_ptr += 16;
        }
    }
    break;
    case TM_PRED:
    {
        for (r = 0; r < 16; r++)
        {
            for (c = 0; c < 16; c++)
            {
                int pred = yleft_col[r] + yabove_row[c] - ytop_left;

                if (pred < 0)
                    pred = 0;

                if (pred > 255)
                    pred = 255;

                ypred_ptr[c] = pred;
            }

            ypred_ptr += 16;
        }
    }
    break;
    default:
        break;
    }
}
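
/* Same prediction as above, but for skipped macroblocks the result is
 * written straight into the frame buffer (x->dst.y_buffer, stride
 * x->dst.y_stride) instead of the 16-byte-stride x->predictor scratch.
 */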
void vp8mt_build_intra_predictors_mby_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
{
    unsigned char *yabove_row;    /* = x->dst.y_buffer - x->dst.y_stride; */
    unsigned char *yleft_col;
    unsigned char yleft_buf[16];
    unsigned char ytop_left;      /* = yabove_row[-1]; */
    unsigned char *ypred_ptr = x->predictor;
    int r, c, i;

    int y_stride = x->dst.y_stride;
    ypred_ptr = x->dst.y_buffer; /*x->predictor;*/

    if (pbi->common.filter_level)
    {
        yabove_row = pbi->mt_yabove_row[mb_row] + mb_col * 16 + 32;
        yleft_col  = pbi->mt_yleft_col[mb_row];
    }
    else
    {
        yabove_row = x->dst.y_buffer - x->dst.y_stride;

        for (i = 0; i < 16; i++)
            yleft_buf[i] = x->dst.y_buffer[i * x->dst.y_stride - 1];

        yleft_col = yleft_buf;
    }

    ytop_left = yabove_row[-1];

    switch (x->mode_info_context->mbmi.mode)
    {
    case DC_PRED:
    {
        int expected_dc;
        int shift;
        int average = 0;

        if (x->up_available || x->left_available)
        {
            if (x->up_available)
            {
                for (i = 0; i < 16; i++)
                    average += yabove_row[i];
            }

            if (x->left_available)
            {
                for (i = 0; i < 16; i++)
                    average += yleft_col[i];
            }

            shift = 3 + x->up_available + x->left_available;
            expected_dc = (average + (1 << (shift - 1))) >> shift;
        }
        else
        {
            expected_dc = 128;
        }

        /*vpx_memset(ypred_ptr, expected_dc, 256);*/
        for (r = 0; r < 16; r++)
        {
            vpx_memset(ypred_ptr, expected_dc, 16);
            ypred_ptr += y_stride; /*16;*/
        }
    }
    break;
    case V_PRED:
    {
        for (r = 0; r < 16; r++)
        {
            ((int *)ypred_ptr)[0] = ((int *)yabove_row)[0];
            ((int *)ypred_ptr)[1] = ((int *)yabove_row)[1];
            ((int *)ypred_ptr)[2] = ((int *)yabove_row)[2];
            ((int *)ypred_ptr)[3] = ((int *)yabove_row)[3];
            ypred_ptr += y_stride; /*16;*/
        }
    }
    break;
    case H_PRED:
    {
        for (r = 0; r < 16; r++)
        {
            vpx_memset(ypred_ptr, yleft_col[r], 16);
            ypred_ptr += y_stride; /*16;*/
        }
    }
    break;
    case TM_PRED:
    {
        for (r = 0; r < 16; r++)
        {
            for (c = 0; c < 16; c++)
            {
                int pred = yleft_col[r] + yabove_row[c] - ytop_left;

                if (pred < 0)
                    pred = 0;

                if (pred > 255)
                    pred = 255;

                ypred_ptr[c] = pred;
            }

            ypred_ptr += y_stride; /*16;*/
        }
    }
    break;
    default:
        break;
    }
}
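
/* Chroma counterpart: builds the two 8x8 U and V predictions into
 * x->predictor at offsets 256 and 320 (each plane stored with stride 8).
 */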
void vp8mt_build_intra_predictors_mbuv(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
{
    unsigned char *uabove_row;    /* = x->dst.u_buffer - x->dst.uv_stride; */
    unsigned char *uleft_col;     /*[16];*/
    unsigned char uleft_buf[8];
    unsigned char utop_left;      /* = uabove_row[-1]; */
    unsigned char *vabove_row;    /* = x->dst.v_buffer - x->dst.uv_stride; */
    unsigned char *vleft_col;     /*[20];*/
    unsigned char vleft_buf[8];
    unsigned char vtop_left;      /* = vabove_row[-1]; */
    unsigned char *upred_ptr = &x->predictor[256];
    unsigned char *vpred_ptr = &x->predictor[320];
    int i, j;

    if (pbi->common.filter_level)
    {
        uabove_row = pbi->mt_uabove_row[mb_row] + mb_col * 8 + 16;
        vabove_row = pbi->mt_vabove_row[mb_row] + mb_col * 8 + 16;
        uleft_col  = pbi->mt_uleft_col[mb_row];
        vleft_col  = pbi->mt_vleft_col[mb_row];
    }
    else
    {
        uabove_row = x->dst.u_buffer - x->dst.uv_stride;
        vabove_row = x->dst.v_buffer - x->dst.uv_stride;

        for (i = 0; i < 8; i++)
        {
            uleft_buf[i] = x->dst.u_buffer[i * x->dst.uv_stride - 1];
            vleft_buf[i] = x->dst.v_buffer[i * x->dst.uv_stride - 1];
        }

        uleft_col = uleft_buf;
        vleft_col = vleft_buf;
    }

    utop_left = uabove_row[-1];
    vtop_left = vabove_row[-1];

    switch (x->mode_info_context->mbmi.uv_mode)
    {
    case DC_PRED:
    {
        int expected_udc;
        int expected_vdc;
        int shift;
        int Uaverage = 0;
        int Vaverage = 0;

        if (x->up_available)
        {
            for (i = 0; i < 8; i++)
            {
                Uaverage += uabove_row[i];
                Vaverage += vabove_row[i];
            }
        }

        if (x->left_available)
        {
            for (i = 0; i < 8; i++)
            {
                Uaverage += uleft_col[i];
                Vaverage += vleft_col[i];
            }
        }

        if (!x->up_available && !x->left_available)
        {
            expected_udc = 128;
            expected_vdc = 128;
        }
        else
        {
            shift = 2 + x->up_available + x->left_available;
            expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
            expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
        }

        vpx_memset(upred_ptr, expected_udc, 64);
        vpx_memset(vpred_ptr, expected_vdc, 64);
    }
    break;
    case V_PRED:
    {
        for (i = 0; i < 8; i++)
        {
            vpx_memcpy(upred_ptr, uabove_row, 8);
            vpx_memcpy(vpred_ptr, vabove_row, 8);
            upred_ptr += 8;
            vpred_ptr += 8;
        }
    }
    break;
    case H_PRED:
    {
        for (i = 0; i < 8; i++)
        {
            vpx_memset(upred_ptr, uleft_col[i], 8);
            vpx_memset(vpred_ptr, vleft_col[i], 8);
            upred_ptr += 8;
            vpred_ptr += 8;
        }
    }
    break;
    case TM_PRED:
    {
        for (i = 0; i < 8; i++)
        {
            for (j = 0; j < 8; j++)
            {
                int predu = uleft_col[i] + uabove_row[j] - utop_left;
                int predv = vleft_col[i] + vabove_row[j] - vtop_left;

                if (predu < 0)
                    predu = 0;

                if (predu > 255)
                    predu = 255;

                if (predv < 0)
                    predv = 0;

                if (predv > 255)
                    predv = 255;

                upred_ptr[j] = predu;
                vpred_ptr[j] = predv;
            }

            upred_ptr += 8;
            vpred_ptr += 8;
        }
    }
    break;
    default:
        break;
    }
}
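
/* As above, but for skipped macroblocks the U and V predictions are written
 * directly into the frame buffer (x->dst.u_buffer / x->dst.v_buffer, stride
 * x->dst.uv_stride).
 */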
void vp8mt_build_intra_predictors_mbuv_s(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
{
    unsigned char *uabove_row;    /* = x->dst.u_buffer - x->dst.uv_stride; */
    unsigned char *uleft_col;     /*[16];*/
    unsigned char uleft_buf[8];
    unsigned char utop_left;      /* = uabove_row[-1]; */
    unsigned char *vabove_row;    /* = x->dst.v_buffer - x->dst.uv_stride; */
    unsigned char *vleft_col;     /*[20];*/
    unsigned char vleft_buf[8];
    unsigned char vtop_left;      /* = vabove_row[-1]; */
    unsigned char *upred_ptr = x->dst.u_buffer; /*&x->predictor[256];*/
    unsigned char *vpred_ptr = x->dst.v_buffer; /*&x->predictor[320];*/
    int uv_stride = x->dst.uv_stride;
    int i, j;

    if (pbi->common.filter_level)
    {
        uabove_row = pbi->mt_uabove_row[mb_row] + mb_col * 8 + 16;
        vabove_row = pbi->mt_vabove_row[mb_row] + mb_col * 8 + 16;
        uleft_col  = pbi->mt_uleft_col[mb_row];
        vleft_col  = pbi->mt_vleft_col[mb_row];
    }
    else
    {
        uabove_row = x->dst.u_buffer - x->dst.uv_stride;
        vabove_row = x->dst.v_buffer - x->dst.uv_stride;

        for (i = 0; i < 8; i++)
        {
            uleft_buf[i] = x->dst.u_buffer[i * x->dst.uv_stride - 1];
            vleft_buf[i] = x->dst.v_buffer[i * x->dst.uv_stride - 1];
        }

        uleft_col = uleft_buf;
        vleft_col = vleft_buf;
    }

    utop_left = uabove_row[-1];
    vtop_left = vabove_row[-1];

    switch (x->mode_info_context->mbmi.uv_mode)
    {
    case DC_PRED:
    {
        int expected_udc;
        int expected_vdc;
        int shift;
        int Uaverage = 0;
        int Vaverage = 0;

        if (x->up_available)
        {
            for (i = 0; i < 8; i++)
            {
                Uaverage += uabove_row[i];
                Vaverage += vabove_row[i];
            }
        }

        if (x->left_available)
        {
            for (i = 0; i < 8; i++)
            {
                Uaverage += uleft_col[i];
                Vaverage += vleft_col[i];
            }
        }

        if (!x->up_available && !x->left_available)
        {
            expected_udc = 128;
            expected_vdc = 128;
        }
        else
        {
            shift = 2 + x->up_available + x->left_available;
            expected_udc = (Uaverage + (1 << (shift - 1))) >> shift;
            expected_vdc = (Vaverage + (1 << (shift - 1))) >> shift;
        }

        /*vpx_memset(upred_ptr, expected_udc, 64);
        vpx_memset(vpred_ptr, expected_vdc, 64);*/
        for (i = 0; i < 8; i++)
        {
            vpx_memset(upred_ptr, expected_udc, 8);
            vpx_memset(vpred_ptr, expected_vdc, 8);
            upred_ptr += uv_stride; /*8;*/
            vpred_ptr += uv_stride; /*8;*/
        }
    }
    break;
    case V_PRED:
    {
        for (i = 0; i < 8; i++)
        {
            vpx_memcpy(upred_ptr, uabove_row, 8);
            vpx_memcpy(vpred_ptr, vabove_row, 8);
            upred_ptr += uv_stride; /*8;*/
            vpred_ptr += uv_stride; /*8;*/
        }
    }
    break;
    case H_PRED:
    {
        for (i = 0; i < 8; i++)
        {
            vpx_memset(upred_ptr, uleft_col[i], 8);
            vpx_memset(vpred_ptr, vleft_col[i], 8);
            upred_ptr += uv_stride; /*8;*/
            vpred_ptr += uv_stride; /*8;*/
        }
    }
    break;
    case TM_PRED:
    {
        for (i = 0; i < 8; i++)
        {
            for (j = 0; j < 8; j++)
            {
                int predu = uleft_col[i] + uabove_row[j] - utop_left;
                int predv = vleft_col[i] + vabove_row[j] - vtop_left;

                if (predu < 0)
                    predu = 0;

                if (predu > 255)
                    predu = 255;

                if (predv < 0)
                    predv = 0;

                if (predv > 255)
                    predv = 255;

                upred_ptr[j] = predu;
                vpred_ptr[j] = predv;
            }

            upred_ptr += uv_stride; /*8;*/
            vpred_ptr += uv_stride; /*8;*/
        }
    }
    break;
    default:
        break;
    }
}
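
/* 4x4 sub-block (B_PRED) intra prediction. Sub-blocks in the top row of the
 * macroblock (num < 4) take their above pixels from the saved row buffer,
 * and sub-blocks in the left column (num % 4 == 0) take their left pixels
 * from the saved column, for the same loop-filter reason as above.
 */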
void vp8mt_predict_intra4x4(VP8D_COMP *pbi,
                            MACROBLOCKD *xd,
                            int b_mode,
                            unsigned char *predictor,
                            int mb_row,
                            int mb_col,
                            int num)
{
    int i, r, c;

    unsigned char *Above;   /* = *(x->base_dst) + x->dst - x->dst_stride; */
    unsigned char Left[4];
    unsigned char top_left; /* = Above[-1]; */

    BLOCKD *x = &xd->block[num];

    /* Caution: for some b_mode, 8 pixels are needed (4 above + 4 above-right). */
    if (num < 4 && pbi->common.filter_level)
        Above = pbi->mt_yabove_row[mb_row] + mb_col * 16 + num * 4 + 32;
    else
        Above = *(x->base_dst) + x->dst - x->dst_stride;
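
    /* (B_LD_PRED and B_VL_PRED below read Above[4..7], the above-right
     * pixels; vp8mt_intra_prediction_down_copy() fills those in for
     * sub-blocks that are not in the top row.)
     */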

    if (num % 4 == 0 && pbi->common.filter_level)
    {
        for (i = 0; i < 4; i++)
            Left[i] = pbi->mt_yleft_col[mb_row][num + i];
    }
    else
    {
        Left[0] = (*(x->base_dst))[x->dst - 1];
        Left[1] = (*(x->base_dst))[x->dst - 1 + x->dst_stride];
        Left[2] = (*(x->base_dst))[x->dst - 1 + 2 * x->dst_stride];
        Left[3] = (*(x->base_dst))[x->dst - 1 + 3 * x->dst_stride];
    }

    if ((num == 4 || num == 8 || num == 12) && pbi->common.filter_level)
        top_left = pbi->mt_yleft_col[mb_row][num - 1];
    else
        top_left = Above[-1];

    switch (b_mode)
    {
    case B_DC_PRED:
    {
        int expected_dc = 0;

        for (i = 0; i < 4; i++)
        {
            expected_dc += Above[i];
            expected_dc += Left[i];
        }

        /* Average of the 8 boundary pixels, rounded. */
        expected_dc = (expected_dc + 4) >> 3;

        for (r = 0; r < 4; r++)
        {
            for (c = 0; c < 4; c++)
                predictor[c] = expected_dc;

            predictor += 16;
        }
    }
    break;
    case B_TM_PRED:
    {
        /* prediction similar to true_motion prediction */
        for (r = 0; r < 4; r++)
        {
            for (c = 0; c < 4; c++)
            {
                int pred = Above[c] - top_left + Left[r];

                if (pred < 0)
                    pred = 0;

                if (pred > 255)
                    pred = 255;

                predictor[c] = pred;
            }

            predictor += 16;
        }
    }
    break;
    case B_VE_PRED:
    {
        unsigned int ap[4];

        /* Each column is the 1-2-1 smoothed value of the above row. */
        ap[0] = (top_left + 2 * Above[0] + Above[1] + 2) >> 2;
        ap[1] = (Above[0] + 2 * Above[1] + Above[2] + 2) >> 2;
        ap[2] = (Above[1] + 2 * Above[2] + Above[3] + 2) >> 2;
        ap[3] = (Above[2] + 2 * Above[3] + Above[4] + 2) >> 2;

        for (r = 0; r < 4; r++)
        {
            for (c = 0; c < 4; c++)
                predictor[c] = ap[c];

            predictor += 16;
        }
    }
    break;
    case B_HE_PRED:
    {
        unsigned int lp[4];

        /* Each row is the 1-2-1 smoothed value of the left column; the last
         * row reuses Left[3] as the (unavailable) below-left pixel.
         */
        lp[0] = (top_left + 2 * Left[0] + Left[1] + 2) >> 2;
        lp[1] = (Left[0] + 2 * Left[1] + Left[2] + 2) >> 2;
        lp[2] = (Left[1] + 2 * Left[2] + Left[3] + 2) >> 2;
        lp[3] = (Left[2] + 2 * Left[3] + Left[3] + 2) >> 2;

        for (r = 0; r < 4; r++)
        {
            for (c = 0; c < 4; c++)
                predictor[c] = lp[r];

            predictor += 16;
        }
    }
    break;
    case B_LD_PRED:
    {
        unsigned char *ptr = Above;

        predictor[0 * 16 + 0] = (ptr[0] + ptr[1] * 2 + ptr[2] + 2) >> 2;
        predictor[0 * 16 + 1] =
        predictor[1 * 16 + 0] = (ptr[1] + ptr[2] * 2 + ptr[3] + 2) >> 2;
        predictor[0 * 16 + 2] =
        predictor[1 * 16 + 1] =
        predictor[2 * 16 + 0] = (ptr[2] + ptr[3] * 2 + ptr[4] + 2) >> 2;
        predictor[0 * 16 + 3] =
        predictor[1 * 16 + 2] =
        predictor[2 * 16 + 1] =
        predictor[3 * 16 + 0] = (ptr[3] + ptr[4] * 2 + ptr[5] + 2) >> 2;
        predictor[1 * 16 + 3] =
        predictor[2 * 16 + 2] =
        predictor[3 * 16 + 1] = (ptr[4] + ptr[5] * 2 + ptr[6] + 2) >> 2;
        predictor[2 * 16 + 3] =
        predictor[3 * 16 + 2] = (ptr[5] + ptr[6] * 2 + ptr[7] + 2) >> 2;
        predictor[3 * 16 + 3] = (ptr[6] + ptr[7] * 2 + ptr[7] + 2) >> 2;
    }
    break;
    case B_RD_PRED:
    {
        /* Build the 9-pixel edge: left column bottom-up, the top-left
         * corner, then the above row.
         */
        unsigned char pp[9];

        pp[0] = Left[3];
        pp[1] = Left[2];
        pp[2] = Left[1];
        pp[3] = Left[0];
        pp[4] = top_left;
        pp[5] = Above[0];
        pp[6] = Above[1];
        pp[7] = Above[2];
        pp[8] = Above[3];

        predictor[3 * 16 + 0] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
        predictor[3 * 16 + 1] =
        predictor[2 * 16 + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
        predictor[3 * 16 + 2] =
        predictor[2 * 16 + 1] =
        predictor[1 * 16 + 0] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
        predictor[3 * 16 + 3] =
        predictor[2 * 16 + 2] =
        predictor[1 * 16 + 1] =
        predictor[0 * 16 + 0] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
        predictor[2 * 16 + 3] =
        predictor[1 * 16 + 2] =
        predictor[0 * 16 + 1] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
        predictor[1 * 16 + 3] =
        predictor[0 * 16 + 2] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
        predictor[0 * 16 + 3] = (pp[6] + pp[7] * 2 + pp[8] + 2) >> 2;
    }
    break;
    case B_VR_PRED:
    {
        unsigned char pp[9];

        pp[0] = Left[3];
        pp[1] = Left[2];
        pp[2] = Left[1];
        pp[3] = Left[0];
        pp[4] = top_left;
        pp[5] = Above[0];
        pp[6] = Above[1];
        pp[7] = Above[2];
        pp[8] = Above[3];

        predictor[3 * 16 + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
        predictor[2 * 16 + 0] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
        predictor[3 * 16 + 1] =
        predictor[1 * 16 + 0] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
        predictor[2 * 16 + 1] =
        predictor[0 * 16 + 0] = (pp[4] + pp[5] + 1) >> 1;
        predictor[3 * 16 + 2] =
        predictor[1 * 16 + 1] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
        predictor[2 * 16 + 2] =
        predictor[0 * 16 + 1] = (pp[5] + pp[6] + 1) >> 1;
        predictor[3 * 16 + 3] =
        predictor[1 * 16 + 2] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
        predictor[2 * 16 + 3] =
        predictor[0 * 16 + 2] = (pp[6] + pp[7] + 1) >> 1;
        predictor[1 * 16 + 3] = (pp[6] + pp[7] * 2 + pp[8] + 2) >> 2;
        predictor[0 * 16 + 3] = (pp[7] + pp[8] + 1) >> 1;
    }
    break;
    case B_VL_PRED:
    {
        unsigned char *pp = Above;

        predictor[0 * 16 + 0] = (pp[0] + pp[1] + 1) >> 1;
        predictor[1 * 16 + 0] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
        predictor[2 * 16 + 0] =
        predictor[0 * 16 + 1] = (pp[1] + pp[2] + 1) >> 1;
        predictor[1 * 16 + 1] =
        predictor[3 * 16 + 0] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
        predictor[2 * 16 + 1] =
        predictor[0 * 16 + 2] = (pp[2] + pp[3] + 1) >> 1;
        predictor[3 * 16 + 1] =
        predictor[1 * 16 + 2] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
        predictor[0 * 16 + 3] =
        predictor[2 * 16 + 2] = (pp[3] + pp[4] + 1) >> 1;
        predictor[1 * 16 + 3] =
        predictor[3 * 16 + 2] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
        predictor[2 * 16 + 3] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
        predictor[3 * 16 + 3] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
    }
    break;
    case B_HD_PRED:
    {
        unsigned char pp[9];

        pp[0] = Left[3];
        pp[1] = Left[2];
        pp[2] = Left[1];
        pp[3] = Left[0];
        pp[4] = top_left;
        pp[5] = Above[0];
        pp[6] = Above[1];
        pp[7] = Above[2];
        pp[8] = Above[3];

        predictor[3 * 16 + 0] = (pp[0] + pp[1] + 1) >> 1;
        predictor[3 * 16 + 1] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
        predictor[2 * 16 + 0] =
        predictor[3 * 16 + 2] = (pp[1] + pp[2] + 1) >> 1;
        predictor[2 * 16 + 1] =
        predictor[3 * 16 + 3] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
        predictor[2 * 16 + 2] =
        predictor[1 * 16 + 0] = (pp[2] + pp[3] + 1) >> 1;
        predictor[2 * 16 + 3] =
        predictor[1 * 16 + 1] = (pp[2] + pp[3] * 2 + pp[4] + 2) >> 2;
        predictor[1 * 16 + 2] =
        predictor[0 * 16 + 0] = (pp[3] + pp[4] + 1) >> 1;
        predictor[1 * 16 + 3] =
        predictor[0 * 16 + 1] = (pp[3] + pp[4] * 2 + pp[5] + 2) >> 2;
        predictor[0 * 16 + 2] = (pp[4] + pp[5] * 2 + pp[6] + 2) >> 2;
        predictor[0 * 16 + 3] = (pp[5] + pp[6] * 2 + pp[7] + 2) >> 2;
    }
    break;
    case B_HU_PRED:
    {
        unsigned char *pp = Left;

        predictor[0 * 16 + 0] = (pp[0] + pp[1] + 1) >> 1;
        predictor[0 * 16 + 1] = (pp[0] + pp[1] * 2 + pp[2] + 2) >> 2;
        predictor[0 * 16 + 2] =
        predictor[1 * 16 + 0] = (pp[1] + pp[2] + 1) >> 1;
        predictor[0 * 16 + 3] =
        predictor[1 * 16 + 1] = (pp[1] + pp[2] * 2 + pp[3] + 2) >> 2;
        predictor[1 * 16 + 2] =
        predictor[2 * 16 + 0] = (pp[2] + pp[3] + 1) >> 1;
        predictor[1 * 16 + 3] =
        predictor[2 * 16 + 1] = (pp[2] + pp[3] * 2 + pp[3] + 2) >> 2;
        predictor[2 * 16 + 2] =
        predictor[2 * 16 + 3] =
        predictor[3 * 16 + 0] =
        predictor[3 * 16 + 1] =
        predictor[3 * 16 + 2] =
        predictor[3 * 16 + 3] = pp[3];
    }
    break;
    default:
        break;
    }
}

/* Copy 4 bytes of the above-right pixels down, so that the 4x4 prediction
 * modes using pixels above and to the right have filled-in pixels to use.
 */
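/* The four pixels are duplicated alongside rows 3, 7 and 11 of the
 * macroblock, i.e. just above sub-block rows 1, 2 and 3.
 */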
void vp8mt_intra_prediction_down_copy(VP8D_COMP *pbi, MACROBLOCKD *x, int mb_row, int mb_col)
{
    unsigned char *above_right; /* = *(x->block[0].base_dst) + x->block[0].dst - x->block[0].dst_stride + 16; */
    unsigned int *src_ptr;
    unsigned int *dst_ptr0;
    unsigned int *dst_ptr1;
    unsigned int *dst_ptr2;

    if (pbi->common.filter_level)
        above_right = pbi->mt_yabove_row[mb_row] + mb_col * 16 + 32 + 16;
    else
        above_right = *(x->block[0].base_dst) + x->block[0].dst - x->block[0].dst_stride + 16;

    src_ptr = (unsigned int *)above_right;
    /*dst_ptr0 = (unsigned int *)(above_right + 4 * x->block[0].dst_stride);
    dst_ptr1 = (unsigned int *)(above_right + 8 * x->block[0].dst_stride);
    dst_ptr2 = (unsigned int *)(above_right + 12 * x->block[0].dst_stride);*/
    dst_ptr0 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 3 * x->block[0].dst_stride);
    dst_ptr1 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 7 * x->block[0].dst_stride);
    dst_ptr2 = (unsigned int *)(*(x->block[0].base_dst) + x->block[0].dst + 16 + 11 * x->block[0].dst_stride);
    *dst_ptr0 = *src_ptr;
    *dst_ptr1 = *src_ptr;
    *dst_ptr2 = *src_ptr;
}