/*
 * H.26L/H.264/AVC/JVT/14496-10/... decoder
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part10 macroblock decoding
 */

#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"
#include "avcodec.h"
#include "h264dec.h"
#include "h264_ps.h"
#include "qpeldsp.h"
#include "rectangle.h"
#include "threadframe.h"
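
/**
 * Return the lowest luma row of the reference picture that motion
 * compensation for block n in the given list will read, including the
 * extra rows required by the interpolation filter when the vertical
 * motion vector has a fractional part.
 */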
static inline int get_lowest_part_list_y(H264SliceContext *sl,
                                         int n, int height, int y_offset, int list)
{
    int raw_my             = sl->mv_cache[list][scan8[n]][1];
    int filter_height_down = (raw_my & 3) ? 3 : 0;
    int full_my            = (raw_my >> 2) + y_offset;
    int bottom             = full_my + filter_height_down + height;

    av_assert2(height >= 0);

    return FFMAX(0, bottom);
}
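
/**
 * Record, per list and per reference index, the lowest row this partition
 * needs from its reference pictures, so that await_references() knows how
 * far each reference must already be decoded.
 */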
static inline void get_lowest_part_y(const H264Context *h, H264SliceContext *sl,
                                     int16_t refs[2][48], int n,
                                     int height, int y_offset, int list0,
                                     int list1, int *nrefs)
{
    int my;

    y_offset += 16 * (sl->mb_y >> MB_FIELD(sl));

    if (list0) {
        int ref_n    = sl->ref_cache[0][scan8[n]];
        H264Ref *ref = &sl->ref_list[0][ref_n];

        // Error resilience puts the current picture in the ref list.
        // Don't try to wait on these as it will cause a deadlock.
        // Fields can wait on each other, though.
        if (ref->parent->tf.progress != h->cur_pic.tf.progress ||
            (ref->reference & 3) != h->picture_structure) {
            my = get_lowest_part_list_y(sl, n, height, y_offset, 0);
            if (refs[0][ref_n] < 0)
                nrefs[0] += 1;
            refs[0][ref_n] = FFMAX(refs[0][ref_n], my);
        }
    }

    if (list1) {
        int ref_n    = sl->ref_cache[1][scan8[n]];
        H264Ref *ref = &sl->ref_list[1][ref_n];

        if (ref->parent->tf.progress != h->cur_pic.tf.progress ||
            (ref->reference & 3) != h->picture_structure) {
            my = get_lowest_part_list_y(sl, n, height, y_offset, 1);
            if (refs[1][ref_n] < 0)
                nrefs[1] += 1;
            refs[1][ref_n] = FFMAX(refs[1][ref_n], my);
        }
    }
}

/**
 * Wait until all reference frames are available for MC operations.
 *
 * @param h the H.264 context
 */
static void await_references(const H264Context *h, H264SliceContext *sl)
{
    const int mb_xy   = sl->mb_xy;
    const int mb_type = h->cur_pic.mb_type[mb_xy];
    int16_t refs[2][48];
    int nrefs[2] = { 0 };
    int ref, list;

    memset(refs, -1, sizeof(refs));

    if (IS_16X16(mb_type)) {
        get_lowest_part_y(h, sl, refs, 0, 16, 0,
                          IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
    } else if (IS_16X8(mb_type)) {
        get_lowest_part_y(h, sl, refs, 0, 8, 0,
                          IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
        get_lowest_part_y(h, sl, refs, 8, 8, 8,
                          IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
    } else if (IS_8X16(mb_type)) {
        get_lowest_part_y(h, sl, refs, 0, 16, 0,
                          IS_DIR(mb_type, 0, 0), IS_DIR(mb_type, 0, 1), nrefs);
        get_lowest_part_y(h, sl, refs, 4, 16, 0,
                          IS_DIR(mb_type, 1, 0), IS_DIR(mb_type, 1, 1), nrefs);
    } else {
        int i;

        av_assert2(IS_8X8(mb_type));

        for (i = 0; i < 4; i++) {
            const int sub_mb_type = sl->sub_mb_type[i];
            const int n           = 4 * i;
            int y_offset          = (i & 2) << 2;

            if (IS_SUB_8X8(sub_mb_type)) {
                get_lowest_part_y(h, sl, refs, n, 8, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
            } else if (IS_SUB_8X4(sub_mb_type)) {
                get_lowest_part_y(h, sl, refs, n, 4, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
                get_lowest_part_y(h, sl, refs, n + 2, 4, y_offset + 4,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
            } else if (IS_SUB_4X8(sub_mb_type)) {
                get_lowest_part_y(h, sl, refs, n, 8, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
                get_lowest_part_y(h, sl, refs, n + 1, 8, y_offset,
                                  IS_DIR(sub_mb_type, 0, 0),
                                  IS_DIR(sub_mb_type, 0, 1),
                                  nrefs);
            } else {
                int j;
                av_assert2(IS_SUB_4X4(sub_mb_type));
                for (j = 0; j < 4; j++) {
                    int sub_y_offset = y_offset + 2 * (j & 2);
                    get_lowest_part_y(h, sl, refs, n + j, 4, sub_y_offset,
                                      IS_DIR(sub_mb_type, 0, 0),
                                      IS_DIR(sub_mb_type, 0, 1),
                                      nrefs);
                }
            }
        }
    }

    for (list = sl->list_count - 1; list >= 0; list--)
        for (ref = 0; ref < 48 && nrefs[list]; ref++) {
            int row = refs[list][ref];
            if (row >= 0) {
                H264Ref *ref_pic      = &sl->ref_list[list][ref];
                int ref_field         = ref_pic->reference - 1;
                int ref_field_picture = ref_pic->parent->field_picture;
                int pic_height        = 16 * h->mb_height >> ref_field_picture;

                row <<= MB_MBAFF(sl);
                nrefs[list]--;

                if (!FIELD_PICTURE(h) && ref_field_picture) { // frame referencing two fields
                    av_assert2((ref_pic->parent->reference & 3) == 3);
                    ff_thread_await_progress(&ref_pic->parent->tf,
                                             FFMIN((row >> 1) - !(row & 1),
                                                   pic_height - 1),
                                             1);
                    ff_thread_await_progress(&ref_pic->parent->tf,
                                             FFMIN((row >> 1), pic_height - 1),
                                             0);
                } else if (FIELD_PICTURE(h) && !ref_field_picture) { // field referencing one field of a frame
                    ff_thread_await_progress(&ref_pic->parent->tf,
                                             FFMIN(row * 2 + ref_field,
                                                   pic_height - 1),
                                             0);
                } else if (FIELD_PICTURE(h)) {
                    ff_thread_await_progress(&ref_pic->parent->tf,
                                             FFMIN(row, pic_height - 1),
                                             ref_field);
                } else {
                    ff_thread_await_progress(&ref_pic->parent->tf,
                                             FFMIN(row, pic_height - 1),
                                             0);
                }
            }
        }
}
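
/**
 * Motion-compensate one prediction direction of a single partition:
 * quarter-pel luma interpolation through qpix_op and chroma interpolation
 * through chroma_op, falling back to emulated_edge_mc() when the motion
 * vector points outside the reference picture.
 */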
static av_always_inline void mc_dir_part(const H264Context *h, H264SliceContext *sl,
                                         H264Ref *pic,
                                         int n, int square, int height,
                                         int delta, int list,
                                         uint8_t *dest_y, uint8_t *dest_cb,
                                         uint8_t *dest_cr,
                                         int src_x_offset, int src_y_offset,
                                         const qpel_mc_func *qpix_op,
                                         h264_chroma_mc_func chroma_op,
                                         int pixel_shift, int chroma_idc)
{
    const int mx      = sl->mv_cache[list][scan8[n]][0] + src_x_offset * 8;
    int my            = sl->mv_cache[list][scan8[n]][1] + src_y_offset * 8;
    const int luma_xy = (mx & 3) + ((my & 3) << 2);
    ptrdiff_t offset  = (mx >> 2) * (1 << pixel_shift) + (my >> 2) * sl->mb_linesize;
    uint8_t *src_y    = pic->data[0] + offset;
    uint8_t *src_cb, *src_cr;
    int extra_width  = 0;
    int extra_height = 0;
    int emu = 0;
    const int full_mx    = mx >> 2;
    const int full_my    = my >> 2;
    const int pic_width  = 16 * h->mb_width;
    const int pic_height = 16 * h->mb_height >> MB_FIELD(sl);
    int ysh;

    if (mx & 7)
        extra_width -= 3;
    if (my & 7)
        extra_height -= 3;

    if (full_mx                <          0 - extra_width  ||
        full_my                <          0 - extra_height ||
        full_mx + 16 /*FIXME*/ > pic_width  + extra_width  ||
        full_my + 16 /*FIXME*/ > pic_height + extra_height) {
        h->vdsp.emulated_edge_mc(sl->edge_emu_buffer,
                                 src_y - (2 << pixel_shift) - 2 * sl->mb_linesize,
                                 sl->mb_linesize, sl->mb_linesize,
                                 16 + 5, 16 + 5 /*FIXME*/, full_mx - 2,
                                 full_my - 2, pic_width, pic_height);
        src_y = sl->edge_emu_buffer + (2 << pixel_shift) + 2 * sl->mb_linesize;
        emu   = 1;
    }

    qpix_op[luma_xy](dest_y, src_y, sl->mb_linesize); // FIXME try variable height perhaps?
    if (!square)
        qpix_op[luma_xy](dest_y + delta, src_y + delta, sl->mb_linesize);

    if (CONFIG_GRAY && h->flags & AV_CODEC_FLAG_GRAY)
        return;

    if (chroma_idc == 3 /* yuv444 */) {
        src_cb = pic->data[1] + offset;
        if (emu) {
            h->vdsp.emulated_edge_mc(sl->edge_emu_buffer,
                                     src_cb - (2 << pixel_shift) - 2 * sl->mb_linesize,
                                     sl->mb_linesize, sl->mb_linesize,
                                     16 + 5, 16 + 5 /*FIXME*/,
                                     full_mx - 2, full_my - 2,
                                     pic_width, pic_height);
            src_cb = sl->edge_emu_buffer + (2 << pixel_shift) + 2 * sl->mb_linesize;
        }
        qpix_op[luma_xy](dest_cb, src_cb, sl->mb_linesize); // FIXME try variable height perhaps?
        if (!square)
            qpix_op[luma_xy](dest_cb + delta, src_cb + delta, sl->mb_linesize);

        src_cr = pic->data[2] + offset;
        if (emu) {
            h->vdsp.emulated_edge_mc(sl->edge_emu_buffer,
                                     src_cr - (2 << pixel_shift) - 2 * sl->mb_linesize,
                                     sl->mb_linesize, sl->mb_linesize,
                                     16 + 5, 16 + 5 /*FIXME*/,
                                     full_mx - 2, full_my - 2,
                                     pic_width, pic_height);
            src_cr = sl->edge_emu_buffer + (2 << pixel_shift) + 2 * sl->mb_linesize;
        }
        qpix_op[luma_xy](dest_cr, src_cr, sl->mb_linesize); // FIXME try variable height perhaps?
        if (!square)
            qpix_op[luma_xy](dest_cr + delta, src_cr + delta, sl->mb_linesize);
        return;
    }

    ysh = 3 - (chroma_idc == 2 /* yuv422 */);
    if (chroma_idc == 1 /* yuv420 */ && MB_FIELD(sl)) {
        // chroma offset when predicting from a field of opposite parity
        my  += 2 * ((sl->mb_y & 1) - (pic->reference - 1));
        emu |= (my >> 3) < 0 || (my >> 3) + 8 >= (pic_height >> 1);
    }

    src_cb = pic->data[1] + ((mx >> 3) * (1 << pixel_shift)) +
             (my >> ysh) * sl->mb_uvlinesize;
    src_cr = pic->data[2] + ((mx >> 3) * (1 << pixel_shift)) +
             (my >> ysh) * sl->mb_uvlinesize;

    if (emu) {
        h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src_cb,
                                 sl->mb_uvlinesize, sl->mb_uvlinesize,
                                 9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
                                 pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
        src_cb = sl->edge_emu_buffer;
    }
    chroma_op(dest_cb, src_cb, sl->mb_uvlinesize,
              height >> (chroma_idc == 1 /* yuv420 */),
              mx & 7, ((unsigned)my << (chroma_idc == 2 /* yuv422 */)) & 7);

    if (emu) {
        h->vdsp.emulated_edge_mc(sl->edge_emu_buffer, src_cr,
                                 sl->mb_uvlinesize, sl->mb_uvlinesize,
                                 9, 8 * chroma_idc + 1, (mx >> 3), (my >> ysh),
                                 pic_width >> 1, pic_height >> (chroma_idc == 1 /* yuv420 */));
        src_cr = sl->edge_emu_buffer;
    }
    chroma_op(dest_cr, src_cr, sl->mb_uvlinesize, height >> (chroma_idc == 1 /* yuv420 */),
              mx & 7, ((unsigned)my << (chroma_idc == 2 /* yuv422 */)) & 7);
}
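
/**
 * Unweighted MC of one partition: predict from list 0 and/or list 1, using
 * the put functions for the first direction and the avg functions for the
 * second, so that bi-prediction averages the two references.
 */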
static av_always_inline void mc_part_std(const H264Context *h, H264SliceContext *sl,
                                         int n, int square,
                                         int height, int delta,
                                         uint8_t *dest_y, uint8_t *dest_cb,
                                         uint8_t *dest_cr,
                                         int x_offset, int y_offset,
                                         const qpel_mc_func *qpix_put,
                                         h264_chroma_mc_func chroma_put,
                                         const qpel_mc_func *qpix_avg,
                                         h264_chroma_mc_func chroma_avg,
                                         int list0, int list1,
                                         int pixel_shift, int chroma_idc)
{
    const qpel_mc_func *qpix_op   = qpix_put;
    h264_chroma_mc_func chroma_op = chroma_put;

    dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
    if (chroma_idc == 3 /* yuv444 */) {
        dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
        dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
    } else if (chroma_idc == 2 /* yuv422 */) {
        dest_cb += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
        dest_cr += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
    } else { /* yuv420 */
        dest_cb += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
        dest_cr += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
    }
    x_offset += 8 * sl->mb_x;
    y_offset += 8 * (sl->mb_y >> MB_FIELD(sl));

    if (list0) {
        H264Ref *ref = &sl->ref_list[0][sl->ref_cache[0][scan8[n]]];
        mc_dir_part(h, sl, ref, n, square, height, delta, 0,
                    dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_op, chroma_op, pixel_shift, chroma_idc);

        qpix_op   = qpix_avg;
        chroma_op = chroma_avg;
    }

    if (list1) {
        H264Ref *ref = &sl->ref_list[1][sl->ref_cache[1][scan8[n]]];
        mc_dir_part(h, sl, ref, n, square, height, delta, 1,
                    dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_op, chroma_op, pixel_shift, chroma_idc);
    }
}
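
/**
 * MC of one partition with explicit or implicit weighted prediction. For
 * bi-prediction the second direction is predicted into the bipred
 * scratchpad and the two predictions are combined with the biweight
 * functions.
 */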
static av_always_inline void mc_part_weighted(const H264Context *h, H264SliceContext *sl,
                                              int n, int square,
                                              int height, int delta,
                                              uint8_t *dest_y, uint8_t *dest_cb,
                                              uint8_t *dest_cr,
                                              int x_offset, int y_offset,
                                              const qpel_mc_func *qpix_put,
                                              h264_chroma_mc_func chroma_put,
                                              h264_weight_func luma_weight_op,
                                              h264_weight_func chroma_weight_op,
                                              h264_biweight_func luma_weight_avg,
                                              h264_biweight_func chroma_weight_avg,
                                              int list0, int list1,
                                              int pixel_shift, int chroma_idc)
{
    int chroma_height;

    dest_y += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
    if (chroma_idc == 3 /* yuv444 */) {
        chroma_height     = height;
        chroma_weight_avg = luma_weight_avg;
        chroma_weight_op  = luma_weight_op;
        dest_cb += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
        dest_cr += (2 * x_offset << pixel_shift) + 2 * y_offset * sl->mb_linesize;
    } else if (chroma_idc == 2 /* yuv422 */) {
        chroma_height = height;
        dest_cb      += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
        dest_cr      += (x_offset << pixel_shift) + 2 * y_offset * sl->mb_uvlinesize;
    } else { /* yuv420 */
        chroma_height = height >> 1;
        dest_cb      += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
        dest_cr      += (x_offset << pixel_shift) + y_offset * sl->mb_uvlinesize;
    }
    x_offset += 8 * sl->mb_x;
    y_offset += 8 * (sl->mb_y >> MB_FIELD(sl));

    if (list0 && list1) {
        /* don't optimize for luma-only case, since B-frames usually
         * use implicit weights => chroma too. */
        uint8_t *tmp_cb = sl->bipred_scratchpad;
        uint8_t *tmp_cr = sl->bipred_scratchpad + (16 << pixel_shift);
        uint8_t *tmp_y  = sl->bipred_scratchpad + 16 * sl->mb_uvlinesize;
        int refn0       = sl->ref_cache[0][scan8[n]];
        int refn1       = sl->ref_cache[1][scan8[n]];

        mc_dir_part(h, sl, &sl->ref_list[0][refn0], n, square, height, delta, 0,
                    dest_y, dest_cb, dest_cr,
                    x_offset, y_offset, qpix_put, chroma_put,
                    pixel_shift, chroma_idc);
        mc_dir_part(h, sl, &sl->ref_list[1][refn1], n, square, height, delta, 1,
                    tmp_y, tmp_cb, tmp_cr,
                    x_offset, y_offset, qpix_put, chroma_put,
                    pixel_shift, chroma_idc);

        if (sl->pwt.use_weight == 2) {
            int weight0 = sl->pwt.implicit_weight[refn0][refn1][sl->mb_y & 1];
            int weight1 = 64 - weight0;
            luma_weight_avg(dest_y, tmp_y, sl->mb_linesize,
                            height, 5, weight0, weight1, 0);
            if (!CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
                chroma_weight_avg(dest_cb, tmp_cb, sl->mb_uvlinesize,
                                  chroma_height, 5, weight0, weight1, 0);
                chroma_weight_avg(dest_cr, tmp_cr, sl->mb_uvlinesize,
                                  chroma_height, 5, weight0, weight1, 0);
            }
        } else {
            luma_weight_avg(dest_y, tmp_y, sl->mb_linesize, height,
                            sl->pwt.luma_log2_weight_denom,
                            sl->pwt.luma_weight[refn0][0][0],
                            sl->pwt.luma_weight[refn1][1][0],
                            sl->pwt.luma_weight[refn0][0][1] +
                            sl->pwt.luma_weight[refn1][1][1]);
            if (!CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
                chroma_weight_avg(dest_cb, tmp_cb, sl->mb_uvlinesize, chroma_height,
                                  sl->pwt.chroma_log2_weight_denom,
                                  sl->pwt.chroma_weight[refn0][0][0][0],
                                  sl->pwt.chroma_weight[refn1][1][0][0],
                                  sl->pwt.chroma_weight[refn0][0][0][1] +
                                  sl->pwt.chroma_weight[refn1][1][0][1]);
                chroma_weight_avg(dest_cr, tmp_cr, sl->mb_uvlinesize, chroma_height,
                                  sl->pwt.chroma_log2_weight_denom,
                                  sl->pwt.chroma_weight[refn0][0][1][0],
                                  sl->pwt.chroma_weight[refn1][1][1][0],
                                  sl->pwt.chroma_weight[refn0][0][1][1] +
                                  sl->pwt.chroma_weight[refn1][1][1][1]);
            }
        }
    } else {
        int list = list1 ? 1 : 0;
        int refn = sl->ref_cache[list][scan8[n]];
        H264Ref *ref = &sl->ref_list[list][refn];
        mc_dir_part(h, sl, ref, n, square, height, delta, list,
                    dest_y, dest_cb, dest_cr, x_offset, y_offset,
                    qpix_put, chroma_put, pixel_shift, chroma_idc);

        luma_weight_op(dest_y, sl->mb_linesize, height,
                       sl->pwt.luma_log2_weight_denom,
                       sl->pwt.luma_weight[refn][list][0],
                       sl->pwt.luma_weight[refn][list][1]);
        if (!CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
            if (sl->pwt.use_weight_chroma) {
                chroma_weight_op(dest_cb, sl->mb_uvlinesize, chroma_height,
                                 sl->pwt.chroma_log2_weight_denom,
                                 sl->pwt.chroma_weight[refn][list][0][0],
                                 sl->pwt.chroma_weight[refn][list][0][1]);
                chroma_weight_op(dest_cr, sl->mb_uvlinesize, chroma_height,
                                 sl->pwt.chroma_log2_weight_denom,
                                 sl->pwt.chroma_weight[refn][list][1][0],
                                 sl->pwt.chroma_weight[refn][list][1][1]);
            }
        }
    }
}
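
/**
 * Prefetch reference picture data that upcoming macroblocks are likely to
 * read, based on the motion vector of the current macroblock.
 */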
static av_always_inline void prefetch_motion(const H264Context *h, H264SliceContext *sl,
                                             int list, int pixel_shift,
                                             int chroma_idc)
{
    /* fetch pixels for estimated mv 4 macroblocks ahead
     * optimized for 64byte cache lines */
    const int refn = sl->ref_cache[list][scan8[0]];
    if (refn >= 0) {
        const int mx  = (sl->mv_cache[list][scan8[0]][0] >> 2) + 16 * sl->mb_x + 8;
        const int my  = (sl->mv_cache[list][scan8[0]][1] >> 2) + 16 * sl->mb_y;
        uint8_t **src = sl->ref_list[list][refn].data;
        int off       = mx * (1 << pixel_shift) +
                        (my + (sl->mb_x & 3) * 4) * sl->mb_linesize +
                        (64 << pixel_shift);
        h->vdsp.prefetch(src[0] + off, sl->linesize, 4);
        if (chroma_idc == 3 /* yuv444 */) {
            h->vdsp.prefetch(src[1] + off, sl->linesize, 4);
            h->vdsp.prefetch(src[2] + off, sl->linesize, 4);
        } else {
            off = ((mx >> 1) + 64) * (1 << pixel_shift) + ((my >> 1) + (sl->mb_x & 7)) * sl->uvlinesize;
            h->vdsp.prefetch(src[1] + off, src[2] - src[1], 2);
        }
    }
}
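
/**
 * Exchange (or copy) the line of samples above the current macroblock with
 * the unfiltered copy saved in sl->top_borders[], so that intra prediction
 * sees the pre-deblocking neighbours while the picture itself keeps the
 * filtered samples.
 */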
static av_always_inline void xchg_mb_border(const H264Context *h, H264SliceContext *sl,
                                            uint8_t *src_y,
                                            uint8_t *src_cb, uint8_t *src_cr,
                                            int linesize, int uvlinesize,
                                            int xchg, int chroma444,
                                            int simple, int pixel_shift)
{
    int deblock_topleft;
    int deblock_top;
    int top_idx = 1;
    uint8_t *top_border_m1;
    uint8_t *top_border;

    if (!simple && FRAME_MBAFF(h)) {
        if (sl->mb_y & 1) {
            if (!MB_MBAFF(sl))
                return;
        } else {
            top_idx = MB_MBAFF(sl) ? 0 : 1;
        }
    }

    if (sl->deblocking_filter == 2) {
        deblock_topleft = h->slice_table[sl->mb_xy - 1 - (h->mb_stride << MB_FIELD(sl))] == sl->slice_num;
        deblock_top     = sl->top_type;
    } else {
        deblock_topleft = (sl->mb_x > 0);
        deblock_top     = (sl->mb_y > !!MB_FIELD(sl));
    }

    src_y  -= linesize   + 1 + pixel_shift;
    src_cb -= uvlinesize + 1 + pixel_shift;
    src_cr -= uvlinesize + 1 + pixel_shift;

    top_border_m1 = sl->top_borders[top_idx][sl->mb_x - 1];
    top_border    = sl->top_borders[top_idx][sl->mb_x];

#define XCHG(a, b, xchg)                        \
    if (pixel_shift) {                          \
        if (xchg) {                             \
            AV_SWAP64(b + 0, a + 0);            \
            AV_SWAP64(b + 8, a + 8);            \
        } else {                                \
            AV_COPY128(b, a);                   \
        }                                       \
    } else if (xchg)                            \
        AV_SWAP64(b, a);                        \
    else                                        \
        AV_COPY64(b, a);

    if (deblock_top) {
        if (deblock_topleft) {
            XCHG(top_border_m1 + (8 << pixel_shift),
                 src_y - (7 << pixel_shift), 1);
        }
        XCHG(top_border + (0 << pixel_shift), src_y + (1 << pixel_shift), xchg);
        XCHG(top_border + (8 << pixel_shift), src_y + (9 << pixel_shift), 1);
        if (sl->mb_x + 1 < h->mb_width) {
            XCHG(sl->top_borders[top_idx][sl->mb_x + 1],
                 src_y + (17 << pixel_shift), 1);
        }
        if (simple || !CONFIG_GRAY || !(h->flags & AV_CODEC_FLAG_GRAY)) {
            if (chroma444) {
                if (deblock_topleft) {
                    XCHG(top_border_m1 + (24 << pixel_shift), src_cb - (7 << pixel_shift), 1);
                    XCHG(top_border_m1 + (40 << pixel_shift), src_cr - (7 << pixel_shift), 1);
                }
                XCHG(top_border + (16 << pixel_shift), src_cb + (1 << pixel_shift), xchg);
                XCHG(top_border + (24 << pixel_shift), src_cb + (9 << pixel_shift), 1);
                XCHG(top_border + (32 << pixel_shift), src_cr + (1 << pixel_shift), xchg);
                XCHG(top_border + (40 << pixel_shift), src_cr + (9 << pixel_shift), 1);
                if (sl->mb_x + 1 < h->mb_width) {
                    XCHG(sl->top_borders[top_idx][sl->mb_x + 1] + (16 << pixel_shift), src_cb + (17 << pixel_shift), 1);
                    XCHG(sl->top_borders[top_idx][sl->mb_x + 1] + (32 << pixel_shift), src_cr + (17 << pixel_shift), 1);
                }
            } else {
                if (deblock_topleft) {
                    XCHG(top_border_m1 + (16 << pixel_shift), src_cb - (7 << pixel_shift), 1);
                    XCHG(top_border_m1 + (24 << pixel_shift), src_cr - (7 << pixel_shift), 1);
                }
                XCHG(top_border + (16 << pixel_shift), src_cb + 1 + pixel_shift, 1);
                XCHG(top_border + (24 << pixel_shift), src_cr + 1 + pixel_shift, 1);
            }
        }
    }
}
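
/**
 * Read and write residual coefficients independently of their storage size:
 * 32-bit entries for high bit depth, 16-bit entries otherwise.
 */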
static av_always_inline int dctcoef_get(int16_t *mb, int high_bit_depth,
                                        int index)
{
    if (high_bit_depth) {
        return AV_RN32A(((int32_t *)mb) + index);
    } else
        return AV_RN16A(mb + index);
}

static av_always_inline void dctcoef_set(int16_t *mb, int high_bit_depth,
                                         int index, int value)
{
    if (high_bit_depth) {
        AV_WN32A(((int32_t *)mb) + index, value);
    } else
        AV_WN16A(mb + index, value);
}
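
/**
 * Intra prediction and residual reconstruction of one luma (or 4:4:4 colour)
 * plane of an intra macroblock: 4x4, 8x8 and 16x16 prediction modes, the
 * lossless transform-bypass paths, and the luma DC transform of intra16x16
 * blocks.
 */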
static av_always_inline void hl_decode_mb_predict_luma(const H264Context *h,
                                                       H264SliceContext *sl,
                                                       int mb_type, int simple,
                                                       int transform_bypass,
                                                       int pixel_shift,
                                                       const int *block_offset,
                                                       int linesize,
                                                       uint8_t *dest_y, int p)
{
    void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
    void (*idct_dc_add)(uint8_t *dst, int16_t *block, int stride);
    int i;
    int qscale = p == 0 ? sl->qscale : sl->chroma_qp[p - 1];
    block_offset += 16 * p;
    if (IS_INTRA4x4(mb_type)) {
        if (IS_8x8DCT(mb_type)) {
            if (transform_bypass) {
                idct_dc_add  =
                idct_add     = h->h264dsp.h264_add_pixels8_clear;
            } else {
                idct_dc_add = h->h264dsp.h264_idct8_dc_add;
                idct_add    = h->h264dsp.h264_idct8_add;
            }
            for (i = 0; i < 16; i += 4) {
                uint8_t *const ptr = dest_y + block_offset[i];
                const int dir      = sl->intra4x4_pred_mode_cache[scan8[i]];
                if (transform_bypass && h->ps.sps->profile_idc == 244 && dir <= 1) {
                    if (h->x264_build < 151U) {
                        h->hpc.pred8x8l_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                    } else
                        h->hpc.pred8x8l_filter_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift),
                                                        (sl->topleft_samples_available << i) & 0x8000,
                                                        (sl->topright_samples_available << i) & 0x4000, linesize);
                } else {
                    const int nnz = sl->non_zero_count_cache[scan8[i + p * 16]];
                    h->hpc.pred8x8l[dir](ptr, (sl->topleft_samples_available << i) & 0x8000,
                                         (sl->topright_samples_available << i) & 0x4000, linesize);
                    if (nnz) {
                        if (nnz == 1 && dctcoef_get(sl->mb, pixel_shift, i * 16 + p * 256))
                            idct_dc_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                        else
                            idct_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                    }
                }
            }
        } else {
            if (transform_bypass) {
                idct_dc_add  =
                idct_add     = h->h264dsp.h264_add_pixels4_clear;
            } else {
                idct_dc_add = h->h264dsp.h264_idct_dc_add;
                idct_add    = h->h264dsp.h264_idct_add;
            }
            for (i = 0; i < 16; i++) {
                uint8_t *const ptr = dest_y + block_offset[i];
                const int dir      = sl->intra4x4_pred_mode_cache[scan8[i]];

                if (transform_bypass && h->ps.sps->profile_idc == 244 && dir <= 1) {
                    h->hpc.pred4x4_add[dir](ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                } else {
                    uint8_t *topright;
                    int nnz, tr;
                    uint64_t tr_high;
                    if (dir == DIAG_DOWN_LEFT_PRED || dir == VERT_LEFT_PRED) {
                        const int topright_avail = (sl->topright_samples_available << i) & 0x8000;
                        av_assert2(sl->mb_y || linesize <= block_offset[i]);
                        if (!topright_avail) {
                            if (pixel_shift) {
                                tr_high  = ((uint16_t *)ptr)[3 - linesize / 2] * 0x0001000100010001ULL;
                                topright = (uint8_t *)&tr_high;
                            } else {
                                tr       = ptr[3 - linesize] * 0x01010101u;
                                topright = (uint8_t *)&tr;
                            }
                        } else
                            topright = ptr + (4 << pixel_shift) - linesize;
                    } else
                        topright = NULL;

                    h->hpc.pred4x4[dir](ptr, topright, linesize);
                    nnz = sl->non_zero_count_cache[scan8[i + p * 16]];
                    if (nnz) {
                        if (nnz == 1 && dctcoef_get(sl->mb, pixel_shift, i * 16 + p * 256))
                            idct_dc_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                        else
                            idct_add(ptr, sl->mb + (i * 16 + p * 256 << pixel_shift), linesize);
                    }
                }
            }
        }
    } else {
        h->hpc.pred16x16[sl->intra16x16_pred_mode](dest_y, linesize);
        if (sl->non_zero_count_cache[scan8[LUMA_DC_BLOCK_INDEX + p]]) {
            if (!transform_bypass)
                h->h264dsp.h264_luma_dc_dequant_idct(sl->mb + (p * 256 << pixel_shift),
                                                     sl->mb_luma_dc[p],
                                                     h->ps.pps->dequant4_coeff[p][qscale][0]);
            else {
                static const uint8_t dc_mapping[16] = {
                     0 * 16,  1 * 16,  4 * 16,  5 * 16,
                     2 * 16,  3 * 16,  6 * 16,  7 * 16,
                     8 * 16,  9 * 16, 12 * 16, 13 * 16,
                    10 * 16, 11 * 16, 14 * 16, 15 * 16
                };
                for (i = 0; i < 16; i++)
                    dctcoef_set(sl->mb + (p * 256 << pixel_shift),
                                pixel_shift, dc_mapping[i],
                                dctcoef_get(sl->mb_luma_dc[p],
                                            pixel_shift, i));
            }
        }
    }
}
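
/**
 * Add the inverse-transformed residual of one plane for macroblocks that are
 * not intra 4x4: the AC coefficients of intra16x16 blocks and the residual
 * of inter blocks (when cbp & 15), including the transform-bypass paths.
 */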
static av_always_inline void hl_decode_mb_idct_luma(const H264Context *h, H264SliceContext *sl,
                                                    int mb_type, int simple,
                                                    int transform_bypass,
                                                    int pixel_shift,
                                                    const int *block_offset,
                                                    int linesize,
                                                    uint8_t *dest_y, int p)
{
    void (*idct_add)(uint8_t *dst, int16_t *block, int stride);
    int i;
    block_offset += 16 * p;
    if (!IS_INTRA4x4(mb_type)) {
        if (IS_INTRA16x16(mb_type)) {
            if (transform_bypass) {
                if (h->ps.sps->profile_idc == 244 &&
                    (sl->intra16x16_pred_mode == VERT_PRED8x8 ||
                     sl->intra16x16_pred_mode == HOR_PRED8x8)) {
                    h->hpc.pred16x16_add[sl->intra16x16_pred_mode](dest_y, block_offset,
                                                                   sl->mb + (p * 256 << pixel_shift),
                                                                   linesize);
                } else {
                    for (i = 0; i < 16; i++)
                        if (sl->non_zero_count_cache[scan8[i + p * 16]] ||
                            dctcoef_get(sl->mb, pixel_shift, i * 16 + p * 256))
                            h->h264dsp.h264_add_pixels4_clear(dest_y + block_offset[i],
                                                              sl->mb + (i * 16 + p * 256 << pixel_shift),
                                                              linesize);
                }
            } else {
                h->h264dsp.h264_idct_add16intra(dest_y, block_offset,
                                                sl->mb + (p * 256 << pixel_shift),
                                                linesize,
                                                sl->non_zero_count_cache + p * 5 * 8);
            }
        } else if (sl->cbp & 15) {
            if (transform_bypass) {
                const int di = IS_8x8DCT(mb_type) ? 4 : 1;
                idct_add = IS_8x8DCT(mb_type) ? h->h264dsp.h264_add_pixels8_clear
                                              : h->h264dsp.h264_add_pixels4_clear;
                for (i = 0; i < 16; i += di)
                    if (sl->non_zero_count_cache[scan8[i + p * 16]])
                        idct_add(dest_y + block_offset[i],
                                 sl->mb + (i * 16 + p * 256 << pixel_shift),
                                 linesize);
            } else {
                if (IS_8x8DCT(mb_type))
                    h->h264dsp.h264_idct8_add4(dest_y, block_offset,
                                               sl->mb + (p * 256 << pixel_shift),
                                               linesize,
                                               sl->non_zero_count_cache + p * 5 * 8);
                else
                    h->h264dsp.h264_idct_add16(dest_y, block_offset,
                                               sl->mb + (p * 256 << pixel_shift),
                                               linesize,
                                               sl->non_zero_count_cache + p * 5 * 8);
            }
        }
    }
}
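
/*
 * h264_mb_template.c is included several times with different settings to
 * instantiate the bit-depth and chroma-format specific hl_decode_mb_*
 * variants dispatched by ff_h264_hl_decode_mb() below.
 */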
#define BITS   8
#define SIMPLE 1
#include "h264_mb_template.c"

#undef  BITS
#define BITS   16
#include "h264_mb_template.c"

#undef  SIMPLE
#define SIMPLE 0
#include "h264_mb_template.c"
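
/**
 * Decode one macroblock, dispatching to the specialised variant that matches
 * the chroma format, bit depth and complexity of the current stream.
 */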
void ff_h264_hl_decode_mb(const H264Context *h, H264SliceContext *sl)
{
    const int mb_xy   = sl->mb_xy;
    const int mb_type = h->cur_pic.mb_type[mb_xy];
    int is_complex    = CONFIG_SMALL || sl->is_complex ||
                        IS_INTRA_PCM(mb_type) || sl->qscale == 0;

    if (CHROMA444(h)) {
        if (is_complex || h->pixel_shift)
            hl_decode_mb_444_complex(h, sl);
        else
            hl_decode_mb_444_simple_8(h, sl);
    } else if (is_complex) {
        hl_decode_mb_complex(h, sl);
    } else if (h->pixel_shift) {
        hl_decode_mb_simple_16(h, sl);
    } else
        hl_decode_mb_simple_8(h, sl);
}