/*
 * H.26L/H.264/AVC/JVT/14496-10/... decoder
 * Copyright (c) 2003 Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * H.264 / AVC / MPEG-4 part10 codec.
 * @author Michael Niedermayer <michaelni@gmx.at>
 */

#include "libavutil/display.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/stereo3d.h"
#include "libavutil/timer.h"
#include "internal.h"
#include "bytestream.h"
#include "cabac.h"
#include "cabac_functions.h"
#include "error_resilience.h"
#include "avcodec.h"
#include "golomb_legacy.h"
#include "h264.h"
#include "h264dec.h"
#include "h2645_parse.h"
#include "h264data.h"
#include "h264chroma.h"
#include "h264_mvpred.h"
#include "h264_ps.h"
#include "hwaccel.h"
#include "mathops.h"
#include "me_cmp.h"
#include "mpegutils.h"
#include "profiles.h"
#include "rectangle.h"
#include "thread.h"

#include <assert.h>

const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };

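/**
 * Macroblock decode callback for the shared error concealment code
 * (ERContext.decode_mb): point the first slice context at the requested
 * macroblock, fill the reference and motion vector caches with the
 * concealment values and run the regular macroblock decoding path.
 */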
static void h264_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                              int (*mv)[2][4][2],
                              int mb_x, int mb_y, int mb_intra, int mb_skipped)
{
    H264Context *h = opaque;
    H264SliceContext *sl = &h->slice_ctx[0];

    sl->mb_x = mb_x;
    sl->mb_y = mb_y;
    sl->mb_xy = mb_x + mb_y * h->mb_stride;
    memset(sl->non_zero_count_cache, 0, sizeof(sl->non_zero_count_cache));
    assert(ref >= 0);
    /* FIXME: It is possible albeit uncommon that slice references
     * differ between slices. We take the easy approach and ignore
     * it for now. If this turns out to have any relevance in
     * practice then correct remapping should be added. */
    if (ref >= sl->ref_count[0])
        ref = 0;
    fill_rectangle(&h->cur_pic.ref_index[0][4 * sl->mb_xy],
                   2, 2, 2, ref, 1);
    fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
    fill_rectangle(sl->mv_cache[0][scan8[0]], 4, 4, 8,
                   pack16to32((*mv)[0][0][0], (*mv)[0][0][1]), 4);
    assert(!FRAME_MBAFF(h));
    ff_h264_hl_decode_mb(h, &h->slice_ctx[0]);
}

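/**
 * Report a decoded horizontal band of the current picture to the caller
 * through AVCodecContext.draw_horiz_band. For field pictures the row and
 * height are doubled, and nothing is reported during the first field
 * unless SLICE_FLAG_ALLOW_FIELD is set.
 */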
void ff_h264_draw_horiz_band(const H264Context *h, H264SliceContext *sl,
                             int y, int height)
{
    AVCodecContext *avctx = h->avctx;
    const AVFrame *src = h->cur_pic.f;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
    int vshift = desc->log2_chroma_h;
    const int field_pic = h->picture_structure != PICT_FRAME;
    if (field_pic) {
        height <<= 1;
        y      <<= 1;
    }

    height = FFMIN(height, avctx->height - y);

    if (field_pic && h->first_field && !(avctx->slice_flags & SLICE_FLAG_ALLOW_FIELD))
        return;

    if (avctx->draw_horiz_band) {
        int offset[AV_NUM_DATA_POINTERS];
        int i;

        offset[0] = y * src->linesize[0];
        offset[1] =
        offset[2] = (y >> vshift) * src->linesize[1];
        for (i = 3; i < AV_NUM_DATA_POINTERS; i++)
            offset[i] = 0;

        emms_c();

        avctx->draw_horiz_band(avctx, src, offset,
                               y, h->picture_structure, height);
    }
}

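/**
 * Free the per-context tables and the per-slice-context scratch buffers
 * allocated by ff_h264_alloc_tables() and ff_h264_slice_context_init().
 */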
void ff_h264_free_tables(H264Context *h)
{
    int i;

    av_freep(&h->intra4x4_pred_mode);
    av_freep(&h->chroma_pred_mode_table);
    av_freep(&h->cbp_table);
    av_freep(&h->mvd_table[0]);
    av_freep(&h->mvd_table[1]);
    av_freep(&h->direct_table);
    av_freep(&h->non_zero_count);
    av_freep(&h->slice_table_base);
    h->slice_table = NULL;
    av_freep(&h->list_counts);

    av_freep(&h->mb2b_xy);
    av_freep(&h->mb2br_xy);

    av_buffer_pool_uninit(&h->qscale_table_pool);
    av_buffer_pool_uninit(&h->mb_type_pool);
    av_buffer_pool_uninit(&h->motion_val_pool);
    av_buffer_pool_uninit(&h->ref_index_pool);

    for (i = 0; i < h->nb_slice_ctx; i++) {
        H264SliceContext *sl = &h->slice_ctx[i];

        av_freep(&sl->dc_val_base);
        av_freep(&sl->er.mb_index2xy);
        av_freep(&sl->er.error_status_table);
        av_freep(&sl->er.er_temp_buffer);

        av_freep(&sl->bipred_scratchpad);
        av_freep(&sl->edge_emu_buffer);
        av_freep(&sl->top_borders[0]);
        av_freep(&sl->top_borders[1]);

        sl->bipred_scratchpad_allocated = 0;
        sl->edge_emu_buffer_allocated   = 0;
        sl->top_borders_allocated[0]    = 0;
        sl->top_borders_allocated[1]    = 0;
    }
}

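/**
 * Allocate the macroblock-level tables (intra prediction modes, non-zero
 * counts, CBP, MVD, slice table, ...) and fill the mb2b_xy/mb2br_xy index
 * lookup tables. On failure everything allocated so far is freed again and
 * AVERROR(ENOMEM) is returned.
 */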
int ff_h264_alloc_tables(H264Context *h)
{
    const int big_mb_num = h->mb_stride * (h->mb_height + 1);
    const int row_mb_num = h->mb_stride * 2 * h->nb_slice_ctx;
    int x, y;

    FF_ALLOCZ_OR_GOTO(h->avctx, h->intra4x4_pred_mode,
                      row_mb_num * 8 * sizeof(uint8_t), fail)
    h->slice_ctx[0].intra4x4_pred_mode = h->intra4x4_pred_mode;

    FF_ALLOCZ_OR_GOTO(h->avctx, h->non_zero_count,
                      big_mb_num * 48 * sizeof(uint8_t), fail)
    FF_ALLOCZ_OR_GOTO(h->avctx, h->slice_table_base,
                      (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base), fail)
    FF_ALLOCZ_OR_GOTO(h->avctx, h->cbp_table,
                      big_mb_num * sizeof(uint16_t), fail)
    FF_ALLOCZ_OR_GOTO(h->avctx, h->chroma_pred_mode_table,
                      big_mb_num * sizeof(uint8_t), fail)
    FF_ALLOCZ_OR_GOTO(h->avctx, h->mvd_table[0],
                      16 * row_mb_num * sizeof(uint8_t), fail);
    FF_ALLOCZ_OR_GOTO(h->avctx, h->mvd_table[1],
                      16 * row_mb_num * sizeof(uint8_t), fail);
    h->slice_ctx[0].mvd_table[0] = h->mvd_table[0];
    h->slice_ctx[0].mvd_table[1] = h->mvd_table[1];

    FF_ALLOCZ_OR_GOTO(h->avctx, h->direct_table,
                      4 * big_mb_num * sizeof(uint8_t), fail);
    FF_ALLOCZ_OR_GOTO(h->avctx, h->list_counts,
                      big_mb_num * sizeof(uint8_t), fail)

    memset(h->slice_table_base, -1,
           (big_mb_num + h->mb_stride) * sizeof(*h->slice_table_base));
    h->slice_table = h->slice_table_base + h->mb_stride * 2 + 1;

    FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2b_xy,
                      big_mb_num * sizeof(uint32_t), fail);
    FF_ALLOCZ_OR_GOTO(h->avctx, h->mb2br_xy,
                      big_mb_num * sizeof(uint32_t), fail);
    for (y = 0; y < h->mb_height; y++)
        for (x = 0; x < h->mb_width; x++) {
            const int mb_xy = x + y * h->mb_stride;
            const int b_xy  = 4 * x + 4 * y * h->b_stride;

            h->mb2b_xy[mb_xy]  = b_xy;
            h->mb2br_xy[mb_xy] = 8 * (FMO ? mb_xy : (mb_xy % (2 * h->mb_stride)));
        }

    return 0;

fail:
    ff_h264_free_tables(h);
    return AVERROR(ENOMEM);
}

/**
 * Init context
 * Allocate buffers which are not shared amongst multiple threads.
 */
int ff_h264_slice_context_init(H264Context *h, H264SliceContext *sl)
{
    ERContext *er = &sl->er;
    int mb_array_size = h->mb_height * h->mb_stride;
    int y_size  = (2 * h->mb_width + 1) * (2 * h->mb_height + 1);
    int c_size  = h->mb_stride * (h->mb_height + 1);
    int yc_size = y_size + 2 * c_size;
    int x, y, i;

    sl->ref_cache[0][scan8[5]  + 1] =
    sl->ref_cache[0][scan8[7]  + 1] =
    sl->ref_cache[0][scan8[13] + 1] =
    sl->ref_cache[1][scan8[5]  + 1] =
    sl->ref_cache[1][scan8[7]  + 1] =
    sl->ref_cache[1][scan8[13] + 1] = PART_NOT_AVAILABLE;

    if (CONFIG_ERROR_RESILIENCE) {
        /* init ER */
        er->avctx          = h->avctx;
        er->decode_mb      = h264_er_decode_mb;
        er->opaque         = h;
        er->quarter_sample = 1;

        er->mb_num    = h->mb_num;
        er->mb_width  = h->mb_width;
        er->mb_height = h->mb_height;
        er->mb_stride = h->mb_stride;
        er->b8_stride = h->mb_width * 2 + 1;

        // error resilience code looks cleaner with this
        FF_ALLOCZ_OR_GOTO(h->avctx, er->mb_index2xy,
                          (h->mb_num + 1) * sizeof(int), fail);

        for (y = 0; y < h->mb_height; y++)
            for (x = 0; x < h->mb_width; x++)
                er->mb_index2xy[x + y * h->mb_width] = x + y * h->mb_stride;

        er->mb_index2xy[h->mb_height * h->mb_width] = (h->mb_height - 1) *
                                                      h->mb_stride + h->mb_width;

        FF_ALLOCZ_OR_GOTO(h->avctx, er->error_status_table,
                          mb_array_size * sizeof(uint8_t), fail);

        FF_ALLOC_OR_GOTO(h->avctx, er->er_temp_buffer,
                         h->mb_height * h->mb_stride, fail);

        FF_ALLOCZ_OR_GOTO(h->avctx, sl->dc_val_base,
                          yc_size * sizeof(int16_t), fail);
        er->dc_val[0] = sl->dc_val_base + h->mb_width * 2 + 2;
        er->dc_val[1] = sl->dc_val_base + y_size + h->mb_stride + 1;
        er->dc_val[2] = er->dc_val[1] + c_size;
        for (i = 0; i < yc_size; i++)
            sl->dc_val_base[i] = 1024;
    }

    return 0;

fail:
    return AVERROR(ENOMEM); // ff_h264_free_tables will clean up for us
}

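/**
 * One-time initialization of the H264Context that does not depend on the
 * stream dimensions: default field values, the slice contexts and the frame
 * structures for the DPB, the current picture and the output frame.
 */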
static int h264_init_context(AVCodecContext *avctx, H264Context *h)
{
    int i;

    h->avctx = avctx;

    h->width_from_caller  = avctx->width;
    h->height_from_caller = avctx->height;

    h->picture_structure = PICT_FRAME;
    h->workaround_bugs   = avctx->workaround_bugs;
    h->flags             = avctx->flags;
    h->poc.prev_poc_msb  = 1 << 16;
    h->recovery_frame    = -1;
    h->x264_build        = -1;
    h->frame_recovered   = 0;

    h->next_outputed_poc = INT_MIN;
    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
        h->last_pocs[i] = INT_MIN;

    ff_h264_sei_uninit(&h->sei);

    avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;

    h->nb_slice_ctx = (avctx->active_thread_type & FF_THREAD_SLICE) ? avctx->thread_count : 1;
    h->slice_ctx    = av_mallocz_array(h->nb_slice_ctx, sizeof(*h->slice_ctx));
    if (!h->slice_ctx) {
        h->nb_slice_ctx = 0;
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
        h->DPB[i].f = av_frame_alloc();
        if (!h->DPB[i].f)
            return AVERROR(ENOMEM);
    }

    h->cur_pic.f = av_frame_alloc();
    if (!h->cur_pic.f)
        return AVERROR(ENOMEM);

    h->output_frame = av_frame_alloc();
    if (!h->output_frame)
        return AVERROR(ENOMEM);

    for (i = 0; i < h->nb_slice_ctx; i++)
        h->slice_ctx[i].h264 = h;

    return 0;
}

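/**
 * Free everything owned by the decoder: tables, DPB pictures, slice
 * contexts, parameter sets and the NAL unit parsing state.
 */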
static av_cold int h264_decode_end(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;
    int i;

    ff_h264_free_tables(h);

    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++) {
        ff_h264_unref_picture(h, &h->DPB[i]);
        av_frame_free(&h->DPB[i].f);
    }

    h->cur_pic_ptr = NULL;

    av_freep(&h->slice_ctx);
    h->nb_slice_ctx = 0;

    for (i = 0; i < MAX_SPS_COUNT; i++)
        av_buffer_unref(&h->ps.sps_list[i]);

    for (i = 0; i < MAX_PPS_COUNT; i++)
        av_buffer_unref(&h->ps.pps_list[i]);

    ff_h2645_packet_uninit(&h->pkt);

    ff_h264_unref_picture(h, &h->cur_pic);
    av_frame_free(&h->cur_pic.f);
    av_frame_free(&h->output_frame);

    return 0;
}

static AVOnce h264_vlc_init = AV_ONCE_INIT;

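/**
 * Decoder init callback: set up the context, build the VLC tables once per
 * process and parse any extradata (avcC or Annex B) supplied by the caller.
 */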
static av_cold int h264_decode_init(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;
    int ret;

    ret = h264_init_context(avctx, h);
    if (ret < 0)
        return ret;

    ret = ff_thread_once(&h264_vlc_init, ff_h264_decode_init_vlc);
    if (ret != 0) {
        av_log(avctx, AV_LOG_ERROR, "pthread_once has failed.\n");
        return AVERROR_UNKNOWN;
    }

    if (avctx->ticks_per_frame == 1)
        h->avctx->framerate.num *= 2;
    avctx->ticks_per_frame = 2;

    if (avctx->extradata_size > 0 && avctx->extradata) {
        ret = ff_h264_decode_extradata(avctx->extradata, avctx->extradata_size,
                                       &h->ps, &h->is_avc, &h->nal_length_size,
                                       avctx->err_recognition, avctx);
        if (ret < 0) {
            h264_decode_end(avctx);
            return ret;
        }
    }

    if (h->ps.sps && h->ps.sps->bitstream_restriction_flag &&
        h->avctx->has_b_frames < h->ps.sps->num_reorder_frames) {
        h->avctx->has_b_frames = h->ps.sps->num_reorder_frames;
    }

    avctx->internal->allocate_progress = 1;

    if (h->enable_er) {
        av_log(avctx, AV_LOG_WARNING,
               "Error resilience is enabled. It is unsafe and unsupported and may crash. "
               "Use it at your own risk\n");
    }

    return 0;
}

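/**
 * Frame-threading init callback: give each worker-thread copy of the
 * decoder its own freshly initialized context instead of sharing state
 * with the main thread's context.
 */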
static int decode_init_thread_copy(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;
    int ret;

    if (!avctx->internal->is_copy)
        return 0;

    memset(h, 0, sizeof(*h));

    ret = h264_init_context(avctx, h);
    if (ret < 0)
        return ret;

    h->context_initialized = 0;

    return 0;
}

/**
 * instantaneous decoder refresh.
 */
static void idr(H264Context *h)
{
    ff_h264_remove_all_refs(h);
    h->poc.prev_frame_num        =
    h->poc.prev_frame_num_offset =
    h->poc.prev_poc_msb          =
    h->poc.prev_poc_lsb          = 0;
}

/* forget old pics after a seek */
void ff_h264_flush_change(H264Context *h)
{
    int i;
    for (i = 0; i < MAX_DELAYED_PIC_COUNT; i++)
        h->last_pocs[i] = INT_MIN;
    h->next_outputed_poc = INT_MIN;
    h->prev_interlaced_frame = 1;
    idr(h);
    if (h->cur_pic_ptr)
        h->cur_pic_ptr->reference = 0;
    h->first_field = 0;
    h->recovery_frame = -1;
    h->frame_recovered = 0;
}

/* forget old pics after a seek */
static void flush_dpb(AVCodecContext *avctx)
{
    H264Context *h = avctx->priv_data;
    int i;

    memset(h->delayed_pic, 0, sizeof(h->delayed_pic));

    ff_h264_flush_change(h);
    ff_h264_sei_uninit(&h->sei);

    for (i = 0; i < H264_MAX_PICTURE_COUNT; i++)
        ff_h264_unref_picture(h, &h->DPB[i]);
    h->cur_pic_ptr = NULL;
    ff_h264_unref_picture(h, &h->cur_pic);

    h->mb_y = 0;

    ff_h264_free_tables(h);
    h->context_initialized = 0;
}

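/**
 * Return the index of the last NAL unit in the packet that has to be
 * decoded before frame threading may call ff_thread_finish_setup():
 * parameter sets and every slice that starts a new picture
 * (first_mb_in_slice == 0) must be seen first.
 */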
static int get_last_needed_nal(H264Context *h)
{
    int nals_needed = 0;
    int i, ret;

    for (i = 0; i < h->pkt.nb_nals; i++) {
        H2645NAL *nal = &h->pkt.nals[i];
        GetBitContext gb;

        /* Packets can sometimes contain multiple PPS/SPS,
         * e.g. two PAFF field pictures in one packet, or a demuxer
         * which splits NALs strangely. If so, when frame threading we
         * can't start the next thread until we've read all of them. */
        switch (nal->type) {
        case H264_NAL_SPS:
        case H264_NAL_PPS:
            nals_needed = i;
            break;
        case H264_NAL_DPA:
        case H264_NAL_IDR_SLICE:
        case H264_NAL_SLICE:
            ret = init_get_bits8(&gb, nal->data + 1, nal->size - 1);
            if (ret < 0) {
                av_log(h->avctx, AV_LOG_ERROR, "Invalid zero-sized VCL NAL unit\n");
                if (h->avctx->err_recognition & AV_EF_EXPLODE)
                    return ret;

                break;
            }
            if (!get_ue_golomb(&gb))
                nals_needed = i;
        }
    }

    return nals_needed;
}

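/**
 * Split the input buffer into NAL units and decode them in order: slices
 * are queued on the slice contexts (or handed to the hwaccel) and executed,
 * while SPS, PPS and SEI units update the parameter set and SEI state.
 */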
static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
{
    AVCodecContext *const avctx = h->avctx;
    int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts
    int i, ret = 0;

    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
        h->current_slice = 0;
        h->field_started = 0;
        if (!h->first_field)
            h->cur_pic_ptr = NULL;
        ff_h264_sei_uninit(&h->sei);
    }

    ret = ff_h2645_packet_split(&h->pkt, buf, buf_size, avctx, h->is_avc,
                                h->nal_length_size, avctx->codec_id);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "Error splitting the input into NAL units.\n");

        /* There are samples in the wild with mp4-style extradata, but Annex B
         * data in the packets. If we fail parsing the packet as mp4, try it again
         * as Annex B. */
        if (h->is_avc && !(avctx->err_recognition & AV_EF_EXPLODE)) {
            int err = ff_h2645_packet_split(&h->pkt, buf, buf_size, avctx, 0, 0,
                                            avctx->codec_id);
            if (err >= 0) {
                av_log(avctx, AV_LOG_WARNING,
                       "The stream seems to contain AVCC extradata with Annex B "
                       "formatted data, which is invalid.\n");
                h->is_avc = 0;
                ret       = 0;
            }
        }

        if (ret < 0)
            return ret;
    }

    if (avctx->active_thread_type & FF_THREAD_FRAME)
        nals_needed = get_last_needed_nal(h);

    for (i = 0; i < h->pkt.nb_nals; i++) {
        H2645NAL *nal = &h->pkt.nals[i];
        int max_slice_ctx, err;

        if (avctx->skip_frame >= AVDISCARD_NONREF &&
            nal->ref_idc == 0 && nal->type != H264_NAL_SEI)
            continue;

        // FIXME these should stop being context-global variables
        h->nal_ref_idc   = nal->ref_idc;
        h->nal_unit_type = nal->type;

        err = 0;
        switch (nal->type) {
        case H264_NAL_IDR_SLICE:
            idr(h); // FIXME ensure we don't lose some frames if there is reordering
            /* fall through */
        case H264_NAL_SLICE:
            if ((err = ff_h264_queue_decode_slice(h, nal)))
                break;

            if (avctx->active_thread_type & FF_THREAD_FRAME &&
                i >= nals_needed && !h->setup_finished && h->cur_pic_ptr) {
                ff_thread_finish_setup(avctx);
                h->setup_finished = 1;
            }

            max_slice_ctx = avctx->hwaccel ? 1 : h->nb_slice_ctx;
            if (h->nb_slice_ctx_queued == max_slice_ctx) {
                if (avctx->hwaccel) {
                    ret = avctx->hwaccel->decode_slice(avctx, nal->raw_data, nal->raw_size);
                    h->nb_slice_ctx_queued = 0;
                } else
                    ret = ff_h264_execute_decode_slices(h);
                if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                    goto end;
            }
            break;
        case H264_NAL_DPA:
        case H264_NAL_DPB:
        case H264_NAL_DPC:
            avpriv_request_sample(avctx, "data partitioning");
            ret = AVERROR(ENOSYS);
            goto end;
            break;
        case H264_NAL_SEI:
            ret = ff_h264_sei_decode(&h->sei, &nal->gb, &h->ps, avctx);
            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                goto end;
            break;
        case H264_NAL_SPS:
            ret = ff_h264_decode_seq_parameter_set(&nal->gb, avctx, &h->ps);
            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                goto end;
            break;
        case H264_NAL_PPS:
            ret = ff_h264_decode_picture_parameter_set(&nal->gb, avctx, &h->ps,
                                                       nal->size_bits);
            if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                goto end;
            break;
        case H264_NAL_AUD:
        case H264_NAL_END_SEQUENCE:
        case H264_NAL_END_STREAM:
        case H264_NAL_FILLER_DATA:
        case H264_NAL_SPS_EXT:
        case H264_NAL_AUXILIARY_SLICE:
            break;
        default:
            av_log(avctx, AV_LOG_DEBUG, "Unknown NAL code: %d (%d bits)\n",
                   nal->type, nal->size_bits);
        }

        if (err < 0) {
            av_log(h->avctx, AV_LOG_ERROR, "decode_slice_header error\n");
        }
    }

    ret = ff_h264_execute_decode_slices(h);
    if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
        goto end;

    ret = 0;
end:
    /* clean up */
    if (h->cur_pic_ptr && !h->droppable) {
        ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                  h->picture_structure == PICT_BOTTOM_FIELD);
    }

    return (ret < 0) ? ret : buf_size;
}

/**
 * Return the number of bytes consumed for building the current frame.
 */
static int get_consumed_bytes(int pos, int buf_size)
{
    if (pos == 0)
        pos = 1;        // avoid infinite loops (I doubt that is needed but...)
    if (pos + 10 > buf_size)
        pos = buf_size; // oops ;)

    return pos;
}

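/**
 * Decode callback for one input packet. An empty packet drains the delayed
 * pictures at the end of the stream; otherwise the contained NAL units are
 * decoded and, once a complete frame is available, it is returned through
 * the output frame.
 */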
static int h264_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
{
    const uint8_t *buf = avpkt->data;
    int buf_size       = avpkt->size;
    H264Context *h     = avctx->priv_data;
    AVFrame *pict      = data;
    int buf_index      = 0;
    int ret;
    const uint8_t *new_extradata;
    int new_extradata_size;

    h->flags = avctx->flags;
    h->setup_finished = 0;
    h->nb_slice_ctx_queued = 0;

    /* end of stream, output what is still in the buffers */
out:
    if (buf_size == 0) {
        H264Picture *out;
        int i, out_idx;

        h->cur_pic_ptr = NULL;

        // FIXME factorize this with the output code below
        out     = h->delayed_pic[0];
        out_idx = 0;
        for (i = 1;
             h->delayed_pic[i] &&
             !h->delayed_pic[i]->f->key_frame &&
             !h->delayed_pic[i]->mmco_reset;
             i++)
            if (h->delayed_pic[i]->poc < out->poc) {
                out     = h->delayed_pic[i];
                out_idx = i;
            }

        for (i = out_idx; h->delayed_pic[i]; i++)
            h->delayed_pic[i] = h->delayed_pic[i + 1];

        if (out) {
            ret = av_frame_ref(pict, out->f);
            if (ret < 0)
                return ret;
            *got_frame = 1;
        }

        return buf_index;
    }

    new_extradata_size = 0;
    new_extradata = av_packet_get_side_data(avpkt, AV_PKT_DATA_NEW_EXTRADATA,
                                            &new_extradata_size);
    if (new_extradata_size > 0 && new_extradata) {
        ret = ff_h264_decode_extradata(new_extradata, new_extradata_size,
                                       &h->ps, &h->is_avc, &h->nal_length_size,
                                       avctx->err_recognition, avctx);
        if (ret < 0)
            return ret;
    }

    buf_index = decode_nal_units(h, buf, buf_size);
    if (buf_index < 0)
        return AVERROR_INVALIDDATA;

    if (!h->cur_pic_ptr && h->nal_unit_type == H264_NAL_END_SEQUENCE) {
        buf_size = 0;
        goto out;
    }

    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
        if (avctx->skip_frame >= AVDISCARD_NONREF)
            return 0;
        av_log(avctx, AV_LOG_ERROR, "no frame!\n");
        return AVERROR_INVALIDDATA;
    }

    if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS) ||
        (h->mb_y >= h->mb_height && h->mb_height)) {
        if (h->field_started)
            ff_h264_field_end(h, &h->slice_ctx[0], 0);

        *got_frame = 0;
        if (h->output_frame->buf[0]) {
            ret = av_frame_ref(pict, h->output_frame);
            av_frame_unref(h->output_frame);
            if (ret < 0)
                return ret;
            *got_frame = 1;
        }
    }

    assert(pict->buf[0] || !*got_frame);

    return get_consumed_bytes(buf_index, buf_size);
}

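/* decoder private options and codec registration */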
#define OFFSET(x) offsetof(H264Context, x)
#define VD AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_DECODING_PARAM
static const AVOption h264_options[] = {
    { "enable_er", "Enable error resilience on damaged frames (unsafe)", OFFSET(enable_er), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, VD },
    { NULL },
};

static const AVClass h264_class = {
    .class_name = "h264",
    .item_name  = av_default_item_name,
    .option     = h264_options,
    .version    = LIBAVUTIL_VERSION_INT,
};

AVCodec ff_h264_decoder = {
    .name                  = "h264",
    .long_name             = NULL_IF_CONFIG_SMALL("H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10"),
    .type                  = AVMEDIA_TYPE_VIDEO,
    .id                    = AV_CODEC_ID_H264,
    .priv_data_size        = sizeof(H264Context),
    .init                  = h264_decode_init,
    .close                 = h264_decode_end,
    .decode                = h264_decode_frame,
    .capabilities          = /*AV_CODEC_CAP_DRAW_HORIZ_BAND |*/ AV_CODEC_CAP_DR1 |
                             AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS |
                             AV_CODEC_CAP_FRAME_THREADS,
    .hw_configs            = (const AVCodecHWConfigInternal*[]) {
#if CONFIG_H264_CUVID_HWACCEL
                               HWACCEL_CUVID(h264),
#endif
#if CONFIG_H264_DXVA2_HWACCEL
                               HWACCEL_DXVA2(h264),
#endif
#if CONFIG_H264_D3D11VA_HWACCEL
                               HWACCEL_D3D11VA(h264),
#endif
#if CONFIG_H264_D3D11VA2_HWACCEL
                               HWACCEL_D3D11VA2(h264),
#endif
#if CONFIG_H264_VAAPI_HWACCEL
                               HWACCEL_VAAPI(h264),
#endif
#if CONFIG_H264_VDPAU_HWACCEL
                               HWACCEL_VDPAU(h264),
#endif
#if CONFIG_H264_VDA_HWACCEL
                               HW_CONFIG_HWACCEL(0, 0, 1, VDA, NONE, ff_h264_vda_hwaccel),
#endif
#if CONFIG_H264_VDA_OLD_HWACCEL
                               HW_CONFIG_HWACCEL(0, 0, 1, VDA_VLD, NONE, ff_h264_vda_old_hwaccel),
#endif
                               NULL
                           },
    .caps_internal         = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_EXPORTS_CROPPING,
    .flush                 = flush_dpb,
    .init_thread_copy      = ONLY_IF_THREADS_ENABLED(decode_init_thread_copy),
    .update_thread_context = ONLY_IF_THREADS_ENABLED(ff_h264_update_thread_context),
    .profiles              = NULL_IF_CONFIG_SMALL(ff_h264_profiles),
    .priv_class            = &h264_class,
};