/*
 * Video Decode and Presentation API for UNIX (VDPAU) is used for
 * HW decode acceleration for MPEG-1/2, MPEG-4 ASP, H.264 and VC-1.
 *
 * Copyright (c) 2008 NVIDIA
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <limits.h>
#include "avcodec.h"
#include "h264.h"
#include "vc1.h"

#undef NDEBUG
#include <assert.h>

#include "vdpau.h"
#include "vdpau_internal.h"

/**
 * @addtogroup VDPAU_Decoding
 *
 * @{
 */
int ff_vdpau_common_start_frame(AVCodecContext *avctx,
                                av_unused const uint8_t *buffer,
                                av_unused uint32_t size)
{
    AVVDPAUContext *hwctx = avctx->hwaccel_context;

    hwctx->bitstream_buffers_used = 0;
    return 0;
}
int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx)
{
    AVVDPAUContext *hwctx = avctx->hwaccel_context;
    MpegEncContext *s     = avctx->priv_data;
    VdpVideoSurface surf  = ff_vdpau_get_surface_id(s->current_picture_ptr);

    hwctx->render(hwctx->decoder, surf, (void *)&hwctx->info,
                  hwctx->bitstream_buffers_used, hwctx->bitstream_buffers);

    ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
    hwctx->bitstream_buffers_used = 0;

    return 0;
}
int ff_vdpau_add_buffer(AVCodecContext *avctx,
                        const uint8_t *buf, uint32_t size)
{
    AVVDPAUContext *hwctx       = avctx->hwaccel_context;
    VdpBitstreamBuffer *buffers = hwctx->bitstream_buffers;

    buffers = av_fast_realloc(buffers, &hwctx->bitstream_buffers_allocated,
                              (hwctx->bitstream_buffers_used + 1) * sizeof(*buffers));
    if (!buffers)
        return AVERROR(ENOMEM);

    hwctx->bitstream_buffers = buffers;
    buffers                 += hwctx->bitstream_buffers_used++;

    buffers->struct_version  = VDP_BITSTREAM_BUFFER_VERSION;
    buffers->bitstream       = buf;
    buffers->bitstream_bytes = size;
    return 0;
}
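/*
 * A minimal sketch of how the three helpers above are typically driven by a
 * decoder's hwaccel hooks (hypothetical call sites; argument names such as
 * slice_buf/slice_size are placeholders, and the real per-codec glue lives in
 * the codec-specific VDPAU files):
 *
 *     ff_vdpau_common_start_frame(avctx, buffer, size);  // reset the chunk list
 *     ff_vdpau_add_buffer(avctx, slice_buf, slice_size); // queue one chunk per slice
 *     ff_vdpau_mpeg_end_frame(avctx);                    // submit via hwctx->render()
 */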
/* Obsolete non-hwaccel VDPAU support below... */
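/*
 * Rough sketch of the legacy flow implemented by the functions below (the
 * application-side steps are an assumption, not part of this file): the
 * caller allocates one struct vdpau_render_state per frame and stores its
 * pointer in data[0]; the functions here fill render->info and append the
 * bitstream chunks to render->bitstream_buffers, and the application is then
 * expected to issue the actual VDPAU render call from its draw_horiz_band
 * callback.
 */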
void ff_vdpau_h264_set_reference_frames(H264Context *h)
{
    struct vdpau_render_state *render, *render_ref;
    VdpReferenceFrameH264 *rf, *rf2;
    Picture *pic;
    int i, list, pic_frame_idx;

    render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0];
    assert(render);

    rf = &render->info.h264.referenceFrames[0];
#define H264_RF_COUNT FF_ARRAY_ELEMS(render->info.h264.referenceFrames)

    for (list = 0; list < 2; ++list) {
        Picture **lp = list ? h->long_ref : h->short_ref;
        int ls       = list ? 16          : h->short_ref_count;

        for (i = 0; i < ls; ++i) {
            pic = lp[i];
            if (!pic || !pic->reference)
                continue;
            pic_frame_idx = pic->long_ref ? pic->pic_id : pic->frame_num;

            render_ref = (struct vdpau_render_state *)pic->f.data[0];
            assert(render_ref);

            /* Check whether this surface is already in the reference list. */
            rf2 = &render->info.h264.referenceFrames[0];
            while (rf2 != rf) {
                if (
                    (rf2->surface      == render_ref->surface)
                    && (rf2->is_long_term == pic->long_ref)
                    && (rf2->frame_idx    == pic_frame_idx)
                )
                    break;
                ++rf2;
            }
            if (rf2 != rf) {
                /* Already present: just merge the field reference flags. */
                rf2->top_is_reference    |= (pic->reference & PICT_TOP_FIELD)    ? VDP_TRUE : VDP_FALSE;
                rf2->bottom_is_reference |= (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
                continue;
            }

            if (rf >= &render->info.h264.referenceFrames[H264_RF_COUNT])
                continue;

            rf->surface             = render_ref->surface;
            rf->is_long_term        = pic->long_ref;
            rf->top_is_reference    = (pic->reference & PICT_TOP_FIELD)    ? VDP_TRUE : VDP_FALSE;
            rf->bottom_is_reference = (pic->reference & PICT_BOTTOM_FIELD) ? VDP_TRUE : VDP_FALSE;
            rf->field_order_cnt[0]  = pic->field_poc[0];
            rf->field_order_cnt[1]  = pic->field_poc[1];
            rf->frame_idx           = pic_frame_idx;

            ++rf;
        }
    }

    /* Mark the remaining reference frame slots as unused. */
    for (; rf < &render->info.h264.referenceFrames[H264_RF_COUNT]; ++rf) {
        rf->surface             = VDP_INVALID_HANDLE;
        rf->is_long_term        = 0;
        rf->top_is_reference    = 0;
        rf->bottom_is_reference = 0;
        rf->field_order_cnt[0]  = 0;
        rf->field_order_cnt[1]  = 0;
        rf->frame_idx           = 0;
    }
}
void ff_vdpau_add_data_chunk(uint8_t *data, const uint8_t *buf, int buf_size)
{
    struct vdpau_render_state *render = (struct vdpau_render_state *)data;
    assert(render);

    render->bitstream_buffers = av_fast_realloc(
        render->bitstream_buffers,
        &render->bitstream_buffers_allocated,
        sizeof(*render->bitstream_buffers) * (render->bitstream_buffers_used + 1)
    );

    render->bitstream_buffers[render->bitstream_buffers_used].struct_version  = VDP_BITSTREAM_BUFFER_VERSION;
    render->bitstream_buffers[render->bitstream_buffers_used].bitstream       = buf;
    render->bitstream_buffers[render->bitstream_buffers_used].bitstream_bytes = buf_size;
    render->bitstream_buffers_used++;
}
void ff_vdpau_h264_picture_start(H264Context *h)
{
    struct vdpau_render_state *render;
    int i;

    render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0];
    assert(render);

    for (i = 0; i < 2; ++i) {
        int foc = h->cur_pic_ptr->field_poc[i];
        if (foc == INT_MAX)
            foc = 0;
        render->info.h264.field_order_cnt[i] = foc;
    }

    render->info.h264.frame_num = h->frame_num;
}
void ff_vdpau_h264_picture_complete(H264Context *h)
{
    struct vdpau_render_state *render;

    render = (struct vdpau_render_state *)h->cur_pic_ptr->f.data[0];
    assert(render);

    render->info.h264.slice_count = h->slice_num;
    if (render->info.h264.slice_count < 1)
        return;

    render->info.h264.is_reference                           = (h->cur_pic_ptr->reference & 3) ? VDP_TRUE : VDP_FALSE;
    render->info.h264.field_pic_flag                         = h->picture_structure != PICT_FRAME;
    render->info.h264.bottom_field_flag                      = h->picture_structure == PICT_BOTTOM_FIELD;
    render->info.h264.num_ref_frames                         = h->sps.ref_frame_count;
    render->info.h264.mb_adaptive_frame_field_flag           = h->sps.mb_aff && !render->info.h264.field_pic_flag;
    render->info.h264.constrained_intra_pred_flag            = h->pps.constrained_intra_pred;
    render->info.h264.weighted_pred_flag                     = h->pps.weighted_pred;
    render->info.h264.weighted_bipred_idc                    = h->pps.weighted_bipred_idc;
    render->info.h264.frame_mbs_only_flag                    = h->sps.frame_mbs_only_flag;
    render->info.h264.transform_8x8_mode_flag                = h->pps.transform_8x8_mode;
    render->info.h264.chroma_qp_index_offset                 = h->pps.chroma_qp_index_offset[0];
    render->info.h264.second_chroma_qp_index_offset          = h->pps.chroma_qp_index_offset[1];
    render->info.h264.pic_init_qp_minus26                    = h->pps.init_qp - 26;
    render->info.h264.num_ref_idx_l0_active_minus1           = h->pps.ref_count[0] - 1;
    render->info.h264.num_ref_idx_l1_active_minus1           = h->pps.ref_count[1] - 1;
    render->info.h264.log2_max_frame_num_minus4              = h->sps.log2_max_frame_num - 4;
    render->info.h264.pic_order_cnt_type                     = h->sps.poc_type;
    render->info.h264.log2_max_pic_order_cnt_lsb_minus4      = h->sps.poc_type ? 0 : h->sps.log2_max_poc_lsb - 4;
    render->info.h264.delta_pic_order_always_zero_flag       = h->sps.delta_pic_order_always_zero_flag;
    render->info.h264.direct_8x8_inference_flag              = h->sps.direct_8x8_inference_flag;
    render->info.h264.entropy_coding_mode_flag               = h->pps.cabac;
    render->info.h264.pic_order_present_flag                 = h->pps.pic_order_present;
    render->info.h264.deblocking_filter_control_present_flag = h->pps.deblocking_filter_parameters_present;
    render->info.h264.redundant_pic_cnt_present_flag         = h->pps.redundant_pic_cnt_present;
    memcpy(render->info.h264.scaling_lists_4x4, h->pps.scaling_matrix4,
           sizeof(render->info.h264.scaling_lists_4x4));
    memcpy(render->info.h264.scaling_lists_8x8[0], h->pps.scaling_matrix8[0],
           sizeof(render->info.h264.scaling_lists_8x8[0]));
    memcpy(render->info.h264.scaling_lists_8x8[1], h->pps.scaling_matrix8[3],
           sizeof(render->info.h264.scaling_lists_8x8[0]));

    ff_h264_draw_horiz_band(h, 0, h->avctx->height);
    render->bitstream_buffers_used = 0;
}
void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
                                    int buf_size, int slice_count)
{
    struct vdpau_render_state *render, *last, *next;
    int i;

    if (!s->current_picture_ptr) return;

    render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0];
    assert(render);

    /* fill VdpPictureInfoMPEG1Or2 struct */
    render->info.mpeg.picture_structure          = s->picture_structure;
    render->info.mpeg.picture_coding_type        = s->pict_type;
    render->info.mpeg.intra_dc_precision         = s->intra_dc_precision;
    render->info.mpeg.frame_pred_frame_dct       = s->frame_pred_frame_dct;
    render->info.mpeg.concealment_motion_vectors = s->concealment_motion_vectors;
    render->info.mpeg.intra_vlc_format           = s->intra_vlc_format;
    render->info.mpeg.alternate_scan             = s->alternate_scan;
    render->info.mpeg.q_scale_type               = s->q_scale_type;
    render->info.mpeg.top_field_first            = s->top_field_first;
    render->info.mpeg.full_pel_forward_vector    = s->full_pel[0]; // MPEG-1 only. Set 0 for MPEG-2.
    render->info.mpeg.full_pel_backward_vector   = s->full_pel[1]; // MPEG-1 only. Set 0 for MPEG-2.
    render->info.mpeg.f_code[0][0]               = s->mpeg_f_code[0][0]; // For MPEG-1 fill both horiz. & vert.
    render->info.mpeg.f_code[0][1]               = s->mpeg_f_code[0][1];
    render->info.mpeg.f_code[1][0]               = s->mpeg_f_code[1][0];
    render->info.mpeg.f_code[1][1]               = s->mpeg_f_code[1][1];
    for (i = 0; i < 64; ++i) {
        render->info.mpeg.intra_quantizer_matrix[i]     = s->intra_matrix[i];
        render->info.mpeg.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
    }

    render->info.mpeg.forward_reference  = VDP_INVALID_HANDLE;
    render->info.mpeg.backward_reference = VDP_INVALID_HANDLE;

    switch (s->pict_type) {
    case AV_PICTURE_TYPE_B:
        next = (struct vdpau_render_state *)s->next_picture.f.data[0];
        assert(next);
        render->info.mpeg.backward_reference = next->surface;
        // no break here, going on to set forward prediction
    case AV_PICTURE_TYPE_P:
        last = (struct vdpau_render_state *)s->last_picture.f.data[0];
        if (!last) // FIXME: Does this test make sense?
            last = render; // predict second field from the first
        render->info.mpeg.forward_reference = last->surface;
    }

    ff_vdpau_add_data_chunk(s->current_picture_ptr->f.data[0], buf, buf_size);

    render->info.mpeg.slice_count = slice_count;

    ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
    render->bitstream_buffers_used = 0;
}
void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
                                 int buf_size)
{
    VC1Context *v = s->avctx->priv_data;
    struct vdpau_render_state *render, *last, *next;

    render = (struct vdpau_render_state *)s->current_picture.f.data[0];
    assert(render);

    /* fill VdpPictureInfoVC1 struct */
    render->info.vc1.frame_coding_mode  = v->fcm;
    render->info.vc1.postprocflag       = v->postprocflag;
    render->info.vc1.pulldown           = v->broadcast;
    render->info.vc1.interlace          = v->interlace;
    render->info.vc1.tfcntrflag         = v->tfcntrflag;
    render->info.vc1.finterpflag        = v->finterpflag;
    render->info.vc1.psf                = v->psf;
    render->info.vc1.dquant             = v->dquant;
    render->info.vc1.panscan_flag       = v->panscanflag;
    render->info.vc1.refdist_flag       = v->refdist_flag;
    render->info.vc1.quantizer          = v->quantizer_mode;
    render->info.vc1.extended_mv        = v->extended_mv;
    render->info.vc1.extended_dmv       = v->extended_dmv;
    render->info.vc1.overlap            = v->overlap;
    render->info.vc1.vstransform        = v->vstransform;
    render->info.vc1.loopfilter         = v->s.loop_filter;
    render->info.vc1.fastuvmc           = v->fastuvmc;
    render->info.vc1.range_mapy_flag    = v->range_mapy_flag;
    render->info.vc1.range_mapy         = v->range_mapy;
    render->info.vc1.range_mapuv_flag   = v->range_mapuv_flag;
    render->info.vc1.range_mapuv        = v->range_mapuv;
    /* Specific to simple/main profile only */
    render->info.vc1.multires           = v->multires;
    render->info.vc1.syncmarker         = v->s.resync_marker;
    render->info.vc1.rangered           = v->rangered | (v->rangeredfrm << 1);
    render->info.vc1.maxbframes         = v->s.max_b_frames;

    render->info.vc1.deblockEnable      = v->postprocflag & 1;
    render->info.vc1.pquant             = v->pq;

    render->info.vc1.forward_reference  = VDP_INVALID_HANDLE;
    render->info.vc1.backward_reference = VDP_INVALID_HANDLE;

    if (v->bi_type)
        render->info.vc1.picture_type = 4;
    else
        render->info.vc1.picture_type = s->pict_type - 1 + s->pict_type / 3;

    switch (s->pict_type) {
    case AV_PICTURE_TYPE_B:
        next = (struct vdpau_render_state *)s->next_picture.f.data[0];
        assert(next);
        render->info.vc1.backward_reference = next->surface;
        // no break here, going on to set forward prediction
    case AV_PICTURE_TYPE_P:
        last = (struct vdpau_render_state *)s->last_picture.f.data[0];
        if (!last) // FIXME: Does this test make sense?
            last = render; // predict second field from the first
        render->info.vc1.forward_reference = last->surface;
    }

    ff_vdpau_add_data_chunk(s->current_picture_ptr->f.data[0], buf, buf_size);

    render->info.vc1.slice_count = 1;

    ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
    render->bitstream_buffers_used = 0;
}
void ff_vdpau_mpeg4_decode_picture(MpegEncContext *s, const uint8_t *buf,
                                   int buf_size)
{
    struct vdpau_render_state *render, *last, *next;
    int i;

    if (!s->current_picture_ptr) return;

    render = (struct vdpau_render_state *)s->current_picture_ptr->f.data[0];
    assert(render);

    /* fill VdpPictureInfoMPEG4Part2 struct */
    render->info.mpeg4.trd[0]                            = s->pp_time;
    render->info.mpeg4.trb[0]                            = s->pb_time;
    render->info.mpeg4.trd[1]                            = s->pp_field_time >> 1;
    render->info.mpeg4.trb[1]                            = s->pb_field_time >> 1;
    render->info.mpeg4.vop_time_increment_resolution     = s->avctx->time_base.den;
    render->info.mpeg4.vop_coding_type                   = 0;
    render->info.mpeg4.vop_fcode_forward                 = s->f_code;
    render->info.mpeg4.vop_fcode_backward                = s->b_code;
    render->info.mpeg4.resync_marker_disable             = !s->resync_marker;
    render->info.mpeg4.interlaced                        = !s->progressive_sequence;
    render->info.mpeg4.quant_type                        = s->mpeg_quant;
    render->info.mpeg4.quarter_sample                    = s->quarter_sample;
    render->info.mpeg4.short_video_header                = s->avctx->codec->id == AV_CODEC_ID_H263;
    render->info.mpeg4.rounding_control                  = s->no_rounding;
    render->info.mpeg4.alternate_vertical_scan_flag      = s->alternate_scan;
    render->info.mpeg4.top_field_first                   = s->top_field_first;
    for (i = 0; i < 64; ++i) {
        render->info.mpeg4.intra_quantizer_matrix[i]     = s->intra_matrix[i];
        render->info.mpeg4.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
    }
    render->info.mpeg4.forward_reference                 = VDP_INVALID_HANDLE;
    render->info.mpeg4.backward_reference                = VDP_INVALID_HANDLE;

    switch (s->pict_type) {
    case AV_PICTURE_TYPE_B:
        next = (struct vdpau_render_state *)s->next_picture.f.data[0];
        assert(next);
        render->info.mpeg4.backward_reference = next->surface;
        render->info.mpeg4.vop_coding_type    = 2;
        // no break here, going on to set forward prediction
    case AV_PICTURE_TYPE_P:
        last = (struct vdpau_render_state *)s->last_picture.f.data[0];
        assert(last);
        render->info.mpeg4.forward_reference  = last->surface;
    }

    ff_vdpau_add_data_chunk(s->current_picture_ptr->f.data[0], buf, buf_size);

    ff_mpeg_draw_horiz_band(s, 0, s->avctx->height);
    render->bitstream_buffers_used = 0;
}