/*
 * XVideo Motion Compensation
 * Copyright (c) 2003 Ivan Kalvachev
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
#include <limits.h>

#include <X11/extensions/XvMC.h>

#include "avcodec.h"
#include "mpegvideo.h"

#undef NDEBUG
#include <assert.h>

#include "xvmc.h"
#include "xvmc_internal.h"
/**
 * Initialize the block field of the MpegEncContext pointer passed as
 * parameter after making sure that the data is not corrupted.
 * In order to implement something like direct rendering instead of decoding
 * coefficients in s->blocks and then copying them, copy them directly
 * into the data_blocks array provided by xvmc.
 */
void ff_xvmc_init_block(MpegEncContext *s)
{
    struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
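    // With the XvMC pixel formats, data[2] does not hold pixel data but the
    // xvmc_pix_fmt render structure that the application attached to the frame.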
    assert(render && render->xvmc_id == AV_XVMC_ID);

    s->block = (int16_t (*)[64])(render->data_blocks + render->next_free_data_block_num * 64);
}
/**
 * Fill individual block pointers, so there are no gaps in the data_block array
 * in case not all blocks in the macroblock are coded.
 */
void ff_xvmc_pack_pblocks(MpegEncContext *s, int cbp)
{
    int i, j = 0;
    const int mb_block_count = 4 + (1 << s->chroma_format);

    cbp <<= 12 - mb_block_count;
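    // The shift left-aligns the pattern so that bit 11 always corresponds to the
    // block currently examined; coded blocks then get consecutive slots in
    // s->block (and thus in render->data_blocks), uncoded blocks get none.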
    for (i = 0; i < mb_block_count; i++) {
        if (cbp & (1 << 11))
            s->pblocks[i] = &s->block[j++];
        else
            s->pblocks[i] = NULL;
        cbp += cbp;
    }
}
/**
 * Find and store the surfaces that are used as reference frames.
 * This function should be called for every new field and/or frame.
 * It should be safe to call the function a few times for the same field.
 */
int ff_xvmc_field_start(MpegEncContext *s, AVCodecContext *avctx)
{
    struct xvmc_pix_fmt *last, *next, *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
    const int mb_block_count = 4 + (1 << s->chroma_format);

    assert(avctx);
    if (!render || render->xvmc_id != AV_XVMC_ID ||
        !render->data_blocks || !render->mv_blocks ||
        (unsigned int)render->allocated_mv_blocks   > INT_MAX/(64*6) ||
        (unsigned int)render->allocated_data_blocks > INT_MAX/64     ||
        !render->p_surface) {
        av_log(avctx, AV_LOG_ERROR,
               "Render token doesn't look as expected.\n");
        return -1; // make sure that this is a render packet
    }
    if (render->filled_mv_blocks_num) {
        av_log(avctx, AV_LOG_ERROR,
               "Rendering surface contains %i unprocessed blocks.\n",
               render->filled_mv_blocks_num);
        return -1;
    }
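    /* Capacity check below: every macroblock that may still be queued on this
     * surface (allocated_mv_blocks - start_mv_blocks_num) can consume up to
     * mb_block_count data blocks, so that many free slots must remain in
     * data_blocks. */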
    if (render->allocated_mv_blocks   < 1 ||
        render->allocated_data_blocks <  render->allocated_mv_blocks*mb_block_count ||
        render->start_mv_blocks_num   >= render->allocated_mv_blocks                ||
        render->next_free_data_block_num >
                render->allocated_data_blocks -
                mb_block_count*(render->allocated_mv_blocks-render->start_mv_blocks_num)) {
        av_log(avctx, AV_LOG_ERROR,
               "Rendering surface doesn't provide enough block structures to work with.\n");
        return -1;
    }
    render->picture_structure = s->picture_structure;
    render->flags             = s->first_field ? 0 : XVMC_SECOND_FIELD;
    render->p_future_surface  = NULL;
    render->p_past_surface    = NULL;
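    // p_past_surface / p_future_surface tell the XvMC driver which surfaces to
    // use for forward and backward prediction; they are cleared here so a reused
    // render structure cannot carry stale references into this field.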
    switch(s->pict_type) {
        case AV_PICTURE_TYPE_I:
            return 0; // no prediction from other frames
        case AV_PICTURE_TYPE_B:
            next = (struct xvmc_pix_fmt*)s->next_picture.f.data[2];
            if (!next)
                return -1;
            if (next->xvmc_id != AV_XVMC_ID)
                return -1;
            render->p_future_surface = next->p_surface;
            // no return here, going to set forward prediction
        case AV_PICTURE_TYPE_P:
            last = (struct xvmc_pix_fmt*)s->last_picture.f.data[2];
            if (!last)
                last = render; // predict second field from the first
            if (last->xvmc_id != AV_XVMC_ID)
                return -1;
            render->p_past_surface = last->p_surface;
            return 0;
    }

    return -1;
}
/**
 * Complete frame/field rendering by passing any remaining blocks.
 * Normally ff_mpeg_draw_horiz_band() is called for each slice, however,
 * some leftover blocks, for example from error_resilience(), may remain.
 * It should be safe to call the function a few times for the same field.
 */
void ff_xvmc_field_end(MpegEncContext *s)
{
    struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
    assert(render);
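    // ff_mpeg_draw_horiz_band() ends up in the application's draw_horiz_band
    // callback, which is expected to hand the queued macroblocks to the hardware
    // (e.g. via XvMCRenderSurface) and reset the filled/free block counters.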
    if (render->filled_mv_blocks_num > 0)
        ff_mpeg_draw_horiz_band(s, 0, 0);
}
/**
 * Synthesize the data needed by XvMC to render one macroblock of data.
 * Fill all relevant fields, if necessary do IDCT.
 */
void ff_xvmc_decode_mb(MpegEncContext *s)
{
    XvMCMacroBlock *mv_block;
    struct xvmc_pix_fmt *render;
    int i, cbp, blocks_per_mb;

    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
    if (s->encoding) {
        av_log(s->avctx, AV_LOG_ERROR, "XVMC doesn't support encoding!!!\n");
        return;
    }

    // from MPV_decode_mb(), update DC predictors for P macroblocks
    if (!s->mb_intra) {
        s->last_dc[0] =
        s->last_dc[1] =
        s->last_dc[2] = 128 << s->intra_dc_precision;
    }

    // MC doesn't skip blocks
    s->mb_skipped = 0;
    // Do I need to export quant when I could not perform postprocessing?
    // Anyway, it doesn't hurt.
    s->current_picture.f.qscale_table[mb_xy] = s->qscale;
    // start of XVMC-specific code
    render = (struct xvmc_pix_fmt*)s->current_picture.f.data[2];
    assert(render);
    assert(render->xvmc_id == AV_XVMC_ID);
    assert(render->mv_blocks);
    // take the next free macroblock
    mv_block = &render->mv_blocks[render->start_mv_blocks_num +
                                  render->filled_mv_blocks_num];
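    // mv_blocks[start_mv_blocks_num .. start_mv_blocks_num + filled_mv_blocks_num)
    // are the macroblocks queued since the last ff_mpeg_draw_horiz_band() call.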
    mv_block->x        = s->mb_x;
    mv_block->y        = s->mb_y;
    mv_block->dct_type = s->interlaced_dct; // XVMC_DCT_TYPE_FRAME/FIELD;
    if (s->mb_intra) {
        mv_block->macroblock_type = XVMC_MB_TYPE_INTRA; // no MC, all done
    } else {
        mv_block->macroblock_type = XVMC_MB_TYPE_PATTERN;
        if (s->mv_dir & MV_DIR_FORWARD) {
            mv_block->macroblock_type |= XVMC_MB_TYPE_MOTION_FORWARD;
            // PMV[n][dir][xy] = mv[dir][n][xy]
            mv_block->PMV[0][0][0] = s->mv[0][0][0];
            mv_block->PMV[0][0][1] = s->mv[0][0][1];
            mv_block->PMV[1][0][0] = s->mv[0][1][0];
            mv_block->PMV[1][0][1] = s->mv[0][1][1];
        }
        if (s->mv_dir & MV_DIR_BACKWARD) {
            mv_block->macroblock_type |= XVMC_MB_TYPE_MOTION_BACKWARD;
            mv_block->PMV[0][1][0] = s->mv[1][0][0];
            mv_block->PMV[0][1][1] = s->mv[1][0][1];
            mv_block->PMV[1][1][0] = s->mv[1][1][0];
            mv_block->PMV[1][1][1] = s->mv[1][1][1];
        }
        switch(s->mv_type) {
            case MV_TYPE_16X16:
                mv_block->motion_type = XVMC_PREDICTION_FRAME;
                break;
            case MV_TYPE_16X8:
                mv_block->motion_type = XVMC_PREDICTION_16x8;
                break;
            case MV_TYPE_FIELD:
                mv_block->motion_type = XVMC_PREDICTION_FIELD;
                if (s->picture_structure == PICT_FRAME) {
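                    // in frame pictures lavc keeps the vertical component of field
                    // MVs in field units; double it to the frame-coordinate value
                    // expected in the PMV fields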
                    mv_block->PMV[0][0][1] <<= 1;
                    mv_block->PMV[1][0][1] <<= 1;
                    mv_block->PMV[0][1][1] <<= 1;
                    mv_block->PMV[1][1][1] <<= 1;
                }
                break;
            case MV_TYPE_DMV:
                mv_block->motion_type = XVMC_PREDICTION_DUAL_PRIME;
                if (s->picture_structure == PICT_FRAME) {
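                    // dual prime: PMV[0][*] holds the transmitted same-parity vector,
                    // PMV[1][*] the derived opposite-parity (dmv) vectors; vertical
                    // parts are doubled to frame coordinates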
                    mv_block->PMV[0][0][0] = s->mv[0][0][0];      // top from top
                    mv_block->PMV[0][0][1] = s->mv[0][0][1] << 1;

                    mv_block->PMV[0][1][0] = s->mv[0][0][0];      // bottom from bottom
                    mv_block->PMV[0][1][1] = s->mv[0][0][1] << 1;

                    mv_block->PMV[1][0][0] = s->mv[0][2][0];      // dmv00, top from bottom
                    mv_block->PMV[1][0][1] = s->mv[0][2][1] << 1; // dmv01

                    mv_block->PMV[1][1][0] = s->mv[0][3][0];      // dmv10, bottom from top
                    mv_block->PMV[1][1][1] = s->mv[0][3][1] << 1; // dmv11
                } else {
                    mv_block->PMV[0][1][0] = s->mv[0][2][0];      // dmv00
                    mv_block->PMV[0][1][1] = s->mv[0][2][1];      // dmv01
                }
                break;
            default:
                assert(0);
        }
        mv_block->motion_vertical_field_select = 0;

        // set correct field references
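        // bits 0/1: first vector forward/backward, bits 2/3: second vector
        // forward/backward, matching the XvMC motion_vertical_field_select layout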
        if (s->mv_type == MV_TYPE_FIELD || s->mv_type == MV_TYPE_16X8) {
            mv_block->motion_vertical_field_select |= s->field_select[0][0];
            mv_block->motion_vertical_field_select |= s->field_select[1][0] << 1;
            mv_block->motion_vertical_field_select |= s->field_select[0][1] << 2;
            mv_block->motion_vertical_field_select |= s->field_select[1][1] << 3;
        }
    } // !intra
    // time to handle data blocks
    mv_block->index = render->next_free_data_block_num;
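    // 4 luma blocks plus 2, 4 or 8 chroma blocks for 4:2:0, 4:2:2 and 4:4:4
    // (s->chroma_format is 1, 2 or 3 respectively)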
    blocks_per_mb = 6;
    if (s->chroma_format >= 2) {
        blocks_per_mb = 4 + (1 << s->chroma_format);
    }
    // calculate cbp
    cbp = 0;
    for (i = 0; i < blocks_per_mb; i++) {
        cbp += cbp;
        if (s->block_last_index[i] >= 0)
            cbp++;
    }
    if (s->flags & CODEC_FLAG_GRAY) {
        if (s->mb_intra) { // intra frames are always full chroma blocks
            for (i = 4; i < blocks_per_mb; i++) {
                memset(s->pblocks[i], 0, sizeof(*s->pblocks[i])); // so we need to clear them
                if (!render->unsigned_intra)
                    *s->pblocks[i][0] = 1 << 10;
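                // a DC coefficient of 1 << 10 comes out of the IDCT as a flat
                // block of 128, i.e. mid-grey chroma for signed-intra hardware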
            }
        } else {
            cbp &= 0xf << (blocks_per_mb - 4);
            blocks_per_mb = 4; // luminance blocks only
        }
    }
    mv_block->coded_block_pattern = cbp;
    if (cbp == 0)
        mv_block->macroblock_type &= ~XVMC_MB_TYPE_PATTERN;
    for (i = 0; i < blocks_per_mb; i++) {
        if (s->block_last_index[i] >= 0) {
            // I do not have unsigned_intra MOCO to test, hope it is OK.
            if (s->mb_intra && (render->idct || !render->unsigned_intra))
                *s->pblocks[i][0] -= 1 << 10;
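            // lavc codes intra DC with a +128 level bias (1 << 10 in the DC term);
            // strip it when the hardware runs the IDCT itself or expects signed data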
            if (!render->idct) {
                s->dsp.idct(*s->pblocks[i]);
                /* It is unclear if MC hardware requires pixel diff values to be
                 * in the range [-255;255]. TODO: Clipping if such hardware is
                 * ever found. As of now it would only be an unnecessary
                 * slowdown. */
            }
            // copy blocks only if the codec doesn't support pblocks reordering
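            // at acceleration levels above 1, ff_xvmc_init_block()/ff_xvmc_pack_pblocks()
            // already pointed s->pblocks[] into render->data_blocks, so the
            // coefficients land there directly and no copy is needed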
            if (s->avctx->xvmc_acceleration == 1) {
                memcpy(&render->data_blocks[render->next_free_data_block_num*64],
                       s->pblocks[i], sizeof(*s->pblocks[i]));
            }
            render->next_free_data_block_num++;
        }
    }
    render->filled_mv_blocks_num++;

    assert(render->filled_mv_blocks_num     <= render->allocated_mv_blocks);
    assert(render->next_free_data_block_num <= render->allocated_data_blocks);
    /* The above conditions should not be able to fail as long as this function
     * is used and the following 'if ()' automatically calls a callback to free
     * blocks. */
    if (render->filled_mv_blocks_num == render->allocated_mv_blocks)
        ff_mpeg_draw_horiz_band(s, 0, 0);
}