/*
 * The simplest mpeg encoder (well, it was the simplest!)
 * Copyright (c) 2000,2001 Fabrice Bellard
 * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
 *
 * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
/**
 * @file libavcodec/mpegvideo_common.h
 * The simplest mpeg encoder (well, it was the simplest!).
 */
#ifndef AVCODEC_MPEGVIDEO_COMMON_H
#define AVCODEC_MPEGVIDEO_COMMON_H

#include <string.h>
#include <assert.h>

#include "avcodec.h"
#include "dsputil.h"
#include "mpegvideo.h"
int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
int dct_quantize_trellis_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow);
void denoise_dct_c(MpegEncContext *s, DCTELEM *block);
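
/* Usage sketch (editorial, not part of the original header): the quantizers
 * work on one DCT-transformed block of a macroblock and return the index of
 * the last non-zero coefficient, reporting clipping through *overflow. The
 * snippet assumes the usual MpegEncContext function pointer setup:
 *
 *     int overflow;
 *     int last_index = s->dct_quantize(s, block, n, s->qscale, &overflow);
 *     // dct_quantize_c() / dct_quantize_trellis_c() are the C
 *     // implementations that may be installed behind s->dct_quantize.
 */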
/**
 * The pixels are allocated/set by calling get_buffer() if shared=0.
 */
int alloc_picture(MpegEncContext *s, Picture *pic, int shared);
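
/* Sketch of a typical call (editorial; assumes an initialized context and a
 * Picture taken from s->picture[]): a non-shared allocation goes through
 * get_buffer(), so the result must be checked.
 *
 *     if (alloc_picture(s, &s->picture[i], 0) < 0)
 *         return -1;  // allocation failed
 */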
/**
 * Sets the given MpegEncContext to common defaults (same for encoding and decoding).
 * The changed fields will not depend upon the prior state of the MpegEncContext.
 */
void MPV_common_defaults(MpegEncContext *s);
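
/* Editorial note: elsewhere in libavcodec this is normally reached through
 * MPV_decode_defaults() / MPV_encode_defaults(), which call it before any
 * codec-specific fields are filled in and before MPV_common_init().
 */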
static inline void gmc1_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               uint8_t **ref_picture)
{
    uint8_t *ptr;
    int offset, src_x, src_y, linesize, uvlinesize;
    int motion_x, motion_y;
    int emu=0;

    motion_x= s->sprite_offset[0][0];
    motion_y= s->sprite_offset[0][1];
    src_x = s->mb_x * 16 + (motion_x >> (s->sprite_warping_accuracy+1));
    src_y = s->mb_y * 16 + (motion_y >> (s->sprite_warping_accuracy+1));
    motion_x<<=(3-s->sprite_warping_accuracy);
    motion_y<<=(3-s->sprite_warping_accuracy);
    src_x = av_clip(src_x, -16, s->width);
    if (src_x == s->width)
        motion_x =0;
    src_y = av_clip(src_y, -16, s->height);
    if (src_y == s->height)
        motion_y =0;

    linesize = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0] + (src_y * linesize) + src_x;

    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x >= s->h_edge_pos - 17
           || (unsigned)src_y >= s->v_edge_pos - 17){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, linesize, 17, 17, src_x, src_y, s->h_edge_pos, s->v_edge_pos);
            ptr= s->edge_emu_buffer;
        }
    }

    if((motion_x|motion_y)&7){
        s->dsp.gmc1(dest_y  , ptr  , linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
        s->dsp.gmc1(dest_y+8, ptr+8, linesize, 16, motion_x&15, motion_y&15, 128 - s->no_rounding);
    }else{
        int dxy;

        dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);
        if (s->no_rounding){
            s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
        }else{
            s->dsp.put_pixels_tab       [0][dxy](dest_y, ptr, linesize, 16);
        }
    }

    if(CONFIG_GRAY && s->flags&CODEC_FLAG_GRAY) return;

    motion_x= s->sprite_offset[1][0];
    motion_y= s->sprite_offset[1][1];
    src_x = s->mb_x * 8 + (motion_x >> (s->sprite_warping_accuracy+1));
    src_y = s->mb_y * 8 + (motion_y >> (s->sprite_warping_accuracy+1));
    motion_x<<=(3-s->sprite_warping_accuracy);
    motion_y<<=(3-s->sprite_warping_accuracy);
    src_x = av_clip(src_x, -8, s->width>>1);
    if (src_x == s->width>>1)
        motion_x =0;
    src_y = av_clip(src_y, -8, s->height>>1);
    if (src_y == s->height>>1)
        motion_y =0;

    offset = (src_y * uvlinesize) + src_x;
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x >= (s->h_edge_pos>>1) - 9
           || (unsigned)src_y >= (s->v_edge_pos>>1) - 9){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr= s->edge_emu_buffer;
            emu=1;
        }
    }
    s->dsp.gmc1(dest_cb, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);

    ptr = ref_picture[2] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, uvlinesize, 9, 9, src_x, src_y, s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    s->dsp.gmc1(dest_cr, ptr, uvlinesize, 8, motion_x&15, motion_y&15, 128 - s->no_rounding);
}
static inline void gmc_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               uint8_t **ref_picture)
{
    uint8_t *ptr;
    int linesize, uvlinesize;
    const int a= s->sprite_warping_accuracy;
    int ox, oy;

    linesize = s->linesize;
    uvlinesize = s->uvlinesize;

    ptr = ref_picture[0];

    ox= s->sprite_offset[0][0] + s->sprite_delta[0][0]*s->mb_x*16 + s->sprite_delta[0][1]*s->mb_y*16;
    oy= s->sprite_offset[0][1] + s->sprite_delta[1][0]*s->mb_x*16 + s->sprite_delta[1][1]*s->mb_y*16;

    s->dsp.gmc(dest_y, ptr, linesize, 16,
           ox,
           oy,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos, s->v_edge_pos);
    s->dsp.gmc(dest_y+8, ptr, linesize, 16,
           ox + s->sprite_delta[0][0]*8,
           oy + s->sprite_delta[1][0]*8,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos, s->v_edge_pos);

    if(CONFIG_GRAY && s->flags&CODEC_FLAG_GRAY) return;

    ox= s->sprite_offset[1][0] + s->sprite_delta[0][0]*s->mb_x*8 + s->sprite_delta[0][1]*s->mb_y*8;
    oy= s->sprite_offset[1][1] + s->sprite_delta[1][0]*s->mb_x*8 + s->sprite_delta[1][1]*s->mb_y*8;

    ptr = ref_picture[1];
    s->dsp.gmc(dest_cb, ptr, uvlinesize, 8,
           ox,
           oy,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos>>1, s->v_edge_pos>>1);

    ptr = ref_picture[2];
    s->dsp.gmc(dest_cr, ptr, uvlinesize, 8,
           ox,
           oy,
           s->sprite_delta[0][0], s->sprite_delta[0][1],
           s->sprite_delta[1][0], s->sprite_delta[1][1],
           a+1, (1<<(2*a+1)) - s->no_rounding,
           s->h_edge_pos>>1, s->v_edge_pos>>1);
}
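
/* Editorial note: gmc1_motion() above covers the one-warp-point case, where
 * the global motion reduces to a single translation with 1/16-pel fractional
 * offsets, while gmc_motion() applies the general affine warp described by
 * s->sprite_offset / s->sprite_delta through dsp.gmc().
 */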
static inline int hpel_motion(MpegEncContext *s,
                                  uint8_t *dest, uint8_t *src,
                                  int field_based, int field_select,
                                  int src_x, int src_y,
                                  int width, int height, int stride,
                                  int h_edge_pos, int v_edge_pos,
                                  int w, int h, op_pixels_func *pix_op,
                                  int motion_x, int motion_y)
{
    int dxy;
    int emu=0;

    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x += motion_x >> 1;
    src_y += motion_y >> 1;

    /* WARNING: do not forget half pels */
    src_x = av_clip(src_x, -16, width); //FIXME unneeded for emu?
    if (src_x == width)
        dxy &= ~1;
    src_y = av_clip(src_y, -16, height);
    if (src_y == height)
        dxy &= ~2;
    src += src_y * stride + src_x;

    if(s->unrestricted_mv && (s->flags&CODEC_FLAG_EMU_EDGE)){
        if(   (unsigned)src_x > h_edge_pos - (motion_x&1) - w
           || (unsigned)src_y > v_edge_pos - (motion_y&1) - h){
            ff_emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w+1, (h+1)<<field_based,
                                src_x, src_y<<field_based, h_edge_pos, s->v_edge_pos);
            src= s->edge_emu_buffer;
            emu=1;
        }
    }
    if(field_select)
        src += s->linesize;
    pix_op[dxy](dest, src, stride, h);
    return emu;
}
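
/* Editorial note: dxy packs the half-pel fraction of the motion vector
 * (bit 0 = horizontal half-pel, bit 1 = vertical half-pel) and selects one
 * of the four interpolation variants in the op_pixels_func table, so
 * pix_op[dxy] is the matching put/avg routine for this sub-pel position.
 */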
static av_always_inline
void mpeg_motion_internal(MpegEncContext *s,
                 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                 int field_based, int bottom_field, int field_select,
                 uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                 int motion_x, int motion_y, int h, int is_mpeg12)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y,
        uvsrc_x, uvsrc_y, v_edge_pos, uvlinesize, linesize;

#if 0
    if(s->quarter_sample)
    {
        motion_x>>=1;
        motion_y>>=1;
    }
#endif

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize   = s->current_picture.linesize[0] << field_based;
    uvlinesize = s->current_picture.linesize[1] << field_based;

    dxy = ((motion_y & 1) << 1) | (motion_x & 1);
    src_x = s->mb_x* 16               + (motion_x >> 1);
    src_y =(s->mb_y<<(4-field_based)) + (motion_y >> 1);

    if (!is_mpeg12 && s->out_format == FMT_H263) {
        if((s->workaround_bugs & FF_BUG_HPEL_CHROMA) && field_based){
            mx = (motion_x>>1)|(motion_x&1);
            my = motion_y >>1;
            uvdxy = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x* 8                 + (mx >> 1);
            uvsrc_y = (s->mb_y<<(3-field_based)) + (my >> 1);
        }else{
            uvdxy = dxy | (motion_y & 2) | ((motion_x & 2) >> 1);
            uvsrc_x = src_x>>1;
            uvsrc_y = src_y>>1;
        }
    }else if(!is_mpeg12 && s->out_format == FMT_H261){//even chroma mv's are full pel in H261
        mx = motion_x / 4;
        my = motion_y / 4;
        uvdxy = 0;
        uvsrc_x = s->mb_x*8 + mx;
        uvsrc_y = s->mb_y*8 + my;
    } else {
        if(s->chroma_y_shift){
            mx = motion_x / 2;
            my = motion_y / 2;
            uvdxy = ((my & 1) << 1) | (mx & 1);
            uvsrc_x = s->mb_x* 8                 + (mx >> 1);
            uvsrc_y = (s->mb_y<<(3-field_based)) + (my >> 1);
        } else {
            if(s->chroma_x_shift){
            //Chroma422
                mx = motion_x / 2;
                uvdxy = ((motion_y & 1) << 1) | (mx & 1);
                uvsrc_x = s->mb_x* 8           + (mx >> 1);
                uvsrc_y = src_y;
            } else {
            //Chroma444
                uvdxy = dxy;
                uvsrc_x = src_x;
                uvsrc_y = src_y;
            }
        }
    }

    ptr_y  = ref_picture[0] +   src_y *   linesize +   src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if(   (unsigned)src_x > s->h_edge_pos - (motion_x&1) - 16
       || (unsigned)src_y >    v_edge_pos - (motion_y&1) - h){
            if(is_mpeg12 || s->codec_id == CODEC_ID_MPEG2VIDEO ||
               s->codec_id == CODEC_ID_MPEG1VIDEO){
                av_log(s->avctx,AV_LOG_DEBUG,
                        "MPEG motion vector out of boundary\n");
                if(!s->chroma_y_shift)
                    return;
            }
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize,
                                17, 17+field_based,
                                src_x, src_y<<field_based,
                                s->h_edge_pos, s->v_edge_pos);
            ptr_y = s->edge_emu_buffer;
            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                uint8_t *uvbuf= s->edge_emu_buffer+18*s->linesize;
                ff_emulated_edge_mc(uvbuf,
                                    ptr_cb, s->uvlinesize,
                                    9, 9+field_based,
                                    uvsrc_x, uvsrc_y<<field_based,
                                    s->h_edge_pos>>1, s->v_edge_pos>>1);
                ff_emulated_edge_mc(uvbuf+16,
                                    ptr_cr, s->uvlinesize,
                                    9, 9+field_based,
                                    uvsrc_x, uvsrc_y<<field_based,
                                    s->h_edge_pos>>1, s->v_edge_pos>>1);
                ptr_cb= uvbuf;
                ptr_cr= uvbuf+16;
            }
    }

    if(bottom_field){ //FIXME use this for field pix too instead of the obnoxious hack which changes picture.data
        dest_y += s->linesize;
        dest_cb+= s->uvlinesize;
        dest_cr+= s->uvlinesize;
    }

    if(field_select){
        ptr_y += s->linesize;
        ptr_cb+= s->uvlinesize;
        ptr_cr+= s->uvlinesize;
    }

    pix_op[0][dxy](dest_y, ptr_y, linesize, h);

    if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        pix_op[s->chroma_x_shift][uvdxy]
                (dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift);
        pix_op[s->chroma_x_shift][uvdxy]
                (dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift);
    }
    if(!is_mpeg12 && (CONFIG_H261_ENCODER || CONFIG_H261_DECODER) &&
         s->out_format == FMT_H261){
        ff_h261_loop_filter(s);
    }
}
/* apply one mpeg motion vector to the three components */
static av_always_inline
void mpeg_motion(MpegEncContext *s,
                 uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                 int field_based, int bottom_field, int field_select,
                 uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                 int motion_x, int motion_y, int h)
{
#if !CONFIG_SMALL
    if(s->out_format == FMT_MPEG1)
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, field_based,
                    bottom_field, field_select, ref_picture, pix_op,
                    motion_x, motion_y, h, 1);
    else
#endif
        mpeg_motion_internal(s, dest_y, dest_cb, dest_cr, field_based,
                    bottom_field, field_select, ref_picture, pix_op,
                    motion_x, motion_y, h, 0);
}
//FIXME move to dsputil, avg variant, 16x16 version
static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride){
    int x;
    uint8_t * const top   = src[1];
    uint8_t * const left  = src[2];
    uint8_t * const mid   = src[0];
    uint8_t * const right = src[3];
    uint8_t * const bottom= src[4];
#define OBMC_FILTER(x, t, l, m, r, b)\
    dst[x]= (t*top[x] + l*left[x] + m*mid[x] + r*right[x] + b*bottom[x] + 4)>>3
#define OBMC_FILTER4(x, t, l, m, r, b)\
    OBMC_FILTER(x         , t, l, m, r, b);\
    OBMC_FILTER(x+1       , t, l, m, r, b);\
    OBMC_FILTER(x  +stride, t, l, m, r, b);\
    OBMC_FILTER(x+1+stride, t, l, m, r, b);

    x=0;
    OBMC_FILTER (x  , 2, 2, 4, 0, 0);
    OBMC_FILTER (x+1, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x+2, 2, 1, 5, 0, 0);
    OBMC_FILTER4(x+4, 2, 0, 5, 1, 0);
    OBMC_FILTER (x+6, 2, 0, 5, 1, 0);
    OBMC_FILTER (x+7, 2, 0, 4, 2, 0);
    x+= stride;
    OBMC_FILTER (x  , 1, 2, 5, 0, 0);
    OBMC_FILTER (x+1, 1, 2, 5, 0, 0);
    OBMC_FILTER (x+6, 1, 0, 5, 2, 0);
    OBMC_FILTER (x+7, 1, 0, 5, 2, 0);
    x+= stride;
    OBMC_FILTER4(x  , 1, 2, 5, 0, 0);
    OBMC_FILTER4(x+2, 1, 1, 6, 0, 0);
    OBMC_FILTER4(x+4, 1, 0, 6, 1, 0);
    OBMC_FILTER4(x+6, 1, 0, 5, 2, 0);
    x+= 2*stride;
    OBMC_FILTER4(x  , 0, 2, 5, 0, 1);
    OBMC_FILTER4(x+2, 0, 1, 6, 0, 1);
    OBMC_FILTER4(x+4, 0, 0, 6, 1, 1);
    OBMC_FILTER4(x+6, 0, 0, 5, 2, 1);
    x+= 2*stride;
    OBMC_FILTER (x  , 0, 2, 5, 0, 1);
    OBMC_FILTER (x+1, 0, 2, 5, 0, 1);
    OBMC_FILTER4(x+2, 0, 1, 5, 0, 2);
    OBMC_FILTER4(x+4, 0, 0, 5, 1, 2);
    OBMC_FILTER (x+6, 0, 0, 5, 2, 1);
    OBMC_FILTER (x+7, 0, 0, 5, 2, 1);
    x+= stride;
    OBMC_FILTER (x  , 0, 2, 4, 0, 2);
    OBMC_FILTER (x+1, 0, 1, 5, 0, 2);
    OBMC_FILTER (x+6, 0, 0, 5, 1, 2);
    OBMC_FILTER (x+7, 0, 0, 4, 2, 2);
}
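
/* Editorial note: every OBMC_FILTER() weight set above sums to 8, so the
 * "+ 4" rounds and the ">> 3" renormalizes the blend of the mid block with
 * its top/left/right/bottom neighbours back to pixel range.
 */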
/* obmc for 1 8x8 luma block */
static inline void obmc_motion(MpegEncContext *s,
                               uint8_t *dest, uint8_t *src,
                               int src_x, int src_y,
                               op_pixels_func *pix_op,
                               int16_t mv[5][2]/* mid top left right bottom*/)
{
    int i;
    uint8_t *ptr[5];

    assert(s->quarter_sample==0);

    for(i=0; i<5; i++){
        if(i && mv[i][0]==mv[MID][0] && mv[i][1]==mv[MID][1]){
            ptr[i]= ptr[MID];
        }else{
            ptr[i]= s->obmc_scratchpad + 8*(i&1) + s->linesize*8*(i>>1);
            hpel_motion(s, ptr[i], src, 0, 0,
                        src_x, src_y,
                        s->width, s->height, s->linesize,
                        s->h_edge_pos, s->v_edge_pos,
                        8, 8, pix_op,
                        mv[i][0], mv[i][1]);
        }
    }

    put_obmc(dest, ptr, s->linesize);
}
static inline void qpel_motion(MpegEncContext *s,
                               uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                               int field_based, int bottom_field, int field_select,
                               uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                               qpel_mc_func (*qpix_op)[16],
                               int motion_x, int motion_y, int h)
{
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos, linesize, uvlinesize;

    dxy = ((motion_y & 3) << 2) | (motion_x & 3);
    src_x = s->mb_x *  16                 + (motion_x >> 2);
    src_y = s->mb_y * (16 >> field_based) + (motion_y >> 2);

    v_edge_pos = s->v_edge_pos >> field_based;
    linesize = s->linesize << field_based;
    uvlinesize = s->uvlinesize << field_based;

    if(field_based){
        mx= motion_x/2;
        my= motion_y>>1;
    }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA2){
        static const int rtab[8]= {0,0,1,1,0,0,0,1};
        mx= (motion_x>>1) + rtab[motion_x&7];
        my= (motion_y>>1) + rtab[motion_y&7];
    }else if(s->workaround_bugs&FF_BUG_QPEL_CHROMA){
        mx= (motion_x>>1)|(motion_x&1);
        my= (motion_y>>1)|(motion_y&1);
    }else{
        mx= motion_x/2;
        my= motion_y/2;
    }
    mx= (mx>>1)|(mx&1);
    my= (my>>1)|(my&1);

    uvdxy= (mx&1) | ((my&1)<<1);
    mx>>=1;
    my>>=1;

    uvsrc_x = s->mb_x *  8                 + mx;
    uvsrc_y = s->mb_y * (8 >> field_based) + my;

    ptr_y  = ref_picture[0] +   src_y *   linesize +   src_x;
    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;

    if(   (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 16
       || (unsigned)src_y >    v_edge_pos - (motion_y&3) - h  ){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr_y, s->linesize,
                            17, 17+field_based, src_x, src_y<<field_based,
                            s->h_edge_pos, s->v_edge_pos);
        ptr_y= s->edge_emu_buffer;
        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
            uint8_t *uvbuf= s->edge_emu_buffer + 18*s->linesize;
            ff_emulated_edge_mc(uvbuf, ptr_cb, s->uvlinesize,
                                9, 9 + field_based,
                                uvsrc_x, uvsrc_y<<field_based,
                                s->h_edge_pos>>1, s->v_edge_pos>>1);
            ff_emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize,
                                9, 9 + field_based,
                                uvsrc_x, uvsrc_y<<field_based,
                                s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr_cb= uvbuf;
            ptr_cr= uvbuf + 16;
        }
    }

    if(!field_based)
        qpix_op[0][dxy](dest_y, ptr_y, linesize);
    else{
        if(bottom_field){
            dest_y += s->linesize;
            dest_cb+= s->uvlinesize;
            dest_cr+= s->uvlinesize;
        }

        if(field_select){
            ptr_y  += s->linesize;
            ptr_cb += s->uvlinesize;
            ptr_cr += s->uvlinesize;
        }
        //damn interlaced mode
        //FIXME boundary mirroring is not exactly correct here
        qpix_op[1][dxy](dest_y  , ptr_y  , linesize);
        qpix_op[1][dxy](dest_y+8, ptr_y+8, linesize);
    }
    if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
        pix_op[1][uvdxy](dest_cr, ptr_cr, uvlinesize, h >> 1);
        pix_op[1][uvdxy](dest_cb, ptr_cb, uvlinesize, h >> 1);
    }
}
/**
 * h263 chroma 4mv motion compensation.
 */
static inline void chroma_4mv_motion(MpegEncContext *s,
                                     uint8_t *dest_cb, uint8_t *dest_cr,
                                     uint8_t **ref_picture,
                                     op_pixels_func *pix_op,
                                     int mx, int my){
    int dxy, emu=0, src_x, src_y, offset;
    uint8_t *ptr;

    /* In case of 8X8, we construct a single chroma motion vector
       with a special rounding */
    mx= ff_h263_round_chroma(mx);
    my= ff_h263_round_chroma(my);

    dxy = ((my & 1) << 1) | (mx & 1);
    mx >>= 1;
    my >>= 1;

    src_x = s->mb_x * 8 + mx;
    src_y = s->mb_y * 8 + my;
    src_x = av_clip(src_x, -8, s->width/2);
    if (src_x == s->width/2)
        dxy &= ~1;
    src_y = av_clip(src_y, -8, s->height/2);
    if (src_y == s->height/2)
        dxy &= ~2;

    offset = (src_y * (s->uvlinesize)) + src_x;
    ptr = ref_picture[1] + offset;
    if(s->flags&CODEC_FLAG_EMU_EDGE){
        if(   (unsigned)src_x > (s->h_edge_pos>>1) - (dxy &1) - 8
           || (unsigned)src_y > (s->v_edge_pos>>1) - (dxy>>1) - 8){
            ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
                                9, 9, src_x, src_y,
                                s->h_edge_pos>>1, s->v_edge_pos>>1);
            ptr= s->edge_emu_buffer;
            emu=1;
        }
    }
    pix_op[dxy](dest_cb, ptr, s->uvlinesize, 8);

    ptr = ref_picture[2] + offset;
    if(emu){
        ff_emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
                            9, 9, src_x, src_y,
                            s->h_edge_pos>>1, s->v_edge_pos>>1);
        ptr= s->edge_emu_buffer;
    }
    pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
}
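
/* Editorial note: mx/my arrive here as the sum of the four luma block
 * vectors; ff_h263_round_chroma() turns that sum into a single half-pel
 * chroma vector using the H.263 rounding rule before dxy is derived from it.
 */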
static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir){
    /* fetch pixels for estimated mv 4 macroblocks ahead
     * optimized for 64byte cache lines */
    const int shift = s->quarter_sample ? 2 : 1;
    const int mx= (s->mv[dir][0][0]>>shift) + 16*s->mb_x + 8;
    const int my= (s->mv[dir][0][1]>>shift) + 16*s->mb_y;
    int off= mx + (my + (s->mb_x&3)*4)*s->linesize + 64;
    s->dsp.prefetch(pix[0]+off, s->linesize, 4);
    off= (mx>>1) + ((my>>1) + (s->mb_x&7))*s->uvlinesize + 64;
    s->dsp.prefetch(pix[1]+off, pix[2]-pix[1], 2);
}
/**
 * motion compensation of a single macroblock
 * @param dest_y luma destination pointer
 * @param dest_cb chroma cb/u destination pointer
 * @param dest_cr chroma cr/v destination pointer
 * @param dir direction (0->forward, 1->backward)
 * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
 * @param pix_op halfpel motion compensation function (average or put normally)
 * @param qpix_op qpel motion compensation function (average or put normally)
 * the motion vectors are taken from s->mv and the MV type from s->mv_type
 */
static av_always_inline void MPV_motion_internal(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb,
                              uint8_t *dest_cr, int dir,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4],
                              qpel_mc_func (*qpix_op)[16], int is_mpeg12)
{
    int dxy, mx, my, src_x, src_y, motion_x, motion_y;
    int mb_x, mb_y, i;
    uint8_t *ptr, *dest;

    mb_x = s->mb_x;
    mb_y = s->mb_y;

    prefetch_motion(s, ref_picture, dir);

    if(!is_mpeg12 && s->obmc && s->pict_type != FF_B_TYPE){
        int16_t mv_cache[4][4][2];
        const int xy= s->mb_x + s->mb_y*s->mb_stride;
        const int mot_stride= s->b8_stride;
        const int mot_xy= mb_x*2 + mb_y*2*mot_stride;

        assert(!s->mb_skipped);

        memcpy(mv_cache[1][1], s->current_picture.motion_val[0][mot_xy           ], sizeof(int16_t)*4);
        memcpy(mv_cache[2][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);
        memcpy(mv_cache[3][1], s->current_picture.motion_val[0][mot_xy+mot_stride], sizeof(int16_t)*4);

        if(mb_y==0 || IS_INTRA(s->current_picture.mb_type[xy-s->mb_stride])){
            memcpy(mv_cache[0][1], mv_cache[1][1], sizeof(int16_t)*4);
        }else{
            memcpy(mv_cache[0][1], s->current_picture.motion_val[0][mot_xy-mot_stride], sizeof(int16_t)*4);
        }

        if(mb_x==0 || IS_INTRA(s->current_picture.mb_type[xy-1])){
            *(int32_t*)mv_cache[1][0]= *(int32_t*)mv_cache[1][1];
            *(int32_t*)mv_cache[2][0]= *(int32_t*)mv_cache[2][1];
        }else{
            *(int32_t*)mv_cache[1][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1];
            *(int32_t*)mv_cache[2][0]= *(int32_t*)s->current_picture.motion_val[0][mot_xy-1+mot_stride];
        }

        if(mb_x+1>=s->mb_width || IS_INTRA(s->current_picture.mb_type[xy+1])){
            *(int32_t*)mv_cache[1][3]= *(int32_t*)mv_cache[1][2];
            *(int32_t*)mv_cache[2][3]= *(int32_t*)mv_cache[2][2];
        }else{
            *(int32_t*)mv_cache[1][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2];
            *(int32_t*)mv_cache[2][3]= *(int32_t*)s->current_picture.motion_val[0][mot_xy+2+mot_stride];
        }

        mx = 0;
        my = 0;
        for(i=0;i<4;i++) {
            const int x= (i&1)+1;
            const int y= (i>>1)+1;
            int16_t mv[5][2]= {
                {mv_cache[y][x  ][0], mv_cache[y][x  ][1]},
                {mv_cache[y-1][x][0], mv_cache[y-1][x][1]},
                {mv_cache[y][x-1][0], mv_cache[y][x-1][1]},
                {mv_cache[y][x+1][0], mv_cache[y][x+1][1]},
                {mv_cache[y+1][x][0], mv_cache[y+1][x][1]}};

            obmc_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                        ref_picture[0],
                        mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
                        pix_op[1],
                        mv);

            mx += mv[0][0];
            my += mv[0][1];
        }
        if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
            chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);

        return;
    }

    switch(s->mv_type) {
    case MV_TYPE_16X16:
        if(s->mcsel){
            if(s->real_sprite_warping_points==1){
                gmc1_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture);
            }else{
                gmc_motion(s, dest_y, dest_cb, dest_cr,
                            ref_picture);
            }
        }else if(!is_mpeg12 && s->quarter_sample){
            qpel_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, 0,
                        ref_picture, pix_op, qpix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        }else if(!is_mpeg12 && CONFIG_WMV2 && s->mspel){
            ff_mspel_motion(s, dest_y, dest_cb, dest_cr,
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        }else{
            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, 0,
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        }
        break;
    case MV_TYPE_8X8:
        if (!is_mpeg12) {
            mx = 0;
            my = 0;
            if(s->quarter_sample){
                for(i=0;i<4;i++) {
                    motion_x = s->mv[dir][i][0];
                    motion_y = s->mv[dir][i][1];

                    dxy = ((motion_y & 3) << 2) | (motion_x & 3);
                    src_x = mb_x * 16 + (motion_x >> 2) + (i & 1) * 8;
                    src_y = mb_y * 16 + (motion_y >> 2) + (i >>1) * 8;

                    /* WARNING: do not forget half pels */
                    src_x = av_clip(src_x, -16, s->width);
                    if (src_x == s->width)
                        dxy &= ~3;
                    src_y = av_clip(src_y, -16, s->height);
                    if (src_y == s->height)
                        dxy &= ~12;

                    ptr = ref_picture[0] + (src_y * s->linesize) + (src_x);
                    if(s->flags&CODEC_FLAG_EMU_EDGE){
                        if(   (unsigned)src_x > s->h_edge_pos - (motion_x&3) - 8
                           || (unsigned)src_y > s->v_edge_pos - (motion_y&3) - 8 ){
                            ff_emulated_edge_mc(s->edge_emu_buffer, ptr,
                                                s->linesize, 9, 9,
                                                src_x, src_y,
                                                s->h_edge_pos, s->v_edge_pos);
                            ptr= s->edge_emu_buffer;
                        }
                    }
                    dest = dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize;
                    qpix_op[1][dxy](dest, ptr, s->linesize);

                    mx += s->mv[dir][i][0]/2;
                    my += s->mv[dir][i][1]/2;
                }
            }else{
                for(i=0;i<4;i++) {
                    hpel_motion(s, dest_y + ((i & 1) * 8) + (i >> 1) * 8 * s->linesize,
                                ref_picture[0], 0, 0,
                                mb_x * 16 + (i & 1) * 8, mb_y * 16 + (i >>1) * 8,
                                s->width, s->height, s->linesize,
                                s->h_edge_pos, s->v_edge_pos,
                                8, 8, pix_op[1],
                                s->mv[dir][i][0], s->mv[dir][i][1]);

                    mx += s->mv[dir][i][0];
                    my += s->mv[dir][i][1];
                }
            }

            if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY))
                chroma_4mv_motion(s, dest_cb, dest_cr, ref_picture, pix_op[1], mx, my);
        }
        break;
    case MV_TYPE_FIELD:
        if (s->picture_structure == PICT_FRAME) {
            if(!is_mpeg12 && s->quarter_sample){
                for(i=0; i<2; i++){
                    qpel_motion(s, dest_y, dest_cb, dest_cr,
                                1, i, s->field_select[dir][i],
                                ref_picture, pix_op, qpix_op,
                                s->mv[dir][i][0], s->mv[dir][i][1], 8);
                }
            }else{
                /* top field */
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            1, 0, s->field_select[dir][0],
                            ref_picture, pix_op,
                            s->mv[dir][0][0], s->mv[dir][0][1], 8);
                /* bottom field */
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            1, 1, s->field_select[dir][1],
                            ref_picture, pix_op,
                            s->mv[dir][1][0], s->mv[dir][1][1], 8);
            }
        } else {
            if(s->picture_structure != s->field_select[dir][0] + 1 && s->pict_type != FF_B_TYPE && !s->first_field){
                ref_picture= s->current_picture_ptr->data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][0],
                        ref_picture, pix_op,
                        s->mv[dir][0][0], s->mv[dir][0][1], 16);
        }
        break;
    case MV_TYPE_16X8:
        for(i=0; i<2; i++){
            uint8_t ** ref2picture;

            if(s->picture_structure == s->field_select[dir][i] + 1
               || s->pict_type == FF_B_TYPE || s->first_field){
                ref2picture= ref_picture;
            }else{
                ref2picture= s->current_picture_ptr->data;
            }

            mpeg_motion(s, dest_y, dest_cb, dest_cr,
                        0, 0, s->field_select[dir][i],
                        ref2picture, pix_op,
                        s->mv[dir][i][0], s->mv[dir][i][1] + 16*i, 8);

            dest_y += 16*s->linesize;
            dest_cb+= (16>>s->chroma_y_shift)*s->uvlinesize;
            dest_cr+= (16>>s->chroma_y_shift)*s->uvlinesize;
        }
        break;
    case MV_TYPE_DMV:
        if(s->picture_structure == PICT_FRAME){
            for(i=0; i<2; i++){
                int j;
                for(j=0; j<2; j++){
                    mpeg_motion(s, dest_y, dest_cb, dest_cr,
                                1, j, j^i,
                                ref_picture, pix_op,
                                s->mv[dir][2*i + j][0], s->mv[dir][2*i + j][1], 8);
                }
                pix_op = s->dsp.avg_pixels_tab;
            }
        }else{
            for(i=0; i<2; i++){
                mpeg_motion(s, dest_y, dest_cb, dest_cr,
                            0, 0, s->picture_structure != i+1,
                            ref_picture, pix_op,
                            s->mv[dir][2*i][0],s->mv[dir][2*i][1],16);

                // after put we make avg of the same block
                pix_op=s->dsp.avg_pixels_tab;

                //opposite parity is always in the same frame if this is second field
                if(!s->first_field){
                    ref_picture = s->current_picture_ptr->data;
                }
            }
        }
        break;
    default: assert(0);
    }
}
static inline void MPV_motion(MpegEncContext *s,
                              uint8_t *dest_y, uint8_t *dest_cb,
                              uint8_t *dest_cr, int dir,
                              uint8_t **ref_picture,
                              op_pixels_func (*pix_op)[4],
                              qpel_mc_func (*qpix_op)[16])
{
#if !CONFIG_SMALL
    if(s->out_format == FMT_MPEG1)
        MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 1);
    else
#endif
        MPV_motion_internal(s, dest_y, dest_cb, dest_cr, dir,
                            ref_picture, pix_op, qpix_op, 0);
}
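
/* Usage sketch (editorial; the actual call site lives outside this header):
 * MPV_motion() is invoked once per inter macroblock and prediction direction,
 * with "put" tables for the first prediction and "avg" tables to blend a
 * second one on top of it:
 *
 *     op_pixels_func (*op_pix)[4]   = s->dsp.put_pixels_tab;
 *     qpel_mc_func   (*op_qpix)[16] = s->dsp.put_qpel_pixels_tab;
 *     MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.data, op_pix, op_qpix);
 *     op_pix  = s->dsp.avg_pixels_tab;
 *     op_qpix = s->dsp.avg_qpel_pixels_tab;
 *     // ...second call with dir=1 and s->next_picture.data for B macroblocks
 */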
#endif /* AVCODEC_MPEGVIDEO_COMMON_H */