FFMpeg-mirror/DVCPRO-HD.git / ffplay.c
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include <math.h>
23 #include <limits.h>
24 #include "libavutil/avstring.h"
25 #include "libavformat/avformat.h"
26 #include "libavformat/rtsp.h"
27 #include "libavdevice/avdevice.h"
28 #include "libswscale/swscale.h"
30 #include "cmdutils.h"
32 #include <SDL.h>
33 #include <SDL_thread.h>
35 #ifdef __MINGW32__
36 #undef main /* We don't want SDL to override our main() */
37 #endif
39 #undef exit
41 const char program_name[] = "FFplay";
42 const int program_birth_year = 2003;
44 //#define DEBUG_SYNC
46 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
47 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
48 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
50 /* SDL audio buffer size, in samples. Should be small to have precise
51 A/V sync as SDL does not have hardware buffer fullness info. */
52 #define SDL_AUDIO_BUFFER_SIZE 1024
54 /* no AV sync correction is done if below the AV sync threshold */
55 #define AV_SYNC_THRESHOLD 0.01
56 /* no AV correction is done if too big error */
57 #define AV_NOSYNC_THRESHOLD 10.0
59 /* maximum audio speed change to get correct sync */
60 #define SAMPLE_CORRECTION_PERCENT_MAX 10
62 /* we use about AUDIO_DIFF_AVG_NB A-V differences to compute the average */
63 #define AUDIO_DIFF_AVG_NB 20
65 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
66 #define SAMPLE_ARRAY_SIZE (2*65536)
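/* Illustrative note, not part of the original ffplay.c: the A-V difference
   averaging driven by AUDIO_DIFF_AVG_NB is an exponential moving average.
   stream_component_open() sets coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB), so
   a measurement that is AUDIO_DIFF_AVG_NB updates old is weighted by
   coef^AUDIO_DIFF_AVG_NB = 0.01, i.e. roughly the last AUDIO_DIFF_AVG_NB
   differences dominate the estimate. A minimal standalone sketch of the same
   filter (hypothetical helpers, for illustration only): */
#if 0
static double avg_coef, avg_cum;
static void avg_init(void) { avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB); avg_cum = 0; }
static double avg_push(double diff)
{
    avg_cum = diff + avg_coef * avg_cum;   /* same recurrence as audio_diff_cum */
    return avg_cum * (1.0 - avg_coef);     /* normalized estimate, as in synchronize_audio() */
}
#endif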
68 static int sws_flags = SWS_BICUBIC;
70 typedef struct PacketQueue {
71 AVPacketList *first_pkt, *last_pkt;
72 int nb_packets;
73 int size;
74 int abort_request;
75 SDL_mutex *mutex;
76 SDL_cond *cond;
77 } PacketQueue;
79 #define VIDEO_PICTURE_QUEUE_SIZE 1
80 #define SUBPICTURE_QUEUE_SIZE 4
82 typedef struct VideoPicture {
83 double pts; ///<presentation time stamp for this picture
84 SDL_Overlay *bmp;
85 int width, height; /* source height & width */
86 int allocated;
87 } VideoPicture;
89 typedef struct SubPicture {
90 double pts; /* presentation time stamp for this picture */
91 AVSubtitle sub;
92 } SubPicture;
94 enum {
95 AV_SYNC_AUDIO_MASTER, /* default choice */
96 AV_SYNC_VIDEO_MASTER,
97 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
100 typedef struct VideoState {
101 SDL_Thread *parse_tid;
102 SDL_Thread *video_tid;
103 AVInputFormat *iformat;
104 int no_background;
105 int abort_request;
106 int paused;
107 int last_paused;
108 int seek_req;
109 int seek_flags;
110 int64_t seek_pos;
111 AVFormatContext *ic;
112 int dtg_active_format;
114 int audio_stream;
116 int av_sync_type;
117 double external_clock; /* external clock base */
118 int64_t external_clock_time;
120 double audio_clock;
121 double audio_diff_cum; /* used for AV difference average computation */
122 double audio_diff_avg_coef;
123 double audio_diff_threshold;
124 int audio_diff_avg_count;
125 AVStream *audio_st;
126 PacketQueue audioq;
127 int audio_hw_buf_size;
128 /* samples output by the codec. we reserve more space for avsync
129 compensation */
130 DECLARE_ALIGNED(16,uint8_t,audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
131 unsigned int audio_buf_size; /* in bytes */
132 int audio_buf_index; /* in bytes */
133 AVPacket audio_pkt;
134 uint8_t *audio_pkt_data;
135 int audio_pkt_size;
137 int show_audio; /* if true, display audio samples */
138 int16_t sample_array[SAMPLE_ARRAY_SIZE];
139 int sample_array_index;
140 int last_i_start;
142 SDL_Thread *subtitle_tid;
143 int subtitle_stream;
144 int subtitle_stream_changed;
145 AVStream *subtitle_st;
146 PacketQueue subtitleq;
147 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
148 int subpq_size, subpq_rindex, subpq_windex;
149 SDL_mutex *subpq_mutex;
150 SDL_cond *subpq_cond;
152 double frame_timer;
153 double frame_last_pts;
154 double frame_last_delay;
155 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
156 int video_stream;
157 AVStream *video_st;
158 PacketQueue videoq;
159 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
160 int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
161 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
162 int pictq_size, pictq_rindex, pictq_windex;
163 SDL_mutex *pictq_mutex;
164 SDL_cond *pictq_cond;
166 // QETimer *video_timer;
167 char filename[1024];
168 int width, height, xleft, ytop;
169 } VideoState;
171 static void show_help(void);
172 static int audio_write_get_buf_size(VideoState *is);
174 /* options specified by the user */
175 static AVInputFormat *file_iformat;
176 static const char *input_filename;
177 static int fs_screen_width;
178 static int fs_screen_height;
179 static int screen_width = 0;
180 static int screen_height = 0;
181 static int frame_width = 0;
182 static int frame_height = 0;
183 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
184 static int audio_disable;
185 static int video_disable;
186 static int wanted_audio_stream= 0;
187 static int wanted_video_stream= 0;
188 static int seek_by_bytes;
189 static int display_disable;
190 static int show_status;
191 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
192 static int64_t start_time = AV_NOPTS_VALUE;
193 static int debug = 0;
194 static int debug_mv = 0;
195 static int step = 0;
196 static int thread_count = 1;
197 static int workaround_bugs = 1;
198 static int fast = 0;
199 static int genpts = 0;
200 static int lowres = 0;
201 static int idct = FF_IDCT_AUTO;
202 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
203 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
204 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
205 static int error_resilience = FF_ER_CAREFUL;
206 static int error_concealment = 3;
207 static int decoder_reorder_pts= 0;
209 /* current context */
210 static int is_full_screen;
211 static VideoState *cur_stream;
212 static int64_t audio_callback_time;
214 AVPacket flush_pkt;
216 #define FF_ALLOC_EVENT (SDL_USEREVENT)
217 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
218 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
220 SDL_Surface *screen;
222 /* packet queue handling */
223 static void packet_queue_init(PacketQueue *q)
225 memset(q, 0, sizeof(PacketQueue));
226 q->mutex = SDL_CreateMutex();
227 q->cond = SDL_CreateCond();
230 static void packet_queue_flush(PacketQueue *q)
232 AVPacketList *pkt, *pkt1;
234 SDL_LockMutex(q->mutex);
235 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
236 pkt1 = pkt->next;
237 av_free_packet(&pkt->pkt);
238 av_freep(&pkt);
240 q->last_pkt = NULL;
241 q->first_pkt = NULL;
242 q->nb_packets = 0;
243 q->size = 0;
244 SDL_UnlockMutex(q->mutex);
247 static void packet_queue_end(PacketQueue *q)
249 packet_queue_flush(q);
250 SDL_DestroyMutex(q->mutex);
251 SDL_DestroyCond(q->cond);
254 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
256 AVPacketList *pkt1;
258 /* duplicate the packet */
259 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
260 return -1;
262 pkt1 = av_malloc(sizeof(AVPacketList));
263 if (!pkt1)
264 return -1;
265 pkt1->pkt = *pkt;
266 pkt1->next = NULL;
269 SDL_LockMutex(q->mutex);
271 if (!q->last_pkt)
273 q->first_pkt = pkt1;
274 else
275 q->last_pkt->next = pkt1;
276 q->last_pkt = pkt1;
277 q->nb_packets++;
278 q->size += pkt1->pkt.size;
279 /* XXX: should duplicate packet data in DV case */
280 SDL_CondSignal(q->cond);
282 SDL_UnlockMutex(q->mutex);
283 return 0;
286 static void packet_queue_abort(PacketQueue *q)
288 SDL_LockMutex(q->mutex);
290 q->abort_request = 1;
292 SDL_CondSignal(q->cond);
294 SDL_UnlockMutex(q->mutex);
297 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
298 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
300 AVPacketList *pkt1;
301 int ret;
303 SDL_LockMutex(q->mutex);
305 for(;;) {
306 if (q->abort_request) {
307 ret = -1;
308 break;
311 pkt1 = q->first_pkt;
312 if (pkt1) {
313 q->first_pkt = pkt1->next;
314 if (!q->first_pkt)
315 q->last_pkt = NULL;
316 q->nb_packets--;
317 q->size -= pkt1->pkt.size;
318 *pkt = pkt1->pkt;
319 av_free(pkt1);
320 ret = 1;
321 break;
322 } else if (!block) {
323 ret = 0;
324 break;
325 } else {
326 SDL_CondWait(q->cond, q->mutex);
329 SDL_UnlockMutex(q->mutex);
330 return ret;
333 static inline void fill_rectangle(SDL_Surface *screen,
334 int x, int y, int w, int h, int color)
336 SDL_Rect rect;
337 rect.x = x;
338 rect.y = y;
339 rect.w = w;
340 rect.h = h;
341 SDL_FillRect(screen, &rect, color);
344 #if 0
345 /* draw only the border of a rectangle */
346 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
348 int w1, w2, h1, h2;
350 /* fill the background */
351 w1 = x;
352 if (w1 < 0)
353 w1 = 0;
354 w2 = s->width - (x + w);
355 if (w2 < 0)
356 w2 = 0;
357 h1 = y;
358 if (h1 < 0)
359 h1 = 0;
360 h2 = s->height - (y + h);
361 if (h2 < 0)
362 h2 = 0;
363 fill_rectangle(screen,
364 s->xleft, s->ytop,
365 w1, s->height,
366 color);
367 fill_rectangle(screen,
368 s->xleft + s->width - w2, s->ytop,
369 w2, s->height,
370 color);
371 fill_rectangle(screen,
372 s->xleft + w1, s->ytop,
373 s->width - w1 - w2, h1,
374 color);
375 fill_rectangle(screen,
376 s->xleft + w1, s->ytop + s->height - h2,
377 s->width - w1 - w2, h2,
378 color);
380 #endif
384 #define SCALEBITS 10
385 #define ONE_HALF (1 << (SCALEBITS - 1))
386 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
388 #define RGB_TO_Y_CCIR(r, g, b) \
389 ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
390 FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
392 #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
393 (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
394 FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
396 #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
397 (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
398 FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
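/* Illustrative note, not part of the original ffplay.c: the macros above do a
   fixed-point (SCALEBITS = 10) RGB -> CCIR-601 limited-range conversion.
   Worked example for full white, r = g = b = 255:
       RGB_TO_Y_CCIR(255, 255, 255)    -> 235  (CCIR-601 white level)
       RGB_TO_U_CCIR(255, 255, 255, 0) -> 128  (neutral chroma)
       RGB_TO_V_CCIR(255, 255, 255, 0) -> 128
   and r = g = b = 0 gives Y = 16, the CCIR-601 black level. They are used in
   subtitle_thread() below to convert an RGBA subtitle palette to YUVA. */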
400 #define ALPHA_BLEND(a, oldp, newp, s)\
401 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
403 #define RGBA_IN(r, g, b, a, s)\
405 unsigned int v = ((const uint32_t *)(s))[0];\
406 a = (v >> 24) & 0xff;\
407 r = (v >> 16) & 0xff;\
408 g = (v >> 8) & 0xff;\
409 b = v & 0xff;\
412 #define YUVA_IN(y, u, v, a, s, pal)\
414 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
415 a = (val >> 24) & 0xff;\
416 y = (val >> 16) & 0xff;\
417 u = (val >> 8) & 0xff;\
418 v = val & 0xff;\
421 #define YUVA_OUT(d, y, u, v, a)\
423 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
427 #define BPP 1
429 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
431 int wrap, wrap3, width2, skip2;
432 int y, u, v, a, u1, v1, a1, w, h;
433 uint8_t *lum, *cb, *cr;
434 const uint8_t *p;
435 const uint32_t *pal;
436 int dstx, dsty, dstw, dsth;
438 dstx = FFMIN(FFMAX(rect->x, 0), imgw);
439 dstw = FFMIN(FFMAX(rect->w, 0), imgw - dstx);
440 dsty = FFMIN(FFMAX(rect->y, 0), imgh);
441 dsth = FFMIN(FFMAX(rect->h, 0), imgh - dsty);
442 lum = dst->data[0] + dsty * dst->linesize[0];
443 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
444 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
446 width2 = (dstw + 1) >> 1;
447 skip2 = dstx >> 1;
448 wrap = dst->linesize[0];
449 wrap3 = rect->linesize;
450 p = rect->bitmap;
451 pal = rect->rgba_palette; /* Now in YCrCb! */
453 if (dsty & 1) {
454 lum += dstx;
455 cb += skip2;
456 cr += skip2;
458 if (dstx & 1) {
459 YUVA_IN(y, u, v, a, p, pal);
460 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
461 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
462 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
463 cb++;
464 cr++;
465 lum++;
466 p += BPP;
468 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
469 YUVA_IN(y, u, v, a, p, pal);
470 u1 = u;
471 v1 = v;
472 a1 = a;
473 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
475 YUVA_IN(y, u, v, a, p + BPP, pal);
476 u1 += u;
477 v1 += v;
478 a1 += a;
479 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
480 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
481 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
482 cb++;
483 cr++;
484 p += 2 * BPP;
485 lum += 2;
487 if (w) {
488 YUVA_IN(y, u, v, a, p, pal);
489 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
490 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
491 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
493 p += wrap3 + (wrap3 - dstw * BPP);
494 lum += wrap + (wrap - dstw - dstx);
495 cb += dst->linesize[1] - width2 - skip2;
496 cr += dst->linesize[2] - width2 - skip2;
498 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
499 lum += dstx;
500 cb += skip2;
501 cr += skip2;
503 if (dstx & 1) {
504 YUVA_IN(y, u, v, a, p, pal);
505 u1 = u;
506 v1 = v;
507 a1 = a;
508 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
509 p += wrap3;
510 lum += wrap;
511 YUVA_IN(y, u, v, a, p, pal);
512 u1 += u;
513 v1 += v;
514 a1 += a;
515 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
516 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
517 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
518 cb++;
519 cr++;
520 p += -wrap3 + BPP;
521 lum += -wrap + 1;
523 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
524 YUVA_IN(y, u, v, a, p, pal);
525 u1 = u;
526 v1 = v;
527 a1 = a;
528 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
530 YUVA_IN(y, u, v, a, p, pal);
531 u1 += u;
532 v1 += v;
533 a1 += a;
534 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
535 p += wrap3;
536 lum += wrap;
538 YUVA_IN(y, u, v, a, p, pal);
539 u1 += u;
540 v1 += v;
541 a1 += a;
542 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
544 YUVA_IN(y, u, v, a, p, pal);
545 u1 += u;
546 v1 += v;
547 a1 += a;
548 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
550 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
551 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
553 cb++;
554 cr++;
555 p += -wrap3 + 2 * BPP;
556 lum += -wrap + 2;
558 if (w) {
559 YUVA_IN(y, u, v, a, p, pal);
560 u1 = u;
561 v1 = v;
562 a1 = a;
563 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
564 p += wrap3;
565 lum += wrap;
566 YUVA_IN(y, u, v, a, p, pal);
567 u1 += u;
568 v1 += v;
569 a1 += a;
570 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
571 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
572 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
573 cb++;
574 cr++;
575 p += -wrap3 + BPP;
576 lum += -wrap + 1;
578 p += wrap3 + (wrap3 - dstw * BPP);
579 lum += wrap + (wrap - dstw - dstx);
580 cb += dst->linesize[1] - width2 - skip2;
581 cr += dst->linesize[2] - width2 - skip2;
583 /* handle odd height */
584 if (h) {
585 lum += dstx;
586 cb += skip2;
587 cr += skip2;
589 if (dstx & 1) {
590 YUVA_IN(y, u, v, a, p, pal);
591 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
592 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
593 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
594 cb++;
595 cr++;
596 lum++;
597 p += BPP;
599 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
600 YUVA_IN(y, u, v, a, p, pal);
601 u1 = u;
602 v1 = v;
603 a1 = a;
604 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
606 YUVA_IN(y, u, v, a, p + BPP, pal);
607 u1 += u;
608 v1 += v;
609 a1 += a;
610 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
611 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
612 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
613 cb++;
614 cr++;
615 p += 2 * BPP;
616 lum += 2;
618 if (w) {
619 YUVA_IN(y, u, v, a, p, pal);
620 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
621 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
622 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
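/* Illustrative note, not part of the original ffplay.c: the destination of
   blend_subrect() is YUV 4:2:0, so luma is blended per pixel while there is
   only one cb/cr sample per 2x2 block. The loops above therefore accumulate
   the chroma and alpha of the subtitle pixels covering a block into u1/v1/a1
   and blend them in a single ALPHA_BLEND call, whose last argument (0, 1 or
   2) is the shift that divides the accumulated value back down by 1, 2 or 4
   contributing samples. */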
627 static void free_subpicture(SubPicture *sp)
629 int i;
631 for (i = 0; i < sp->sub.num_rects; i++)
633 av_free(sp->sub.rects[i].bitmap);
634 av_free(sp->sub.rects[i].rgba_palette);
637 av_free(sp->sub.rects);
639 memset(&sp->sub, 0, sizeof(AVSubtitle));
642 static void video_image_display(VideoState *is)
644 VideoPicture *vp;
645 SubPicture *sp;
646 AVPicture pict;
647 float aspect_ratio;
648 int width, height, x, y;
649 SDL_Rect rect;
650 int i;
652 vp = &is->pictq[is->pictq_rindex];
653 if (vp->bmp) {
654 /* XXX: use variable in the frame */
655 if (is->video_st->codec->sample_aspect_ratio.num == 0)
656 aspect_ratio = 0;
657 else
658 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
659 * is->video_st->codec->width / is->video_st->codec->height;
660 if (aspect_ratio <= 0.0)
661 aspect_ratio = (float)is->video_st->codec->width /
662 (float)is->video_st->codec->height;
663 /* if an active format is indicated, then it overrides the
664 mpeg format */
665 #if 0
666 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
667 is->dtg_active_format = is->video_st->codec->dtg_active_format;
668 printf("dtg_active_format=%d\n", is->dtg_active_format);
670 #endif
671 #if 0
672 switch(is->video_st->codec->dtg_active_format) {
673 case FF_DTG_AFD_SAME:
674 default:
675 /* nothing to do */
676 break;
677 case FF_DTG_AFD_4_3:
678 aspect_ratio = 4.0 / 3.0;
679 break;
680 case FF_DTG_AFD_16_9:
681 aspect_ratio = 16.0 / 9.0;
682 break;
683 case FF_DTG_AFD_14_9:
684 aspect_ratio = 14.0 / 9.0;
685 break;
686 case FF_DTG_AFD_4_3_SP_14_9:
687 aspect_ratio = 14.0 / 9.0;
688 break;
689 case FF_DTG_AFD_16_9_SP_14_9:
690 aspect_ratio = 14.0 / 9.0;
691 break;
692 case FF_DTG_AFD_SP_4_3:
693 aspect_ratio = 4.0 / 3.0;
694 break;
696 #endif
698 if (is->subtitle_st)
700 if (is->subpq_size > 0)
702 sp = &is->subpq[is->subpq_rindex];
704 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
706 SDL_LockYUVOverlay (vp->bmp);
708 pict.data[0] = vp->bmp->pixels[0];
709 pict.data[1] = vp->bmp->pixels[2];
710 pict.data[2] = vp->bmp->pixels[1];
712 pict.linesize[0] = vp->bmp->pitches[0];
713 pict.linesize[1] = vp->bmp->pitches[2];
714 pict.linesize[2] = vp->bmp->pitches[1];
716 for (i = 0; i < sp->sub.num_rects; i++)
717 blend_subrect(&pict, &sp->sub.rects[i],
718 vp->bmp->w, vp->bmp->h);
720 SDL_UnlockYUVOverlay (vp->bmp);
726 /* XXX: we suppose the screen has a 1.0 pixel ratio */
727 height = is->height;
728 width = ((int)rint(height * aspect_ratio)) & -3;
729 if (width > is->width) {
730 width = is->width;
731 height = ((int)rint(width / aspect_ratio)) & -3;
733 x = (is->width - width) / 2;
734 y = (is->height - height) / 2;
735 if (!is->no_background) {
736 /* fill the background */
737 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
738 } else {
739 is->no_background = 0;
741 rect.x = is->xleft + x;
742 rect.y = is->ytop + y;
743 rect.w = width;
744 rect.h = height;
745 SDL_DisplayYUVOverlay(vp->bmp, &rect);
746 } else {
747 #if 0
748 fill_rectangle(screen,
749 is->xleft, is->ytop, is->width, is->height,
750 QERGB(0x00, 0x00, 0x00));
751 #endif
755 static inline int compute_mod(int a, int b)
757 a = a % b;
758 if (a >= 0)
759 return a;
760 else
761 return a + b;
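/* Illustrative note, not part of the original ffplay.c: compute_mod() is a
   mathematical modulus whose result always lies in [0, b), unlike C's '%',
   which keeps the sign of the dividend. For example compute_mod(-3, 10)
   returns 7 whereas -3 % 10 is -3; video_audio_display() relies on this to
   wrap negative offsets into sample_array[]. */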
764 static void video_audio_display(VideoState *s)
766 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
767 int ch, channels, h, h2, bgcolor, fgcolor;
768 int64_t time_diff;
770 /* compute display index : center on currently output samples */
771 channels = s->audio_st->codec->channels;
772 nb_display_channels = channels;
773 if (!s->paused) {
774 n = 2 * channels;
775 delay = audio_write_get_buf_size(s);
776 delay /= n;
778 /* to be more precise, we take into account the time spent since
779 the last buffer computation */
780 if (audio_callback_time) {
781 time_diff = av_gettime() - audio_callback_time;
782 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
785 delay -= s->width / 2;
786 if (delay < s->width)
787 delay = s->width;
789 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
791 h= INT_MIN;
792 for(i=0; i<1000; i+=channels){
793 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
794 int a= s->sample_array[idx];
795 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
796 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
797 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
798 int score= a-d;
799 if(h<score && (b^c)<0){
800 h= score;
801 i_start= idx;
805 s->last_i_start = i_start;
806 } else {
807 i_start = s->last_i_start;
810 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
811 fill_rectangle(screen,
812 s->xleft, s->ytop, s->width, s->height,
813 bgcolor);
815 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
817 /* total height for one channel */
818 h = s->height / nb_display_channels;
819 /* graph height / 2 */
820 h2 = (h * 9) / 20;
821 for(ch = 0;ch < nb_display_channels; ch++) {
822 i = i_start + ch;
823 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
824 for(x = 0; x < s->width; x++) {
825 y = (s->sample_array[i] * h2) >> 15;
826 if (y < 0) {
827 y = -y;
828 ys = y1 - y;
829 } else {
830 ys = y1;
832 fill_rectangle(screen,
833 s->xleft + x, ys, 1, y,
834 fgcolor);
835 i += channels;
836 if (i >= SAMPLE_ARRAY_SIZE)
837 i -= SAMPLE_ARRAY_SIZE;
841 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
843 for(ch = 1;ch < nb_display_channels; ch++) {
844 y = s->ytop + ch * h;
845 fill_rectangle(screen,
846 s->xleft, y, s->width, 1,
847 fgcolor);
849 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
852 static int video_open(VideoState *is){
853 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
854 int w,h;
856 if(is_full_screen) flags |= SDL_FULLSCREEN;
857 else flags |= SDL_RESIZABLE;
859 if (is_full_screen && fs_screen_width) {
860 w = fs_screen_width;
861 h = fs_screen_height;
862 } else if(!is_full_screen && screen_width){
863 w = screen_width;
864 h = screen_height;
865 }else if (is->video_st && is->video_st->codec->width){
866 w = is->video_st->codec->width;
867 h = is->video_st->codec->height;
868 } else {
869 w = 640;
870 h = 480;
872 #ifndef __APPLE__
873 screen = SDL_SetVideoMode(w, h, 0, flags);
874 #else
875 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
876 screen = SDL_SetVideoMode(w, h, 24, flags);
877 #endif
878 if (!screen) {
879 fprintf(stderr, "SDL: could not set video mode - exiting\n");
880 return -1;
882 SDL_WM_SetCaption("FFplay", "FFplay");
884 is->width = screen->w;
885 is->height = screen->h;
887 return 0;
890 /* display the current picture, if any */
891 static void video_display(VideoState *is)
893 if(!screen)
894 video_open(cur_stream);
895 if (is->audio_st && is->show_audio)
896 video_audio_display(is);
897 else if (is->video_st)
898 video_image_display(is);
901 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
903 SDL_Event event;
904 event.type = FF_REFRESH_EVENT;
905 event.user.data1 = opaque;
906 SDL_PushEvent(&event);
907 return 0; /* 0 means stop timer */
910 /* schedule a video refresh in 'delay' ms */
911 static void schedule_refresh(VideoState *is, int delay)
913 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
916 /* get the current audio clock value */
917 static double get_audio_clock(VideoState *is)
919 double pts;
920 int hw_buf_size, bytes_per_sec;
921 pts = is->audio_clock;
922 hw_buf_size = audio_write_get_buf_size(is);
923 bytes_per_sec = 0;
924 if (is->audio_st) {
925 bytes_per_sec = is->audio_st->codec->sample_rate *
926 2 * is->audio_st->codec->channels;
928 if (bytes_per_sec)
929 pts -= (double)hw_buf_size / bytes_per_sec;
930 return pts;
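/* Illustrative note, not part of the original ffplay.c: audio_clock holds the
   PTS just after the last decoded data, so the current clock is obtained by
   subtracting the playing time of the bytes that are decoded but not yet
   played; bytes_per_sec assumes 16-bit (2-byte) samples. Rough example with
   44100 Hz stereo and 8192 bytes still buffered:
       bytes_per_sec = 44100 * 2 * 2 = 176400
       pts -= 8192.0 / 176400 ~= 0.046 s */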
933 /* get the current video clock value */
934 static double get_video_clock(VideoState *is)
936 double delta;
937 if (is->paused) {
938 delta = 0;
939 } else {
940 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
942 return is->video_current_pts + delta;
945 /* get the current external clock value */
946 static double get_external_clock(VideoState *is)
948 int64_t ti;
949 ti = av_gettime();
950 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
953 /* get the current master clock value */
954 static double get_master_clock(VideoState *is)
956 double val;
958 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
959 if (is->video_st)
960 val = get_video_clock(is);
961 else
962 val = get_audio_clock(is);
963 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
964 if (is->audio_st)
965 val = get_audio_clock(is);
966 else
967 val = get_video_clock(is);
968 } else {
969 val = get_external_clock(is);
971 return val;
974 /* seek in the stream */
975 static void stream_seek(VideoState *is, int64_t pos, int rel)
977 if (!is->seek_req) {
978 is->seek_pos = pos;
979 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
980 if (seek_by_bytes)
981 is->seek_flags |= AVSEEK_FLAG_BYTE;
982 is->seek_req = 1;
986 /* pause or resume the video */
987 static void stream_pause(VideoState *is)
989 is->paused = !is->paused;
990 if (!is->paused) {
991 is->video_current_pts = get_video_clock(is);
992 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
996 /* called to display each frame */
997 static void video_refresh_timer(void *opaque)
999 VideoState *is = opaque;
1000 VideoPicture *vp;
1001 double actual_delay, delay, sync_threshold, ref_clock, diff;
1003 SubPicture *sp, *sp2;
1005 if (is->video_st) {
1006 if (is->pictq_size == 0) {
1007 /* if no picture, need to wait */
1008 schedule_refresh(is, 1);
1009 } else {
1010 /* dequeue the picture */
1011 vp = &is->pictq[is->pictq_rindex];
1013 /* update current video pts */
1014 is->video_current_pts = vp->pts;
1015 is->video_current_pts_time = av_gettime();
1017 /* compute nominal delay */
1018 delay = vp->pts - is->frame_last_pts;
1019 if (delay <= 0 || delay >= 2.0) {
1020 /* if incorrect delay, use previous one */
1021 delay = is->frame_last_delay;
1023 is->frame_last_delay = delay;
1024 is->frame_last_pts = vp->pts;
1026 /* update delay to follow master synchronisation source */
1027 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1028 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1029 /* if video is slave, we try to correct big delays by
1030 duplicating or deleting a frame */
1031 ref_clock = get_master_clock(is);
1032 diff = vp->pts - ref_clock;
1034 /* skip or repeat frame. We take into account the
1035 delay to compute the threshold. I still don't know
1036 if it is the best guess */
1037 sync_threshold = AV_SYNC_THRESHOLD;
1038 if (delay > sync_threshold)
1039 sync_threshold = delay;
1040 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1041 if (diff <= -sync_threshold)
1042 delay = 0;
1043 else if (diff >= sync_threshold)
1044 delay = 2 * delay;
1048 is->frame_timer += delay;
1049 /* compute the REAL delay (we need to do that to avoid
1050 long term errors) */
1051 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1052 if (actual_delay < 0.010) {
1053 /* XXX: should skip picture */
1054 actual_delay = 0.010;
1056 /* launch timer for next picture */
1057 schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
1059 #if defined(DEBUG_SYNC)
1060 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1061 delay, actual_delay, vp->pts, -diff);
1062 #endif
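/* Illustrative note, not part of the original ffplay.c: when video is the
   slave, the rule above is diff = vp->pts - master clock with
   sync_threshold = max(AV_SYNC_THRESHOLD, delay). If diff <= -sync_threshold
   the frame is late and the delay is forced to 0 (show it immediately); if
   diff >= sync_threshold the frame is early and the delay is doubled. E.g.
   with delay = 40 ms and diff = +60 ms the next refresh is pushed out to
   about 80 ms. No correction is attempted once |diff| exceeds
   AV_NOSYNC_THRESHOLD (10 s), as the clocks are then assumed unrelated. */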
1064 if(is->subtitle_st) {
1065 if (is->subtitle_stream_changed) {
1066 SDL_LockMutex(is->subpq_mutex);
1068 while (is->subpq_size) {
1069 free_subpicture(&is->subpq[is->subpq_rindex]);
1071 /* update queue size and signal for next picture */
1072 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1073 is->subpq_rindex = 0;
1075 is->subpq_size--;
1077 is->subtitle_stream_changed = 0;
1079 SDL_CondSignal(is->subpq_cond);
1080 SDL_UnlockMutex(is->subpq_mutex);
1081 } else {
1082 if (is->subpq_size > 0) {
1083 sp = &is->subpq[is->subpq_rindex];
1085 if (is->subpq_size > 1)
1086 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1087 else
1088 sp2 = NULL;
1090 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1091 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1093 free_subpicture(sp);
1095 /* update queue size and signal for next picture */
1096 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1097 is->subpq_rindex = 0;
1099 SDL_LockMutex(is->subpq_mutex);
1100 is->subpq_size--;
1101 SDL_CondSignal(is->subpq_cond);
1102 SDL_UnlockMutex(is->subpq_mutex);
1108 /* display picture */
1109 video_display(is);
1111 /* update queue size and signal for next picture */
1112 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1113 is->pictq_rindex = 0;
1115 SDL_LockMutex(is->pictq_mutex);
1116 is->pictq_size--;
1117 SDL_CondSignal(is->pictq_cond);
1118 SDL_UnlockMutex(is->pictq_mutex);
1120 } else if (is->audio_st) {
1121 /* draw the next audio frame */
1123 schedule_refresh(is, 40);
1125 /* if only audio stream, then display the audio bars (better
1126 than nothing, just to test the implementation) */
1128 /* display picture */
1129 video_display(is);
1130 } else {
1131 schedule_refresh(is, 100);
1133 if (show_status) {
1134 static int64_t last_time;
1135 int64_t cur_time;
1136 int aqsize, vqsize, sqsize;
1137 double av_diff;
1139 cur_time = av_gettime();
1140 if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1141 aqsize = 0;
1142 vqsize = 0;
1143 sqsize = 0;
1144 if (is->audio_st)
1145 aqsize = is->audioq.size;
1146 if (is->video_st)
1147 vqsize = is->videoq.size;
1148 if (is->subtitle_st)
1149 sqsize = is->subtitleq.size;
1150 av_diff = 0;
1151 if (is->audio_st && is->video_st)
1152 av_diff = get_audio_clock(is) - get_video_clock(is);
1153 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
1154 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1155 fflush(stdout);
1156 last_time = cur_time;
1161 /* allocate a picture (needs to be done in the main thread to avoid
1162 potential locking problems) */
1163 static void alloc_picture(void *opaque)
1165 VideoState *is = opaque;
1166 VideoPicture *vp;
1168 vp = &is->pictq[is->pictq_windex];
1170 if (vp->bmp)
1171 SDL_FreeYUVOverlay(vp->bmp);
1173 #if 0
1174 /* XXX: use generic function */
1175 /* XXX: disable overlay if no hardware acceleration or if RGB format */
1176 switch(is->video_st->codec->pix_fmt) {
1177 case PIX_FMT_YUV420P:
1178 case PIX_FMT_YUV422P:
1179 case PIX_FMT_YUV444P:
1180 case PIX_FMT_YUYV422:
1181 case PIX_FMT_YUV410P:
1182 case PIX_FMT_YUV411P:
1183 is_yuv = 1;
1184 break;
1185 default:
1186 is_yuv = 0;
1187 break;
1189 #endif
1190 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1191 is->video_st->codec->height,
1192 SDL_YV12_OVERLAY,
1193 screen);
1194 vp->width = is->video_st->codec->width;
1195 vp->height = is->video_st->codec->height;
1197 SDL_LockMutex(is->pictq_mutex);
1198 vp->allocated = 1;
1199 SDL_CondSignal(is->pictq_cond);
1200 SDL_UnlockMutex(is->pictq_mutex);
1205 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1207 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1209 VideoPicture *vp;
1210 int dst_pix_fmt;
1211 AVPicture pict;
1212 static struct SwsContext *img_convert_ctx;
1214 /* wait until we have space to put a new picture */
1215 SDL_LockMutex(is->pictq_mutex);
1216 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1217 !is->videoq.abort_request) {
1218 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1220 SDL_UnlockMutex(is->pictq_mutex);
1222 if (is->videoq.abort_request)
1223 return -1;
1225 vp = &is->pictq[is->pictq_windex];
1227 /* alloc or resize hardware picture buffer */
1228 if (!vp->bmp ||
1229 vp->width != is->video_st->codec->width ||
1230 vp->height != is->video_st->codec->height) {
1231 SDL_Event event;
1233 vp->allocated = 0;
1235 /* the allocation must be done in the main thread to avoid
1236 locking problems */
1237 event.type = FF_ALLOC_EVENT;
1238 event.user.data1 = is;
1239 SDL_PushEvent(&event);
1241 /* wait until the picture is allocated */
1242 SDL_LockMutex(is->pictq_mutex);
1243 while (!vp->allocated && !is->videoq.abort_request) {
1244 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1246 SDL_UnlockMutex(is->pictq_mutex);
1248 if (is->videoq.abort_request)
1249 return -1;
1252 /* if the frame is not skipped, then display it */
1253 if (vp->bmp) {
1254 /* get a pointer to the bitmap */
1255 SDL_LockYUVOverlay (vp->bmp);
1257 dst_pix_fmt = PIX_FMT_YUV420P;
1258 pict.data[0] = vp->bmp->pixels[0];
1259 pict.data[1] = vp->bmp->pixels[2];
1260 pict.data[2] = vp->bmp->pixels[1];
1262 pict.linesize[0] = vp->bmp->pitches[0];
1263 pict.linesize[1] = vp->bmp->pitches[2];
1264 pict.linesize[2] = vp->bmp->pitches[1];
1265 img_convert_ctx = sws_getCachedContext(img_convert_ctx,
1266 is->video_st->codec->width, is->video_st->codec->height,
1267 is->video_st->codec->pix_fmt,
1268 is->video_st->codec->width, is->video_st->codec->height,
1269 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1270 if (img_convert_ctx == NULL) {
1271 fprintf(stderr, "Cannot initialize the conversion context\n");
1272 exit(1);
1274 sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1275 0, is->video_st->codec->height, pict.data, pict.linesize);
1276 /* update the bitmap content */
1277 SDL_UnlockYUVOverlay(vp->bmp);
1279 vp->pts = pts;
1281 /* now we can update the picture count */
1282 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1283 is->pictq_windex = 0;
1284 SDL_LockMutex(is->pictq_mutex);
1285 is->pictq_size++;
1286 SDL_UnlockMutex(is->pictq_mutex);
1288 return 0;
1292 * compute the exact PTS for the picture if it is omitted in the stream
1293 * @param pts1 the dts of the pkt / pts of the frame
1295 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1297 double frame_delay, pts;
1299 pts = pts1;
1301 if (pts != 0) {
1302 /* update video clock with pts, if present */
1303 is->video_clock = pts;
1304 } else {
1305 pts = is->video_clock;
1307 /* update video clock for next frame */
1308 frame_delay = av_q2d(is->video_st->codec->time_base);
1309 /* for MPEG2, the frame can be repeated, so we update the
1310 clock accordingly */
1311 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1312 is->video_clock += frame_delay;
1314 #if defined(DEBUG_SYNC) && 0
1316 int ftype;
1317 if (src_frame->pict_type == FF_B_TYPE)
1318 ftype = 'B';
1319 else if (src_frame->pict_type == FF_I_TYPE)
1320 ftype = 'I';
1321 else
1322 ftype = 'P';
1323 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1324 ftype, pts, pts1);
1326 #endif
1327 return queue_picture(is, src_frame, pts);
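/* Illustrative note, not part of the original ffplay.c: the video clock above
   advances by one nominal frame duration per decoded frame, taken from the
   codec time base, plus half a frame per repeat_pict to account for MPEG-2
   style repeated fields (3:2 pulldown). E.g. with a 1/25 time base and
   repeat_pict = 1 the clock advances by 0.040 + 0.020 = 0.060 s. */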
1330 static uint64_t global_video_pkt_pts= AV_NOPTS_VALUE;
1332 static int my_get_buffer(struct AVCodecContext *c, AVFrame *pic){
1333 int ret= avcodec_default_get_buffer(c, pic);
1334 uint64_t *pts= av_malloc(sizeof(uint64_t));
1335 *pts= global_video_pkt_pts;
1336 pic->opaque= pts;
1337 return ret;
1340 static void my_release_buffer(struct AVCodecContext *c, AVFrame *pic){
1341 if(pic) av_freep(&pic->opaque);
1342 avcodec_default_release_buffer(c, pic);
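/* Illustrative note, not part of the original ffplay.c: the two callbacks
   above implement the classic ffplay trick for recovering presentation
   timestamps from a reordering decoder. video_thread() stores the pts of the
   packet it is about to decode in global_video_pkt_pts; my_get_buffer()
   copies that value into the frame's opaque field when the decoder allocates
   the buffer for that picture, so when the (possibly reordered) frame is
   output, the pts of its originating packet can be read back from
   frame->opaque. */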
1345 static int video_thread(void *arg)
1347 VideoState *is = arg;
1348 AVPacket pkt1, *pkt = &pkt1;
1349 int len1, got_picture;
1350 AVFrame *frame= avcodec_alloc_frame();
1351 double pts;
1353 for(;;) {
1354 while (is->paused && !is->videoq.abort_request) {
1355 SDL_Delay(10);
1357 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1358 break;
1360 if(pkt->data == flush_pkt.data){
1361 avcodec_flush_buffers(is->video_st->codec);
1362 continue;
1365 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1366 this packet, if any */
1367 global_video_pkt_pts= pkt->pts;
1368 len1 = avcodec_decode_video(is->video_st->codec,
1369 frame, &got_picture,
1370 pkt->data, pkt->size);
1372 if( (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1373 && frame->opaque && *(uint64_t*)frame->opaque != AV_NOPTS_VALUE)
1374 pts= *(uint64_t*)frame->opaque;
1375 else if(pkt->dts != AV_NOPTS_VALUE)
1376 pts= pkt->dts;
1377 else
1378 pts= 0;
1379 pts *= av_q2d(is->video_st->time_base);
1381 // if (len1 < 0)
1382 // break;
1383 if (got_picture) {
1384 if (output_picture2(is, frame, pts) < 0)
1385 goto the_end;
1387 av_free_packet(pkt);
1388 if (step)
1389 if (cur_stream)
1390 stream_pause(cur_stream);
1392 the_end:
1393 av_free(frame);
1394 return 0;
1397 static int subtitle_thread(void *arg)
1399 VideoState *is = arg;
1400 SubPicture *sp;
1401 AVPacket pkt1, *pkt = &pkt1;
1402 int len1, got_subtitle;
1403 double pts;
1404 int i, j;
1405 int r, g, b, y, u, v, a;
1407 for(;;) {
1408 while (is->paused && !is->subtitleq.abort_request) {
1409 SDL_Delay(10);
1411 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1412 break;
1414 if(pkt->data == flush_pkt.data){
1415 avcodec_flush_buffers(is->subtitle_st->codec);
1416 continue;
1418 SDL_LockMutex(is->subpq_mutex);
1419 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1420 !is->subtitleq.abort_request) {
1421 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1423 SDL_UnlockMutex(is->subpq_mutex);
1425 if (is->subtitleq.abort_request)
1426 goto the_end;
1428 sp = &is->subpq[is->subpq_windex];
1430 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1431 this packet, if any */
1432 pts = 0;
1433 if (pkt->pts != AV_NOPTS_VALUE)
1434 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1436 len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1437 &sp->sub, &got_subtitle,
1438 pkt->data, pkt->size);
1439 // if (len1 < 0)
1440 // break;
1441 if (got_subtitle && sp->sub.format == 0) {
1442 sp->pts = pts;
1444 for (i = 0; i < sp->sub.num_rects; i++)
1446 for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
1448 RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
1449 y = RGB_TO_Y_CCIR(r, g, b);
1450 u = RGB_TO_U_CCIR(r, g, b, 0);
1451 v = RGB_TO_V_CCIR(r, g, b, 0);
1452 YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
1456 /* now we can update the picture count */
1457 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1458 is->subpq_windex = 0;
1459 SDL_LockMutex(is->subpq_mutex);
1460 is->subpq_size++;
1461 SDL_UnlockMutex(is->subpq_mutex);
1463 av_free_packet(pkt);
1464 // if (step)
1465 // if (cur_stream)
1466 // stream_pause(cur_stream);
1468 the_end:
1469 return 0;
1472 /* copy samples for viewing in editor window */
1473 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1475 int size, len, channels;
1477 channels = is->audio_st->codec->channels;
1479 size = samples_size / sizeof(short);
1480 while (size > 0) {
1481 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1482 if (len > size)
1483 len = size;
1484 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1485 samples += len;
1486 is->sample_array_index += len;
1487 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1488 is->sample_array_index = 0;
1489 size -= len;
1493 /* return the new audio buffer size (samples can be added or deleted
1494 to get better sync if video or the external clock is the master) */
1495 static int synchronize_audio(VideoState *is, short *samples,
1496 int samples_size1, double pts)
1498 int n, samples_size;
1499 double ref_clock;
1501 n = 2 * is->audio_st->codec->channels;
1502 samples_size = samples_size1;
1504 /* if not master, then we try to remove or add samples to correct the clock */
1505 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1506 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1507 double diff, avg_diff;
1508 int wanted_size, min_size, max_size, nb_samples;
1510 ref_clock = get_master_clock(is);
1511 diff = get_audio_clock(is) - ref_clock;
1513 if (diff < AV_NOSYNC_THRESHOLD) {
1514 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1515 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1516 /* not enough measures to have a correct estimate */
1517 is->audio_diff_avg_count++;
1518 } else {
1519 /* estimate the A-V difference */
1520 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1522 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1523 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1524 nb_samples = samples_size / n;
1526 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1527 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1528 if (wanted_size < min_size)
1529 wanted_size = min_size;
1530 else if (wanted_size > max_size)
1531 wanted_size = max_size;
1533 /* add or remove samples to correct the synchronisation */
1534 if (wanted_size < samples_size) {
1535 /* remove samples */
1536 samples_size = wanted_size;
1537 } else if (wanted_size > samples_size) {
1538 uint8_t *samples_end, *q;
1539 int nb;
1541 /* add samples */
1542 nb = (wanted_size - samples_size);
1543 samples_end = (uint8_t *)samples + samples_size - n;
1544 q = samples_end + n;
1545 while (nb > 0) {
1546 memcpy(q, samples_end, n);
1547 q += n;
1548 nb -= n;
1550 samples_size = wanted_size;
1553 #if 0
1554 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1555 diff, avg_diff, samples_size - samples_size1,
1556 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1557 #endif
1559 } else {
1560 /* difference is too big: it may be due to initial PTS errors, so
1561 reset the A-V filter */
1562 is->audio_diff_avg_count = 0;
1563 is->audio_diff_cum = 0;
1567 return samples_size;
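/* Illustrative note, not part of the original ffplay.c: when audio is the
   slave clock, the correction above resizes the output buffer rather than
   resampling. Rough example with 44100 Hz stereo 16-bit (n = 4) and a
   smoothed A-V difference of +0.05 s:
       wanted_size = samples_size + 0.05 * 44100 * 4 ~= samples_size + 8820
   clamped to +/- SAMPLE_CORRECTION_PERCENT_MAX (10%) of the buffer.
   Shrinking simply drops the trailing samples, growing repeats the last
   sample frame until the requested size is reached. */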
1570 /* decode one audio frame and return its uncompressed size */
1571 static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr)
1573 AVPacket *pkt = &is->audio_pkt;
1574 int n, len1, data_size;
1575 double pts;
1577 for(;;) {
1578 /* NOTE: the audio packet can contain several frames */
1579 while (is->audio_pkt_size > 0) {
1580 data_size = buf_size;
1581 len1 = avcodec_decode_audio2(is->audio_st->codec,
1582 (int16_t *)audio_buf, &data_size,
1583 is->audio_pkt_data, is->audio_pkt_size);
1584 if (len1 < 0) {
1585 /* if error, we skip the frame */
1586 is->audio_pkt_size = 0;
1587 break;
1590 is->audio_pkt_data += len1;
1591 is->audio_pkt_size -= len1;
1592 if (data_size <= 0)
1593 continue;
1594 /* if no pts, then compute it */
1595 pts = is->audio_clock;
1596 *pts_ptr = pts;
1597 n = 2 * is->audio_st->codec->channels;
1598 is->audio_clock += (double)data_size /
1599 (double)(n * is->audio_st->codec->sample_rate);
1600 #if defined(DEBUG_SYNC)
1602 static double last_clock;
1603 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1604 is->audio_clock - last_clock,
1605 is->audio_clock, pts);
1606 last_clock = is->audio_clock;
1608 #endif
1609 return data_size;
1612 /* free the current packet */
1613 if (pkt->data)
1614 av_free_packet(pkt);
1616 if (is->paused || is->audioq.abort_request) {
1617 return -1;
1620 /* read next packet */
1621 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1622 return -1;
1623 if(pkt->data == flush_pkt.data){
1624 avcodec_flush_buffers(is->audio_st->codec);
1625 continue;
1628 is->audio_pkt_data = pkt->data;
1629 is->audio_pkt_size = pkt->size;
1631 /* update the audio clock with the pts, if available */
1632 if (pkt->pts != AV_NOPTS_VALUE) {
1633 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1638 /* get the current audio output buffer size, in bytes. With SDL, we
1639 cannot get precise information */
1640 static int audio_write_get_buf_size(VideoState *is)
1642 return is->audio_buf_size - is->audio_buf_index;
1646 /* prepare a new audio buffer */
1647 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1649 VideoState *is = opaque;
1650 int audio_size, len1;
1651 double pts;
1653 audio_callback_time = av_gettime();
1655 while (len > 0) {
1656 if (is->audio_buf_index >= is->audio_buf_size) {
1657 audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);
1658 if (audio_size < 0) {
1659 /* if error, just output silence */
1660 is->audio_buf_size = 1024;
1661 memset(is->audio_buf, 0, is->audio_buf_size);
1662 } else {
1663 if (is->show_audio)
1664 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1665 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1666 pts);
1667 is->audio_buf_size = audio_size;
1669 is->audio_buf_index = 0;
1671 len1 = is->audio_buf_size - is->audio_buf_index;
1672 if (len1 > len)
1673 len1 = len;
1674 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1675 len -= len1;
1676 stream += len1;
1677 is->audio_buf_index += len1;
1681 /* open a given stream. Return 0 if OK */
1682 static int stream_component_open(VideoState *is, int stream_index)
1684 AVFormatContext *ic = is->ic;
1685 AVCodecContext *enc;
1686 AVCodec *codec;
1687 SDL_AudioSpec wanted_spec, spec;
1689 if (stream_index < 0 || stream_index >= ic->nb_streams)
1690 return -1;
1691 enc = ic->streams[stream_index]->codec;
1693 /* prepare audio output */
1694 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1695 if (enc->channels > 0) {
1696 enc->request_channels = FFMIN(2, enc->channels);
1697 } else {
1698 enc->request_channels = 2;
1702 codec = avcodec_find_decoder(enc->codec_id);
1703 enc->debug_mv = debug_mv;
1704 enc->debug = debug;
1705 enc->workaround_bugs = workaround_bugs;
1706 enc->lowres = lowres;
1707 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1708 enc->idct_algo= idct;
1709 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1710 enc->skip_frame= skip_frame;
1711 enc->skip_idct= skip_idct;
1712 enc->skip_loop_filter= skip_loop_filter;
1713 enc->error_resilience= error_resilience;
1714 enc->error_concealment= error_concealment;
1715 if (!codec ||
1716 avcodec_open(enc, codec) < 0)
1717 return -1;
1719 /* prepare audio output */
1720 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1721 wanted_spec.freq = enc->sample_rate;
1722 wanted_spec.format = AUDIO_S16SYS;
1723 wanted_spec.channels = enc->channels;
1724 wanted_spec.silence = 0;
1725 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1726 wanted_spec.callback = sdl_audio_callback;
1727 wanted_spec.userdata = is;
1728 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1729 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1730 return -1;
1732 is->audio_hw_buf_size = spec.size;
1735 if(thread_count>1)
1736 avcodec_thread_init(enc, thread_count);
1737 enc->thread_count= thread_count;
1738 switch(enc->codec_type) {
1739 case CODEC_TYPE_AUDIO:
1740 is->audio_stream = stream_index;
1741 is->audio_st = ic->streams[stream_index];
1742 is->audio_buf_size = 0;
1743 is->audio_buf_index = 0;
1745 /* init averaging filter */
1746 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1747 is->audio_diff_avg_count = 0;
1748 /* since we do not have a precise enough measure of the audio fifo fullness,
1749 we correct audio sync only if the error is larger than this threshold */
1750 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1752 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1753 packet_queue_init(&is->audioq);
1754 SDL_PauseAudio(0);
1755 break;
1756 case CODEC_TYPE_VIDEO:
1757 is->video_stream = stream_index;
1758 is->video_st = ic->streams[stream_index];
1760 is->frame_last_delay = 40e-3;
1761 is->frame_timer = (double)av_gettime() / 1000000.0;
1762 is->video_current_pts_time = av_gettime();
1764 packet_queue_init(&is->videoq);
1765 is->video_tid = SDL_CreateThread(video_thread, is);
1767 enc-> get_buffer= my_get_buffer;
1768 enc->release_buffer= my_release_buffer;
1769 break;
1770 case CODEC_TYPE_SUBTITLE:
1771 is->subtitle_stream = stream_index;
1772 is->subtitle_st = ic->streams[stream_index];
1773 packet_queue_init(&is->subtitleq);
1775 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1776 break;
1777 default:
1778 break;
1780 return 0;
1783 static void stream_component_close(VideoState *is, int stream_index)
1785 AVFormatContext *ic = is->ic;
1786 AVCodecContext *enc;
1788 if (stream_index < 0 || stream_index >= ic->nb_streams)
1789 return;
1790 enc = ic->streams[stream_index]->codec;
1792 switch(enc->codec_type) {
1793 case CODEC_TYPE_AUDIO:
1794 packet_queue_abort(&is->audioq);
1796 SDL_CloseAudio();
1798 packet_queue_end(&is->audioq);
1799 break;
1800 case CODEC_TYPE_VIDEO:
1801 packet_queue_abort(&is->videoq);
1803 /* note: we also signal this mutex to make sure we unblock the
1804 video thread in all cases */
1805 SDL_LockMutex(is->pictq_mutex);
1806 SDL_CondSignal(is->pictq_cond);
1807 SDL_UnlockMutex(is->pictq_mutex);
1809 SDL_WaitThread(is->video_tid, NULL);
1811 packet_queue_end(&is->videoq);
1812 break;
1813 case CODEC_TYPE_SUBTITLE:
1814 packet_queue_abort(&is->subtitleq);
1816 /* note: we also signal this mutex to make sure we unblock the
1817 subtitle thread in all cases */
1818 SDL_LockMutex(is->subpq_mutex);
1819 is->subtitle_stream_changed = 1;
1821 SDL_CondSignal(is->subpq_cond);
1822 SDL_UnlockMutex(is->subpq_mutex);
1824 SDL_WaitThread(is->subtitle_tid, NULL);
1826 packet_queue_end(&is->subtitleq);
1827 break;
1828 default:
1829 break;
1832 avcodec_close(enc);
1833 switch(enc->codec_type) {
1834 case CODEC_TYPE_AUDIO:
1835 is->audio_st = NULL;
1836 is->audio_stream = -1;
1837 break;
1838 case CODEC_TYPE_VIDEO:
1839 is->video_st = NULL;
1840 is->video_stream = -1;
1841 break;
1842 case CODEC_TYPE_SUBTITLE:
1843 is->subtitle_st = NULL;
1844 is->subtitle_stream = -1;
1845 break;
1846 default:
1847 break;
1851 static void dump_stream_info(const AVFormatContext *s)
1853 if (s->track != 0)
1854 fprintf(stderr, "Track: %d\n", s->track);
1855 if (s->title[0] != '\0')
1856 fprintf(stderr, "Title: %s\n", s->title);
1857 if (s->author[0] != '\0')
1858 fprintf(stderr, "Author: %s\n", s->author);
1859 if (s->copyright[0] != '\0')
1860 fprintf(stderr, "Copyright: %s\n", s->copyright);
1861 if (s->comment[0] != '\0')
1862 fprintf(stderr, "Comment: %s\n", s->comment);
1863 if (s->album[0] != '\0')
1864 fprintf(stderr, "Album: %s\n", s->album);
1865 if (s->year != 0)
1866 fprintf(stderr, "Year: %d\n", s->year);
1867 if (s->genre[0] != '\0')
1868 fprintf(stderr, "Genre: %s\n", s->genre);
1871 /* since we have only one decoding thread, we can use a global
1872 variable instead of a thread local variable */
1873 static VideoState *global_video_state;
1875 static int decode_interrupt_cb(void)
1877 return (global_video_state && global_video_state->abort_request);
1880 /* this thread gets the stream from the disk or the network */
1881 static int decode_thread(void *arg)
1883 VideoState *is = arg;
1884 AVFormatContext *ic;
1885 int err, i, ret, video_index, audio_index;
1886 AVPacket pkt1, *pkt = &pkt1;
1887 AVFormatParameters params, *ap = &params;
1889 video_index = -1;
1890 audio_index = -1;
1891 is->video_stream = -1;
1892 is->audio_stream = -1;
1893 is->subtitle_stream = -1;
1895 global_video_state = is;
1896 url_set_interrupt_cb(decode_interrupt_cb);
1898 memset(ap, 0, sizeof(*ap));
1900 ap->width = frame_width;
1901 ap->height= frame_height;
1902 ap->time_base= (AVRational){1, 25};
1903 ap->pix_fmt = frame_pix_fmt;
1905 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1906 if (err < 0) {
1907 print_error(is->filename, err);
1908 ret = -1;
1909 goto fail;
1911 is->ic = ic;
1913 if(genpts)
1914 ic->flags |= AVFMT_FLAG_GENPTS;
1916 err = av_find_stream_info(ic);
1917 if (err < 0) {
1918 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1919 ret = -1;
1920 goto fail;
1922 if(ic->pb)
1923 ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
1925 /* if seeking requested, we execute it */
1926 if (start_time != AV_NOPTS_VALUE) {
1927 int64_t timestamp;
1929 timestamp = start_time;
1930 /* add the stream start time */
1931 if (ic->start_time != AV_NOPTS_VALUE)
1932 timestamp += ic->start_time;
1933 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
1934 if (ret < 0) {
1935 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1936 is->filename, (double)timestamp / AV_TIME_BASE);
1940 for(i = 0; i < ic->nb_streams; i++) {
1941 AVCodecContext *enc = ic->streams[i]->codec;
1942 switch(enc->codec_type) {
1943 case CODEC_TYPE_AUDIO:
1944 if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
1945 audio_index = i;
1946 break;
1947 case CODEC_TYPE_VIDEO:
1948 if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable)
1949 video_index = i;
1950 break;
1951 default:
1952 break;
1955 if (show_status) {
1956 dump_format(ic, 0, is->filename, 0);
1957 dump_stream_info(ic);
1960 /* open the streams */
1961 if (audio_index >= 0) {
1962 stream_component_open(is, audio_index);
1965 if (video_index >= 0) {
1966 stream_component_open(is, video_index);
1967 } else {
1968 if (!display_disable)
1969 is->show_audio = 1;
1972 if (is->video_stream < 0 && is->audio_stream < 0) {
1973 fprintf(stderr, "%s: could not open codecs\n", is->filename);
1974 ret = -1;
1975 goto fail;
1978 for(;;) {
1979 if (is->abort_request)
1980 break;
1981 if (is->paused != is->last_paused) {
1982 is->last_paused = is->paused;
1983 if (is->paused)
1984 av_read_pause(ic);
1985 else
1986 av_read_play(ic);
1988 #if defined(CONFIG_RTSP_DEMUXER) || defined(CONFIG_MMSH_PROTOCOL)
1989 if (is->paused &&
1990 (!strcmp(ic->iformat->name, "rtsp") ||
1991 (ic->pb && !strcmp(url_fileno(ic->pb)->prot->name, "mmsh")))) {
1992 /* wait 10 ms to avoid trying to get another packet */
1993 /* XXX: horrible */
1994 SDL_Delay(10);
1995 continue;
1997 #endif
1998 if (is->seek_req) {
1999 int stream_index= -1;
2000 int64_t seek_target= is->seek_pos;
2002 if (is-> video_stream >= 0) stream_index= is-> video_stream;
2003 else if(is-> audio_stream >= 0) stream_index= is-> audio_stream;
2004 else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;
2006 if(stream_index>=0){
2007 seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
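/* Illustrative note, not part of the original ffplay.c: seek_pos is in
   AV_TIME_BASE (microsecond) units, so it is rescaled to the chosen stream's
   time base before av_seek_frame(). E.g. seeking to 5 s in a 90 kHz
   MPEG-TS stream: av_rescale_q(5 * AV_TIME_BASE, AV_TIME_BASE_Q,
   (AVRational){1, 90000}) = 450000. */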
2010 ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
2011 if (ret < 0) {
2012 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2013 }else{
2014 if (is->audio_stream >= 0) {
2015 packet_queue_flush(&is->audioq);
2016 packet_queue_put(&is->audioq, &flush_pkt);
2018 if (is->subtitle_stream >= 0) {
2019 packet_queue_flush(&is->subtitleq);
2020 packet_queue_put(&is->subtitleq, &flush_pkt);
2022 if (is->video_stream >= 0) {
2023 packet_queue_flush(&is->videoq);
2024 packet_queue_put(&is->videoq, &flush_pkt);
2027 is->seek_req = 0;
2030 /* if the queues are full, no need to read more */
2031 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
2032 is->videoq.size > MAX_VIDEOQ_SIZE ||
2033 is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
2034 url_feof(ic->pb)) {
2035 /* wait 10 ms */
2036 SDL_Delay(10);
2037 continue;
2039 ret = av_read_frame(ic, pkt);
2040 if (ret < 0) {
2041 if (url_ferror(ic->pb) == 0) {
2042 SDL_Delay(100); /* wait for user event */
2043 continue;
2044 } else
2045 break;
2047 if (pkt->stream_index == is->audio_stream) {
2048 packet_queue_put(&is->audioq, pkt);
2049 } else if (pkt->stream_index == is->video_stream) {
2050 packet_queue_put(&is->videoq, pkt);
2051 } else if (pkt->stream_index == is->subtitle_stream) {
2052 packet_queue_put(&is->subtitleq, pkt);
2053 } else {
2054 av_free_packet(pkt);
2057 /* wait until the end */
2058 while (!is->abort_request) {
2059 SDL_Delay(100);
2062 ret = 0;
2063 fail:
2064 /* disable interrupting */
2065 global_video_state = NULL;
2067 /* close each stream */
2068 if (is->audio_stream >= 0)
2069 stream_component_close(is, is->audio_stream);
2070 if (is->video_stream >= 0)
2071 stream_component_close(is, is->video_stream);
2072 if (is->subtitle_stream >= 0)
2073 stream_component_close(is, is->subtitle_stream);
2074 if (is->ic) {
2075 av_close_input_file(is->ic);
2076 is->ic = NULL; /* safety */
2078 url_set_interrupt_cb(NULL);
2080 if (ret != 0) {
2081 SDL_Event event;
2083 event.type = FF_QUIT_EVENT;
2084 event.user.data1 = is;
2085 SDL_PushEvent(&event);
2087 return 0;
2090 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2092 VideoState *is;
2094 is = av_mallocz(sizeof(VideoState));
2095 if (!is)
2096 return NULL;
2097 av_strlcpy(is->filename, filename, sizeof(is->filename));
2098 is->iformat = iformat;
2099 is->ytop = 0;
2100 is->xleft = 0;
2102 /* start video display */
2103 is->pictq_mutex = SDL_CreateMutex();
2104 is->pictq_cond = SDL_CreateCond();
2106 is->subpq_mutex = SDL_CreateMutex();
2107 is->subpq_cond = SDL_CreateCond();
2109 /* add the refresh timer to draw the picture */
2110 schedule_refresh(is, 40);
2112 is->av_sync_type = av_sync_type;
2113 is->parse_tid = SDL_CreateThread(decode_thread, is);
2114 if (!is->parse_tid) {
2115 av_free(is);
2116 return NULL;
2118 return is;
2121 static void stream_close(VideoState *is)
2123 VideoPicture *vp;
2124 int i;
2125 /* XXX: use a special url_shutdown call to abort parse cleanly */
2126 is->abort_request = 1;
2127 SDL_WaitThread(is->parse_tid, NULL);
2129 /* free all pictures */
2130 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2131 vp = &is->pictq[i];
2132 if (vp->bmp) {
2133 SDL_FreeYUVOverlay(vp->bmp);
2134 vp->bmp = NULL;
2135 }
2136 }
2137 SDL_DestroyMutex(is->pictq_mutex);
2138 SDL_DestroyCond(is->pictq_cond);
2139 SDL_DestroyMutex(is->subpq_mutex);
2140 SDL_DestroyCond(is->subpq_cond);
2141 }
2143 static void stream_cycle_channel(VideoState *is, int codec_type)
2144 {
2145 AVFormatContext *ic = is->ic;
2146 int start_index, stream_index;
2147 AVStream *st;
2149 if (codec_type == CODEC_TYPE_VIDEO)
2150 start_index = is->video_stream;
2151 else if (codec_type == CODEC_TYPE_AUDIO)
2152 start_index = is->audio_stream;
2153 else
2154 start_index = is->subtitle_stream;
2155 if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2156 return;
2157 stream_index = start_index;
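/* NOTE: the loop below scans the stream indices circularly, starting just
 * after the current one.  Subtitle cycling may wrap to -1, meaning "no
 * subtitle stream"; audio/video wrap back to index 0.  If the scan comes back
 * to start_index without finding a usable stream of the requested type, the
 * current selection is kept. */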
2158 for(;;) {
2159 if (++stream_index >= is->ic->nb_streams)
2160 {
2161 if (codec_type == CODEC_TYPE_SUBTITLE)
2162 {
2163 stream_index = -1;
2164 goto the_end;
2165 } else
2166 stream_index = 0;
2167 }
2168 if (stream_index == start_index)
2169 return;
2170 st = ic->streams[stream_index];
2171 if (st->codec->codec_type == codec_type) {
2172 /* check that parameters are OK */
2173 switch(codec_type) {
2174 case CODEC_TYPE_AUDIO:
2175 if (st->codec->sample_rate != 0 &&
2176 st->codec->channels != 0)
2177 goto the_end;
2178 break;
2179 case CODEC_TYPE_VIDEO:
2180 case CODEC_TYPE_SUBTITLE:
2181 goto the_end;
2182 default:
2183 break;
2184 }
2185 }
2186 }
2187 the_end:
2188 stream_component_close(is, start_index);
2189 stream_component_open(is, stream_index);
2190 }
2193 static void toggle_full_screen(void)
2194 {
2195 is_full_screen = !is_full_screen;
2196 if (!fs_screen_width) {
2197 /* use default SDL method */
2198 // SDL_WM_ToggleFullScreen(screen);
2199 }
2200 video_open(cur_stream);
2201 }
2203 static void toggle_pause(void)
2204 {
2205 if (cur_stream)
2206 stream_pause(cur_stream);
2207 step = 0;
2208 }
2210 static void step_to_next_frame(void)
2211 {
2212 if (cur_stream) {
2213 /* if the stream is paused, unpause it, then step */
2214 if (cur_stream->paused)
2215 stream_pause(cur_stream);
2216 }
2217 step = 1;
2218 }
2220 static void do_exit(void)
2221 {
2222 if (cur_stream) {
2223 stream_close(cur_stream);
2224 cur_stream = NULL;
2225 }
2226 if (show_status)
2227 printf("\n");
2228 SDL_Quit();
2229 exit(0);
2230 }
2232 static void toggle_audio_display(void)
2233 {
2234 if (cur_stream) {
2235 cur_stream->show_audio = !cur_stream->show_audio;
2236 }
2237 }
2239 /* handle an event sent by the GUI */
2240 static void event_loop(void)
2241 {
2242 SDL_Event event;
2243 double incr, pos, frac;
2245 for(;;) {
2246 SDL_WaitEvent(&event);
2247 switch(event.type) {
2248 case SDL_KEYDOWN:
2249 switch(event.key.keysym.sym) {
2250 case SDLK_ESCAPE:
2251 case SDLK_q:
2252 do_exit();
2253 break;
2254 case SDLK_f:
2255 toggle_full_screen();
2256 break;
2257 case SDLK_p:
2258 case SDLK_SPACE:
2259 toggle_pause();
2260 break;
2261 case SDLK_s: //S: Step to next frame
2262 step_to_next_frame();
2263 break;
2264 case SDLK_a:
2265 if (cur_stream)
2266 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2267 break;
2268 case SDLK_v:
2269 if (cur_stream)
2270 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2271 break;
2272 case SDLK_t:
2273 if (cur_stream)
2274 stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2275 break;
2276 case SDLK_w:
2277 toggle_audio_display();
2278 break;
2279 case SDLK_LEFT:
2280 incr = -10.0;
2281 goto do_seek;
2282 case SDLK_RIGHT:
2283 incr = 10.0;
2284 goto do_seek;
2285 case SDLK_UP:
2286 incr = 60.0;
2287 goto do_seek;
2288 case SDLK_DOWN:
2289 incr = -60.0;
2290 do_seek:
2291 if (cur_stream) {
2292 if (seek_by_bytes) {
2293 pos = url_ftell(cur_stream->ic->pb);
2294 if (cur_stream->ic->bit_rate)
2295 incr *= cur_stream->ic->bit_rate / 60.0;
2296 else
2297 incr *= 180000.0;
2298 pos += incr;
2299 stream_seek(cur_stream, pos, incr);
2300 } else {
2301 pos = get_master_clock(cur_stream);
2302 pos += incr;
2303 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
2304 }
2305 }
2306 break;
2307 default:
2308 break;
2309 }
2310 break;
2311 case SDL_MOUSEBUTTONDOWN:
2312 if (cur_stream) {
2313 int ns, hh, mm, ss;
2314 int tns, thh, tmm, tss;
2315 tns = cur_stream->ic->duration/1000000LL;
2316 thh = tns/3600;
2317 tmm = (tns%3600)/60;
2318 tss = (tns%60);
2319 frac = (double)event.button.x/(double)cur_stream->width;
2320 ns = frac*tns;
2321 hh = ns/3600;
2322 mm = (ns%3600)/60;
2323 ss = (ns%60);
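/* NOTE: the click position is mapped linearly onto the file duration.  As an
 * illustration, assuming a two hour file (duration == 7200 * AV_TIME_BASE)
 * and a click at 25% of the window width (frac == 0.25), the stream_seek()
 * call below targets start_time + 0.25 * 7200 * AV_TIME_BASE, i.e. 30 minutes
 * into the file. */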
2324 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2325 hh, mm, ss, thh, tmm, tss);
2326 stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
2327 }
2328 break;
2329 case SDL_VIDEORESIZE:
2330 if (cur_stream) {
2331 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2332 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2333 screen_width = cur_stream->width = event.resize.w;
2334 screen_height= cur_stream->height= event.resize.h;
2335 }
2336 break;
2337 case SDL_QUIT:
2338 case FF_QUIT_EVENT:
2339 do_exit();
2340 break;
2341 case FF_ALLOC_EVENT:
2342 video_open(event.user.data1);
2343 alloc_picture(event.user.data1);
2344 break;
2345 case FF_REFRESH_EVENT:
2346 video_refresh_timer(event.user.data1);
2347 break;
2348 default:
2349 break;
2350 }
2351 }
2352 }
2354 static void opt_frame_size(const char *arg)
2355 {
2356 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2357 fprintf(stderr, "Incorrect frame size\n");
2358 exit(1);
2359 }
2360 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2361 fprintf(stderr, "Frame size must be a multiple of 2\n");
2362 exit(1);
2363 }
2364 }
2366 static int opt_width(const char *opt, const char *arg)
2367 {
2368 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2369 return 0;
2370 }
2372 static int opt_height(const char *opt, const char *arg)
2373 {
2374 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2375 return 0;
2376 }
2378 static void opt_format(const char *arg)
2379 {
2380 file_iformat = av_find_input_format(arg);
2381 if (!file_iformat) {
2382 fprintf(stderr, "Unknown input format: %s\n", arg);
2383 exit(1);
2384 }
2385 }
2387 static void opt_frame_pix_fmt(const char *arg)
2388 {
2389 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2390 }
2392 static int opt_sync(const char *opt, const char *arg)
2393 {
2394 if (!strcmp(arg, "audio"))
2395 av_sync_type = AV_SYNC_AUDIO_MASTER;
2396 else if (!strcmp(arg, "video"))
2397 av_sync_type = AV_SYNC_VIDEO_MASTER;
2398 else if (!strcmp(arg, "ext"))
2399 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2400 else {
2401 fprintf(stderr, "Unknown value for %s: %s\n", opt, arg);
2402 exit(1);
2403 }
2404 return 0;
2405 }
2407 static int opt_seek(const char *opt, const char *arg)
2408 {
2409 start_time = parse_time_or_die(opt, arg, 1);
2410 return 0;
2411 }
2413 static int opt_debug(const char *opt, const char *arg)
2414 {
2415 av_log_set_level(99);
2416 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2417 return 0;
2418 }
2420 static int opt_vismv(const char *opt, const char *arg)
2421 {
2422 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2423 return 0;
2424 }
2426 static int opt_thread_count(const char *opt, const char *arg)
2427 {
2428 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2429 #if !defined(HAVE_THREADS)
2430 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2431 #endif
2432 return 0;
2433 }
2435 static const OptionDef options[] = {
2436 { "h", OPT_EXIT, {(void*)show_help}, "show help" },
2437 { "version", OPT_EXIT, {(void*)show_version}, "show version" },
2438 { "L", OPT_EXIT, {(void*)show_license}, "show license" },
2439 { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
2440 { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2441 { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2442 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2443 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2444 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2445 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2446 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
2447 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "", "" },
2448 { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2449 { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2450 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2451 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2452 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2453 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2454 { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2455 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2456 { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2457 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2458 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2459 { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
2460 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2461 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2462 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2463 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2464 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2465 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)", "threshold" },
2466 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2467 { "sync", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2468 { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2469 { NULL, },
2470 };
2472 static void show_help(void)
2473 {
2474 printf("usage: ffplay [options] input_file\n"
2475 "Simple media player\n");
2476 printf("\n");
2477 show_help_options(options, "Main options:\n",
2478 OPT_EXPERT, 0);
2479 show_help_options(options, "\nAdvanced options:\n",
2480 OPT_EXPERT, OPT_EXPERT);
2481 printf("\nWhile playing:\n"
2482 "q, ESC quit\n"
2483 "f toggle full screen\n"
2484 "p, SPC pause\n"
2485 "a cycle audio channel\n"
2486 "v cycle video channel\n"
2487 "t cycle subtitle channel\n"
2488 "w show audio waves\n"
2489 "left/right seek backward/forward 10 seconds\n"
2490 "down/up seek backward/forward 1 minute\n"
2491 "mouse click seek to percentage in file corresponding to fraction of width\n"
2495 static void opt_input_file(const char *filename)
2496 {
2497 if (!strcmp(filename, "-"))
2498 filename = "pipe:";
2499 input_filename = filename;
2500 }
2502 /* Called from the main */
2503 int main(int argc, char **argv)
2504 {
2505 int flags;
2507 /* register all codecs, demuxers and protocols */
2508 avcodec_register_all();
2509 avdevice_register_all();
2510 av_register_all();
2512 show_banner();
2514 parse_options(argc, argv, options, opt_input_file);
2516 if (!input_filename) {
2517 show_help();
2518 exit(1);
2519 }
2521 if (display_disable) {
2522 video_disable = 1;
2523 }
2524 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2525 #if !defined(__MINGW32__) && !defined(__APPLE__)
2526 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2527 #endif
2528 if (SDL_Init (flags)) {
2529 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2530 exit(1);
2531 }
2533 if (!display_disable) {
2534 #ifdef HAVE_SDL_VIDEO_SIZE
2535 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2536 fs_screen_width = vi->current_w;
2537 fs_screen_height = vi->current_h;
2538 #endif
2539 }
2541 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2542 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2543 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2544 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
2546 av_init_packet(&flush_pkt);
2547 flush_pkt.data= "FLUSH";
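/* NOTE: flush_pkt is a sentinel packet.  After a seek, the read loop pushes
 * it into each packet queue (see the seek handling in decode_thread above);
 * the decoding threads are then expected to recognise it and reset their
 * decoder state before consuming post-seek packets. */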
2549 cur_stream = stream_open(input_filename, file_iformat);
2551 event_loop();
2553 /* never returns */
2555 return 0;
2556 }