Use (u)int16_t instead of (unsigned) short
ffplay.c (FFMpeg-mirror/DVCPRO-HD.git)
1 /*
2 * FFplay : Simple Media Player based on the ffmpeg libraries
3 * Copyright (c) 2003 Fabrice Bellard
5 * This file is part of FFmpeg.
7 * FFmpeg is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2.1 of the License, or (at your option) any later version.
12 * FFmpeg is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with FFmpeg; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
22 #include <math.h>
23 #include <limits.h>
24 #include "libavutil/avstring.h"
25 #include "libavformat/avformat.h"
26 #include "libavformat/rtsp.h"
27 #include "libavdevice/avdevice.h"
28 #include "libswscale/swscale.h"
30 #include "version.h"
31 #include "cmdutils.h"
33 #include <SDL.h>
34 #include <SDL_thread.h>
36 #ifdef __MINGW32__
37 #undef main /* We don't want SDL to override our main() */
38 #endif
40 #undef exit
42 const char program_name[] = "FFplay";
43 static const int program_birth_year = 2003;
45 //#define DEBUG_SYNC
47 #define MAX_VIDEOQ_SIZE (5 * 256 * 1024)
48 #define MAX_AUDIOQ_SIZE (5 * 16 * 1024)
49 #define MAX_SUBTITLEQ_SIZE (5 * 16 * 1024)
51 /* SDL audio buffer size, in samples. Should be small to have precise
52 A/V sync as SDL does not have hardware buffer fullness info. */
53 #define SDL_AUDIO_BUFFER_SIZE 1024
55 /* no AV sync correction is done if below the AV sync threshold */
56 #define AV_SYNC_THRESHOLD 0.01
57 /* no AV correction is done if the error is too big */
58 #define AV_NOSYNC_THRESHOLD 10.0
60 /* maximum audio speed change to get correct sync */
61 #define SAMPLE_CORRECTION_PERCENT_MAX 10
63 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
64 #define AUDIO_DIFF_AVG_NB 20
66 /* NOTE: the size must be big enough to compensate for the hardware audio buffer size */
67 #define SAMPLE_ARRAY_SIZE (2*65536)
69 static int sws_flags = SWS_BICUBIC;
71 typedef struct PacketQueue {
72 AVPacketList *first_pkt, *last_pkt;
73 int nb_packets;
74 int size;
75 int abort_request;
76 SDL_mutex *mutex;
77 SDL_cond *cond;
78 } PacketQueue;
80 #define VIDEO_PICTURE_QUEUE_SIZE 1
81 #define SUBPICTURE_QUEUE_SIZE 4
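/* note: with a single-entry picture queue, the video thread blocks in
   queue_picture() until the previously queued frame has been displayed */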
83 typedef struct VideoPicture {
84 double pts; ///<presentation time stamp for this picture
85 SDL_Overlay *bmp;
86 int width, height; /* source height & width */
87 int allocated;
88 } VideoPicture;
90 typedef struct SubPicture {
91 double pts; /* presentation time stamp for this picture */
92 AVSubtitle sub;
93 } SubPicture;
95 enum {
96 AV_SYNC_AUDIO_MASTER, /* default choice */
97 AV_SYNC_VIDEO_MASTER,
98 AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
101 typedef struct VideoState {
102 SDL_Thread *parse_tid;
103 SDL_Thread *video_tid;
104 AVInputFormat *iformat;
105 int no_background;
106 int abort_request;
107 int paused;
108 int last_paused;
109 int seek_req;
110 int seek_flags;
111 int64_t seek_pos;
112 AVFormatContext *ic;
113 int dtg_active_format;
115 int audio_stream;
117 int av_sync_type;
118 double external_clock; /* external clock base */
119 int64_t external_clock_time;
121 double audio_clock;
122 double audio_diff_cum; /* used for AV difference average computation */
123 double audio_diff_avg_coef;
124 double audio_diff_threshold;
125 int audio_diff_avg_count;
126 AVStream *audio_st;
127 PacketQueue audioq;
128 int audio_hw_buf_size;
129 /* samples output by the codec. we reserve more space for avsync
130 compensation */
131 DECLARE_ALIGNED(16,uint8_t,audio_buf[(AVCODEC_MAX_AUDIO_FRAME_SIZE * 3) / 2]);
132 unsigned int audio_buf_size; /* in bytes */
133 int audio_buf_index; /* in bytes */
134 AVPacket audio_pkt;
135 uint8_t *audio_pkt_data;
136 int audio_pkt_size;
138 int show_audio; /* if true, display audio samples */
139 int16_t sample_array[SAMPLE_ARRAY_SIZE];
140 int sample_array_index;
141 int last_i_start;
143 SDL_Thread *subtitle_tid;
144 int subtitle_stream;
145 int subtitle_stream_changed;
146 AVStream *subtitle_st;
147 PacketQueue subtitleq;
148 SubPicture subpq[SUBPICTURE_QUEUE_SIZE];
149 int subpq_size, subpq_rindex, subpq_windex;
150 SDL_mutex *subpq_mutex;
151 SDL_cond *subpq_cond;
153 double frame_timer;
154 double frame_last_pts;
155 double frame_last_delay;
156 double video_clock; ///<pts of last decoded frame / predicted pts of next decoded frame
157 int video_stream;
158 AVStream *video_st;
159 PacketQueue videoq;
160 double video_current_pts; ///<current displayed pts (different from video_clock if frame fifos are used)
161 int64_t video_current_pts_time; ///<time (av_gettime) at which we updated video_current_pts - used to have running video pts
162 VideoPicture pictq[VIDEO_PICTURE_QUEUE_SIZE];
163 int pictq_size, pictq_rindex, pictq_windex;
164 SDL_mutex *pictq_mutex;
165 SDL_cond *pictq_cond;
167 // QETimer *video_timer;
168 char filename[1024];
169 int width, height, xleft, ytop;
170 } VideoState;
172 static void show_help(void);
173 static int audio_write_get_buf_size(VideoState *is);
175 /* options specified by the user */
176 static AVInputFormat *file_iformat;
177 static const char *input_filename;
178 static int fs_screen_width;
179 static int fs_screen_height;
180 static int screen_width = 0;
181 static int screen_height = 0;
182 static int frame_width = 0;
183 static int frame_height = 0;
184 static enum PixelFormat frame_pix_fmt = PIX_FMT_NONE;
185 static int audio_disable;
186 static int video_disable;
187 static int wanted_audio_stream= 0;
188 static int wanted_video_stream= 0;
189 static int seek_by_bytes;
190 static int display_disable;
191 static int show_status;
192 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
193 static int64_t start_time = AV_NOPTS_VALUE;
194 static int debug = 0;
195 static int debug_mv = 0;
196 static int step = 0;
197 static int thread_count = 1;
198 static int workaround_bugs = 1;
199 static int fast = 0;
200 static int genpts = 0;
201 static int lowres = 0;
202 static int idct = FF_IDCT_AUTO;
203 static enum AVDiscard skip_frame= AVDISCARD_DEFAULT;
204 static enum AVDiscard skip_idct= AVDISCARD_DEFAULT;
205 static enum AVDiscard skip_loop_filter= AVDISCARD_DEFAULT;
206 static int error_resilience = FF_ER_CAREFUL;
207 static int error_concealment = 3;
208 static int decoder_reorder_pts= 0;
210 /* current context */
211 static int is_full_screen;
212 static VideoState *cur_stream;
213 static int64_t audio_callback_time;
215 AVPacket flush_pkt;
217 #define FF_ALLOC_EVENT (SDL_USEREVENT)
218 #define FF_REFRESH_EVENT (SDL_USEREVENT + 1)
219 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
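/* custom SDL user events: FF_ALLOC_EVENT asks the main thread to (re)allocate
   the YUV overlay, FF_REFRESH_EVENT triggers a display refresh from the timer
   callback, and FF_QUIT_EVENT reports a fatal error from the decode thread */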
221 SDL_Surface *screen;
223 /* packet queue handling */
224 static void packet_queue_init(PacketQueue *q)
226 memset(q, 0, sizeof(PacketQueue));
227 q->mutex = SDL_CreateMutex();
228 q->cond = SDL_CreateCond();
231 static void packet_queue_flush(PacketQueue *q)
233 AVPacketList *pkt, *pkt1;
235 SDL_LockMutex(q->mutex);
236 for(pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
237 pkt1 = pkt->next;
238 av_free_packet(&pkt->pkt);
239 av_freep(&pkt);
241 q->last_pkt = NULL;
242 q->first_pkt = NULL;
243 q->nb_packets = 0;
244 q->size = 0;
245 SDL_UnlockMutex(q->mutex);
248 static void packet_queue_end(PacketQueue *q)
250 packet_queue_flush(q);
251 SDL_DestroyMutex(q->mutex);
252 SDL_DestroyCond(q->cond);
255 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
257 AVPacketList *pkt1;
259 /* duplicate the packet */
260 if (pkt!=&flush_pkt && av_dup_packet(pkt) < 0)
261 return -1;
263 pkt1 = av_malloc(sizeof(AVPacketList));
264 if (!pkt1)
265 return -1;
266 pkt1->pkt = *pkt;
267 pkt1->next = NULL;
270 SDL_LockMutex(q->mutex);
272 if (!q->last_pkt)
274 q->first_pkt = pkt1;
275 else
276 q->last_pkt->next = pkt1;
277 q->last_pkt = pkt1;
278 q->nb_packets++;
279 q->size += pkt1->pkt.size;
280 /* XXX: should duplicate packet data in DV case */
281 SDL_CondSignal(q->cond);
283 SDL_UnlockMutex(q->mutex);
284 return 0;
287 static void packet_queue_abort(PacketQueue *q)
289 SDL_LockMutex(q->mutex);
291 q->abort_request = 1;
293 SDL_CondSignal(q->cond);
295 SDL_UnlockMutex(q->mutex);
298 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
299 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block)
301 AVPacketList *pkt1;
302 int ret;
304 SDL_LockMutex(q->mutex);
306 for(;;) {
307 if (q->abort_request) {
308 ret = -1;
309 break;
312 pkt1 = q->first_pkt;
313 if (pkt1) {
314 q->first_pkt = pkt1->next;
315 if (!q->first_pkt)
316 q->last_pkt = NULL;
317 q->nb_packets--;
318 q->size -= pkt1->pkt.size;
319 *pkt = pkt1->pkt;
320 av_free(pkt1);
321 ret = 1;
322 break;
323 } else if (!block) {
324 ret = 0;
325 break;
326 } else {
327 SDL_CondWait(q->cond, q->mutex);
330 SDL_UnlockMutex(q->mutex);
331 return ret;
334 static inline void fill_rectangle(SDL_Surface *screen,
335 int x, int y, int w, int h, int color)
337 SDL_Rect rect;
338 rect.x = x;
339 rect.y = y;
340 rect.w = w;
341 rect.h = h;
342 SDL_FillRect(screen, &rect, color);
345 #if 0
346 /* draw only the border of a rectangle */
347 void fill_border(VideoState *s, int x, int y, int w, int h, int color)
349 int w1, w2, h1, h2;
351 /* fill the background */
352 w1 = x;
353 if (w1 < 0)
354 w1 = 0;
355 w2 = s->width - (x + w);
356 if (w2 < 0)
357 w2 = 0;
358 h1 = y;
359 if (h1 < 0)
360 h1 = 0;
361 h2 = s->height - (y + h);
362 if (h2 < 0)
363 h2 = 0;
364 fill_rectangle(screen,
365 s->xleft, s->ytop,
366 w1, s->height,
367 color);
368 fill_rectangle(screen,
369 s->xleft + s->width - w2, s->ytop,
370 w2, s->height,
371 color);
372 fill_rectangle(screen,
373 s->xleft + w1, s->ytop,
374 s->width - w1 - w2, h1,
375 color);
376 fill_rectangle(screen,
377 s->xleft + w1, s->ytop + s->height - h2,
378 s->width - w1 - w2, h2,
379 color);
381 #endif
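/* fixed-point RGB -> CCIR 601 YCbCr conversion, used below to convert subtitle
   palettes: the coefficients are pre-scaled by 1 << SCALEBITS and ONE_HALF
   provides rounding before the final shift */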
385 #define SCALEBITS 10
386 #define ONE_HALF (1 << (SCALEBITS - 1))
387 #define FIX(x) ((int) ((x) * (1<<SCALEBITS) + 0.5))
389 #define RGB_TO_Y_CCIR(r, g, b) \
390 ((FIX(0.29900*219.0/255.0) * (r) + FIX(0.58700*219.0/255.0) * (g) + \
391 FIX(0.11400*219.0/255.0) * (b) + (ONE_HALF + (16 << SCALEBITS))) >> SCALEBITS)
393 #define RGB_TO_U_CCIR(r1, g1, b1, shift)\
394 (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \
395 FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
397 #define RGB_TO_V_CCIR(r1, g1, b1, shift)\
398 (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \
399 FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128)
401 #define ALPHA_BLEND(a, oldp, newp, s)\
402 ((((oldp << s) * (255 - (a))) + (newp * (a))) / (255 << s))
404 #define RGBA_IN(r, g, b, a, s)\
406 unsigned int v = ((const uint32_t *)(s))[0];\
407 a = (v >> 24) & 0xff;\
408 r = (v >> 16) & 0xff;\
409 g = (v >> 8) & 0xff;\
410 b = v & 0xff;\
413 #define YUVA_IN(y, u, v, a, s, pal)\
415 unsigned int val = ((const uint32_t *)(pal))[*(const uint8_t*)(s)];\
416 a = (val >> 24) & 0xff;\
417 y = (val >> 16) & 0xff;\
418 u = (val >> 8) & 0xff;\
419 v = val & 0xff;\
422 #define YUVA_OUT(d, y, u, v, a)\
424 ((uint32_t *)(d))[0] = (a << 24) | (y << 16) | (u << 8) | v;\
428 #define BPP 1
430 static void blend_subrect(AVPicture *dst, const AVSubtitleRect *rect, int imgw, int imgh)
432 int wrap, wrap3, width2, skip2;
433 int y, u, v, a, u1, v1, a1, w, h;
434 uint8_t *lum, *cb, *cr;
435 const uint8_t *p;
436 const uint32_t *pal;
437 int dstx, dsty, dstw, dsth;
439 dstx = FFMIN(FFMAX(rect->x, 0), imgw);
440 dstw = FFMIN(FFMAX(rect->w, 0), imgw - dstx);
441 dsty = FFMIN(FFMAX(rect->y, 0), imgh);
442 dsth = FFMIN(FFMAX(rect->h, 0), imgh - dsty);
443 lum = dst->data[0] + dsty * dst->linesize[0];
444 cb = dst->data[1] + (dsty >> 1) * dst->linesize[1];
445 cr = dst->data[2] + (dsty >> 1) * dst->linesize[2];
447 width2 = (dstw + 1) >> 1;
448 skip2 = dstx >> 1;
449 wrap = dst->linesize[0];
450 wrap3 = rect->linesize;
451 p = rect->bitmap;
452 pal = rect->rgba_palette; /* Now in YCrCb! */
454 if (dsty & 1) {
455 lum += dstx;
456 cb += skip2;
457 cr += skip2;
459 if (dstx & 1) {
460 YUVA_IN(y, u, v, a, p, pal);
461 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
462 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
463 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
464 cb++;
465 cr++;
466 lum++;
467 p += BPP;
469 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
470 YUVA_IN(y, u, v, a, p, pal);
471 u1 = u;
472 v1 = v;
473 a1 = a;
474 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
476 YUVA_IN(y, u, v, a, p + BPP, pal);
477 u1 += u;
478 v1 += v;
479 a1 += a;
480 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
481 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
482 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
483 cb++;
484 cr++;
485 p += 2 * BPP;
486 lum += 2;
488 if (w) {
489 YUVA_IN(y, u, v, a, p, pal);
490 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
491 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
492 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
494 p += wrap3 + (wrap3 - dstw * BPP);
495 lum += wrap + (wrap - dstw - dstx);
496 cb += dst->linesize[1] - width2 - skip2;
497 cr += dst->linesize[2] - width2 - skip2;
499 for(h = dsth - (dsty & 1); h >= 2; h -= 2) {
500 lum += dstx;
501 cb += skip2;
502 cr += skip2;
504 if (dstx & 1) {
505 YUVA_IN(y, u, v, a, p, pal);
506 u1 = u;
507 v1 = v;
508 a1 = a;
509 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
510 p += wrap3;
511 lum += wrap;
512 YUVA_IN(y, u, v, a, p, pal);
513 u1 += u;
514 v1 += v;
515 a1 += a;
516 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
517 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
518 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
519 cb++;
520 cr++;
521 p += -wrap3 + BPP;
522 lum += -wrap + 1;
524 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
525 YUVA_IN(y, u, v, a, p, pal);
526 u1 = u;
527 v1 = v;
528 a1 = a;
529 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
531 YUVA_IN(y, u, v, a, p, pal);
532 u1 += u;
533 v1 += v;
534 a1 += a;
535 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
536 p += wrap3;
537 lum += wrap;
539 YUVA_IN(y, u, v, a, p, pal);
540 u1 += u;
541 v1 += v;
542 a1 += a;
543 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
545 YUVA_IN(y, u, v, a, p, pal);
546 u1 += u;
547 v1 += v;
548 a1 += a;
549 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
551 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 2);
552 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 2);
554 cb++;
555 cr++;
556 p += -wrap3 + 2 * BPP;
557 lum += -wrap + 2;
559 if (w) {
560 YUVA_IN(y, u, v, a, p, pal);
561 u1 = u;
562 v1 = v;
563 a1 = a;
564 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
565 p += wrap3;
566 lum += wrap;
567 YUVA_IN(y, u, v, a, p, pal);
568 u1 += u;
569 v1 += v;
570 a1 += a;
571 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
572 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1);
573 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
574 cb++;
575 cr++;
576 p += -wrap3 + BPP;
577 lum += -wrap + 1;
579 p += wrap3 + (wrap3 - dstw * BPP);
580 lum += wrap + (wrap - dstw - dstx);
581 cb += dst->linesize[1] - width2 - skip2;
582 cr += dst->linesize[2] - width2 - skip2;
584 /* handle odd height */
585 if (h) {
586 lum += dstx;
587 cb += skip2;
588 cr += skip2;
590 if (dstx & 1) {
591 YUVA_IN(y, u, v, a, p, pal);
592 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
593 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
594 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
595 cb++;
596 cr++;
597 lum++;
598 p += BPP;
600 for(w = dstw - (dstx & 1); w >= 2; w -= 2) {
601 YUVA_IN(y, u, v, a, p, pal);
602 u1 = u;
603 v1 = v;
604 a1 = a;
605 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
607 YUVA_IN(y, u, v, a, p + BPP, pal);
608 u1 += u;
609 v1 += v;
610 a1 += a;
611 lum[1] = ALPHA_BLEND(a, lum[1], y, 0);
612 cb[0] = ALPHA_BLEND(a1 >> 2, cb[0], u1, 1); /* use the summed chroma, as in the loops above */
613 cr[0] = ALPHA_BLEND(a1 >> 2, cr[0], v1, 1);
614 cb++;
615 cr++;
616 p += 2 * BPP;
617 lum += 2;
619 if (w) {
620 YUVA_IN(y, u, v, a, p, pal);
621 lum[0] = ALPHA_BLEND(a, lum[0], y, 0);
622 cb[0] = ALPHA_BLEND(a >> 2, cb[0], u, 0);
623 cr[0] = ALPHA_BLEND(a >> 2, cr[0], v, 0);
628 static void free_subpicture(SubPicture *sp)
630 int i;
632 for (i = 0; i < sp->sub.num_rects; i++)
634 av_free(sp->sub.rects[i].bitmap);
635 av_free(sp->sub.rects[i].rgba_palette);
638 av_free(sp->sub.rects);
640 memset(&sp->sub, 0, sizeof(AVSubtitle));
643 static void video_image_display(VideoState *is)
645 VideoPicture *vp;
646 SubPicture *sp;
647 AVPicture pict;
648 float aspect_ratio;
649 int width, height, x, y;
650 SDL_Rect rect;
651 int i;
653 vp = &is->pictq[is->pictq_rindex];
654 if (vp->bmp) {
655 /* XXX: use variable in the frame */
656 if (is->video_st->codec->sample_aspect_ratio.num == 0)
657 aspect_ratio = 0;
658 else
659 aspect_ratio = av_q2d(is->video_st->codec->sample_aspect_ratio)
660 * is->video_st->codec->width / is->video_st->codec->height;
661 if (aspect_ratio <= 0.0)
662 aspect_ratio = (float)is->video_st->codec->width /
663 (float)is->video_st->codec->height;
664 /* if an active format is indicated, then it overrides the
665 mpeg format */
666 #if 0
667 if (is->video_st->codec->dtg_active_format != is->dtg_active_format) {
668 is->dtg_active_format = is->video_st->codec->dtg_active_format;
669 printf("dtg_active_format=%d\n", is->dtg_active_format);
671 #endif
672 #if 0
673 switch(is->video_st->codec->dtg_active_format) {
674 case FF_DTG_AFD_SAME:
675 default:
676 /* nothing to do */
677 break;
678 case FF_DTG_AFD_4_3:
679 aspect_ratio = 4.0 / 3.0;
680 break;
681 case FF_DTG_AFD_16_9:
682 aspect_ratio = 16.0 / 9.0;
683 break;
684 case FF_DTG_AFD_14_9:
685 aspect_ratio = 14.0 / 9.0;
686 break;
687 case FF_DTG_AFD_4_3_SP_14_9:
688 aspect_ratio = 14.0 / 9.0;
689 break;
690 case FF_DTG_AFD_16_9_SP_14_9:
691 aspect_ratio = 14.0 / 9.0;
692 break;
693 case FF_DTG_AFD_SP_4_3:
694 aspect_ratio = 4.0 / 3.0;
695 break;
697 #endif
699 if (is->subtitle_st)
701 if (is->subpq_size > 0)
703 sp = &is->subpq[is->subpq_rindex];
705 if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000))
707 SDL_LockYUVOverlay (vp->bmp);
709 pict.data[0] = vp->bmp->pixels[0];
710 pict.data[1] = vp->bmp->pixels[2];
711 pict.data[2] = vp->bmp->pixels[1];
713 pict.linesize[0] = vp->bmp->pitches[0];
714 pict.linesize[1] = vp->bmp->pitches[2];
715 pict.linesize[2] = vp->bmp->pitches[1];
717 for (i = 0; i < sp->sub.num_rects; i++)
718 blend_subrect(&pict, &sp->sub.rects[i],
719 vp->bmp->w, vp->bmp->h);
721 SDL_UnlockYUVOverlay (vp->bmp);
727 /* XXX: we assume the screen has a 1:1 pixel aspect ratio */
728 height = is->height;
729 width = ((int)rint(height * aspect_ratio)) & -3;
730 if (width > is->width) {
731 width = is->width;
732 height = ((int)rint(width / aspect_ratio)) & -3;
734 x = (is->width - width) / 2;
735 y = (is->height - height) / 2;
736 if (!is->no_background) {
737 /* fill the background */
738 // fill_border(is, x, y, width, height, QERGB(0x00, 0x00, 0x00));
739 } else {
740 is->no_background = 0;
742 rect.x = is->xleft + x;
743 rect.y = is->ytop + y;
744 rect.w = width;
745 rect.h = height;
746 SDL_DisplayYUVOverlay(vp->bmp, &rect);
747 } else {
748 #if 0
749 fill_rectangle(screen,
750 is->xleft, is->ytop, is->width, is->height,
751 QERGB(0x00, 0x00, 0x00));
752 #endif
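/* modulo that always returns a non-negative result (for b > 0) */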
756 static inline int compute_mod(int a, int b)
758 a = a % b;
759 if (a >= 0)
760 return a;
761 else
762 return a + b;
765 static void video_audio_display(VideoState *s)
767 int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
768 int ch, channels, h, h2, bgcolor, fgcolor;
769 int64_t time_diff; /* microseconds since the last audio callback */
771 /* compute display index: center on the samples currently being output */
772 channels = s->audio_st->codec->channels;
773 nb_display_channels = channels;
774 if (!s->paused) {
775 n = 2 * channels;
776 delay = audio_write_get_buf_size(s);
777 delay /= n;
779 /* to be more precise, we take into account the time spent since
780 the last buffer computation */
781 if (audio_callback_time) {
782 time_diff = av_gettime() - audio_callback_time;
783 delay += (time_diff * s->audio_st->codec->sample_rate) / 1000000;
786 delay -= s->width / 2;
787 if (delay < s->width)
788 delay = s->width;
790 i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
792 h= INT_MIN;
793 for(i=0; i<1000; i+=channels){
794 int idx= (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
795 int a= s->sample_array[idx];
796 int b= s->sample_array[(idx + 4*channels)%SAMPLE_ARRAY_SIZE];
797 int c= s->sample_array[(idx + 5*channels)%SAMPLE_ARRAY_SIZE];
798 int d= s->sample_array[(idx + 9*channels)%SAMPLE_ARRAY_SIZE];
799 int score= a-d;
800 if(h<score && (b^c)<0){
801 h= score;
802 i_start= idx;
806 s->last_i_start = i_start;
807 } else {
808 i_start = s->last_i_start;
811 bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
812 fill_rectangle(screen,
813 s->xleft, s->ytop, s->width, s->height,
814 bgcolor);
816 fgcolor = SDL_MapRGB(screen->format, 0xff, 0xff, 0xff);
818 /* total height for one channel */
819 h = s->height / nb_display_channels;
820 /* graph height / 2 */
821 h2 = (h * 9) / 20;
822 for(ch = 0;ch < nb_display_channels; ch++) {
823 i = i_start + ch;
824 y1 = s->ytop + ch * h + (h / 2); /* position of center line */
825 for(x = 0; x < s->width; x++) {
826 y = (s->sample_array[i] * h2) >> 15;
827 if (y < 0) {
828 y = -y;
829 ys = y1 - y;
830 } else {
831 ys = y1;
833 fill_rectangle(screen,
834 s->xleft + x, ys, 1, y,
835 fgcolor);
836 i += channels;
837 if (i >= SAMPLE_ARRAY_SIZE)
838 i -= SAMPLE_ARRAY_SIZE;
842 fgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0xff);
844 for(ch = 1;ch < nb_display_channels; ch++) {
845 y = s->ytop + ch * h;
846 fill_rectangle(screen,
847 s->xleft, y, s->width, 1,
848 fgcolor);
850 SDL_UpdateRect(screen, s->xleft, s->ytop, s->width, s->height);
853 static int video_open(VideoState *is){
854 int flags = SDL_HWSURFACE|SDL_ASYNCBLIT|SDL_HWACCEL;
855 int w,h;
857 if(is_full_screen) flags |= SDL_FULLSCREEN;
858 else flags |= SDL_RESIZABLE;
860 if (is_full_screen && fs_screen_width) {
861 w = fs_screen_width;
862 h = fs_screen_height;
863 } else if(!is_full_screen && screen_width){
864 w = screen_width;
865 h = screen_height;
866 }else if (is->video_st && is->video_st->codec->width){
867 w = is->video_st->codec->width;
868 h = is->video_st->codec->height;
869 } else {
870 w = 640;
871 h = 480;
873 #ifndef __APPLE__
874 screen = SDL_SetVideoMode(w, h, 0, flags);
875 #else
876 /* setting bits_per_pixel = 0 or 32 causes blank video on OS X */
877 screen = SDL_SetVideoMode(w, h, 24, flags);
878 #endif
879 if (!screen) {
880 fprintf(stderr, "SDL: could not set video mode - exiting\n");
881 return -1;
883 SDL_WM_SetCaption("FFplay", "FFplay");
885 is->width = screen->w;
886 is->height = screen->h;
888 return 0;
891 /* display the current picture, if any */
892 static void video_display(VideoState *is)
894 if(!screen)
895 video_open(cur_stream);
896 if (is->audio_st && is->show_audio)
897 video_audio_display(is);
898 else if (is->video_st)
899 video_image_display(is);
902 static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
904 SDL_Event event;
905 event.type = FF_REFRESH_EVENT;
906 event.user.data1 = opaque;
907 SDL_PushEvent(&event);
908 return 0; /* 0 means stop timer */
911 /* schedule a video refresh in 'delay' ms */
912 static void schedule_refresh(VideoState *is, int delay)
914 SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
917 /* get the current audio clock value */
918 static double get_audio_clock(VideoState *is)
920 double pts;
921 int hw_buf_size, bytes_per_sec;
922 pts = is->audio_clock;
923 hw_buf_size = audio_write_get_buf_size(is);
924 bytes_per_sec = 0;
925 if (is->audio_st) {
926 bytes_per_sec = is->audio_st->codec->sample_rate *
927 2 * is->audio_st->codec->channels;
929 if (bytes_per_sec)
930 pts -= (double)hw_buf_size / bytes_per_sec;
931 return pts;
934 /* get the current video clock value */
935 static double get_video_clock(VideoState *is)
937 double delta;
938 if (is->paused) {
939 delta = 0;
940 } else {
941 delta = (av_gettime() - is->video_current_pts_time) / 1000000.0;
943 return is->video_current_pts + delta;
946 /* get the current external clock value */
947 static double get_external_clock(VideoState *is)
949 int64_t ti;
950 ti = av_gettime();
951 return is->external_clock + ((ti - is->external_clock_time) * 1e-6);
954 /* get the current master clock value */
955 static double get_master_clock(VideoState *is)
957 double val;
959 if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
960 if (is->video_st)
961 val = get_video_clock(is);
962 else
963 val = get_audio_clock(is);
964 } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
965 if (is->audio_st)
966 val = get_audio_clock(is);
967 else
968 val = get_video_clock(is);
969 } else {
970 val = get_external_clock(is);
972 return val;
975 /* seek in the stream */
976 static void stream_seek(VideoState *is, int64_t pos, int rel)
978 if (!is->seek_req) {
979 is->seek_pos = pos;
980 is->seek_flags = rel < 0 ? AVSEEK_FLAG_BACKWARD : 0;
981 if (seek_by_bytes)
982 is->seek_flags |= AVSEEK_FLAG_BYTE;
983 is->seek_req = 1;
987 /* pause or resume the video */
988 static void stream_pause(VideoState *is)
990 is->paused = !is->paused;
991 if (!is->paused) {
992 is->video_current_pts = get_video_clock(is);
993 is->frame_timer += (av_gettime() - is->video_current_pts_time) / 1000000.0;
997 /* called to display each frame */
998 static void video_refresh_timer(void *opaque)
1000 VideoState *is = opaque;
1001 VideoPicture *vp;
1002 double actual_delay, delay, sync_threshold, ref_clock, diff;
1004 SubPicture *sp, *sp2;
1006 if (is->video_st) {
1007 if (is->pictq_size == 0) {
1008 /* if no picture, need to wait */
1009 schedule_refresh(is, 1);
1010 } else {
1011 /* dequeue the picture */
1012 vp = &is->pictq[is->pictq_rindex];
1014 /* update current video pts */
1015 is->video_current_pts = vp->pts;
1016 is->video_current_pts_time = av_gettime();
1018 /* compute nominal delay */
1019 delay = vp->pts - is->frame_last_pts;
1020 if (delay <= 0 || delay >= 2.0) {
1021 /* if incorrect delay, use previous one */
1022 delay = is->frame_last_delay;
1024 is->frame_last_delay = delay;
1025 is->frame_last_pts = vp->pts;
1027 /* update delay to follow master synchronisation source */
1028 if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
1029 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1030 /* if video is slave, we try to correct big delays by
1031 duplicating or deleting a frame */
1032 ref_clock = get_master_clock(is);
1033 diff = vp->pts - ref_clock;
1035 /* skip or repeat frame. We take into account the
1036 delay to compute the threshold. I still don't know
1037 if it is the best guess */
1038 sync_threshold = AV_SYNC_THRESHOLD;
1039 if (delay > sync_threshold)
1040 sync_threshold = delay;
1041 if (fabs(diff) < AV_NOSYNC_THRESHOLD) {
1042 if (diff <= -sync_threshold)
1043 delay = 0;
1044 else if (diff >= sync_threshold)
1045 delay = 2 * delay;
1049 is->frame_timer += delay;
1050 /* compute the REAL delay (we need to do that to avoid
1051 long-term errors) */
1052 actual_delay = is->frame_timer - (av_gettime() / 1000000.0);
1053 if (actual_delay < 0.010) {
1054 /* XXX: should skip picture */
1055 actual_delay = 0.010;
1057 /* launch timer for next picture */
1058 schedule_refresh(is, (int)(actual_delay * 1000 + 0.5));
1060 #if defined(DEBUG_SYNC)
1061 printf("video: delay=%0.3f actual_delay=%0.3f pts=%0.3f A-V=%f\n",
1062 delay, actual_delay, vp->pts, -diff);
1063 #endif
1065 if(is->subtitle_st) {
1066 if (is->subtitle_stream_changed) {
1067 SDL_LockMutex(is->subpq_mutex);
1069 while (is->subpq_size) {
1070 free_subpicture(&is->subpq[is->subpq_rindex]);
1072 /* update queue size and signal for next picture */
1073 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1074 is->subpq_rindex = 0;
1076 is->subpq_size--;
1078 is->subtitle_stream_changed = 0;
1080 SDL_CondSignal(is->subpq_cond);
1081 SDL_UnlockMutex(is->subpq_mutex);
1082 } else {
1083 if (is->subpq_size > 0) {
1084 sp = &is->subpq[is->subpq_rindex];
1086 if (is->subpq_size > 1)
1087 sp2 = &is->subpq[(is->subpq_rindex + 1) % SUBPICTURE_QUEUE_SIZE];
1088 else
1089 sp2 = NULL;
1091 if ((is->video_current_pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1092 || (sp2 && is->video_current_pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1094 free_subpicture(sp);
1096 /* update queue size and signal for next picture */
1097 if (++is->subpq_rindex == SUBPICTURE_QUEUE_SIZE)
1098 is->subpq_rindex = 0;
1100 SDL_LockMutex(is->subpq_mutex);
1101 is->subpq_size--;
1102 SDL_CondSignal(is->subpq_cond);
1103 SDL_UnlockMutex(is->subpq_mutex);
1109 /* display picture */
1110 video_display(is);
1112 /* update queue size and signal for next picture */
1113 if (++is->pictq_rindex == VIDEO_PICTURE_QUEUE_SIZE)
1114 is->pictq_rindex = 0;
1116 SDL_LockMutex(is->pictq_mutex);
1117 is->pictq_size--;
1118 SDL_CondSignal(is->pictq_cond);
1119 SDL_UnlockMutex(is->pictq_mutex);
1121 } else if (is->audio_st) {
1122 /* draw the next audio frame */
1124 schedule_refresh(is, 40);
1126 /* if there is only an audio stream, then display the audio bars (better
1127 than nothing, just to test the implementation) */
1129 /* display picture */
1130 video_display(is);
1131 } else {
1132 schedule_refresh(is, 100);
1134 if (show_status) {
1135 static int64_t last_time;
1136 int64_t cur_time;
1137 int aqsize, vqsize, sqsize;
1138 double av_diff;
1140 cur_time = av_gettime();
1141 if (!last_time || (cur_time - last_time) >= 500 * 1000) {
1142 aqsize = 0;
1143 vqsize = 0;
1144 sqsize = 0;
1145 if (is->audio_st)
1146 aqsize = is->audioq.size;
1147 if (is->video_st)
1148 vqsize = is->videoq.size;
1149 if (is->subtitle_st)
1150 sqsize = is->subtitleq.size;
1151 av_diff = 0;
1152 if (is->audio_st && is->video_st)
1153 av_diff = get_audio_clock(is) - get_video_clock(is);
1154 printf("%7.2f A-V:%7.3f aq=%5dKB vq=%5dKB sq=%5dB \r",
1155 get_master_clock(is), av_diff, aqsize / 1024, vqsize / 1024, sqsize);
1156 fflush(stdout);
1157 last_time = cur_time;
1162 /* allocate a picture (this must be done in the main thread to avoid
1163 potential locking problems) */
1164 static void alloc_picture(void *opaque)
1166 VideoState *is = opaque;
1167 VideoPicture *vp;
1169 vp = &is->pictq[is->pictq_windex];
1171 if (vp->bmp)
1172 SDL_FreeYUVOverlay(vp->bmp);
1174 #if 0
1175 /* XXX: use generic function */
1176 /* XXX: disable overlay if no hardware acceleration or if RGB format */
1177 switch(is->video_st->codec->pix_fmt) {
1178 case PIX_FMT_YUV420P:
1179 case PIX_FMT_YUV422P:
1180 case PIX_FMT_YUV444P:
1181 case PIX_FMT_YUYV422:
1182 case PIX_FMT_YUV410P:
1183 case PIX_FMT_YUV411P:
1184 is_yuv = 1;
1185 break;
1186 default:
1187 is_yuv = 0;
1188 break;
1190 #endif
1191 vp->bmp = SDL_CreateYUVOverlay(is->video_st->codec->width,
1192 is->video_st->codec->height,
1193 SDL_YV12_OVERLAY,
1194 screen);
1195 vp->width = is->video_st->codec->width;
1196 vp->height = is->video_st->codec->height;
1198 SDL_LockMutex(is->pictq_mutex);
1199 vp->allocated = 1;
1200 SDL_CondSignal(is->pictq_cond);
1201 SDL_UnlockMutex(is->pictq_mutex);
1206 * @param pts the dts of the pkt / pts of the frame and guessed if not known
1208 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts)
1210 VideoPicture *vp;
1211 int dst_pix_fmt;
1212 AVPicture pict;
1213 static struct SwsContext *img_convert_ctx;
1215 /* wait until we have space to put a new picture */
1216 SDL_LockMutex(is->pictq_mutex);
1217 while (is->pictq_size >= VIDEO_PICTURE_QUEUE_SIZE &&
1218 !is->videoq.abort_request) {
1219 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1221 SDL_UnlockMutex(is->pictq_mutex);
1223 if (is->videoq.abort_request)
1224 return -1;
1226 vp = &is->pictq[is->pictq_windex];
1228 /* alloc or resize hardware picture buffer */
1229 if (!vp->bmp ||
1230 vp->width != is->video_st->codec->width ||
1231 vp->height != is->video_st->codec->height) {
1232 SDL_Event event;
1234 vp->allocated = 0;
1236 /* the allocation must be done in the main thread to avoid
1237 locking problems */
1238 event.type = FF_ALLOC_EVENT;
1239 event.user.data1 = is;
1240 SDL_PushEvent(&event);
1242 /* wait until the picture is allocated */
1243 SDL_LockMutex(is->pictq_mutex);
1244 while (!vp->allocated && !is->videoq.abort_request) {
1245 SDL_CondWait(is->pictq_cond, is->pictq_mutex);
1247 SDL_UnlockMutex(is->pictq_mutex);
1249 if (is->videoq.abort_request)
1250 return -1;
1253 /* if the frame is not skipped, then display it */
1254 if (vp->bmp) {
1255 /* get a pointer on the bitmap */
1256 SDL_LockYUVOverlay (vp->bmp);
1258 dst_pix_fmt = PIX_FMT_YUV420P;
1259 pict.data[0] = vp->bmp->pixels[0];
1260 pict.data[1] = vp->bmp->pixels[2];
1261 pict.data[2] = vp->bmp->pixels[1];
1263 pict.linesize[0] = vp->bmp->pitches[0];
1264 pict.linesize[1] = vp->bmp->pitches[2];
1265 pict.linesize[2] = vp->bmp->pitches[1];
1266 img_convert_ctx = sws_getCachedContext(img_convert_ctx,
1267 is->video_st->codec->width, is->video_st->codec->height,
1268 is->video_st->codec->pix_fmt,
1269 is->video_st->codec->width, is->video_st->codec->height,
1270 dst_pix_fmt, sws_flags, NULL, NULL, NULL);
1271 if (img_convert_ctx == NULL) {
1272 fprintf(stderr, "Cannot initialize the conversion context\n");
1273 exit(1);
1275 sws_scale(img_convert_ctx, src_frame->data, src_frame->linesize,
1276 0, is->video_st->codec->height, pict.data, pict.linesize);
1277 /* update the bitmap content */
1278 SDL_UnlockYUVOverlay(vp->bmp);
1280 vp->pts = pts;
1282 /* now we can update the picture count */
1283 if (++is->pictq_windex == VIDEO_PICTURE_QUEUE_SIZE)
1284 is->pictq_windex = 0;
1285 SDL_LockMutex(is->pictq_mutex);
1286 is->pictq_size++;
1287 SDL_UnlockMutex(is->pictq_mutex);
1289 return 0;
1293 * compute the exact PTS for the picture if it is omitted in the stream
1294 * @param pts1 the dts of the pkt / pts of the frame
1296 static int output_picture2(VideoState *is, AVFrame *src_frame, double pts1)
1298 double frame_delay, pts;
1300 pts = pts1;
1302 if (pts != 0) {
1303 /* update video clock with pts, if present */
1304 is->video_clock = pts;
1305 } else {
1306 pts = is->video_clock;
1308 /* update video clock for next frame */
1309 frame_delay = av_q2d(is->video_st->codec->time_base);
1310 /* for MPEG2, the frame can be repeated, so we update the
1311 clock accordingly */
1312 frame_delay += src_frame->repeat_pict * (frame_delay * 0.5);
1313 is->video_clock += frame_delay;
1315 #if defined(DEBUG_SYNC) && 0
1317 int ftype;
1318 if (src_frame->pict_type == FF_B_TYPE)
1319 ftype = 'B';
1320 else if (src_frame->pict_type == FF_I_TYPE)
1321 ftype = 'I';
1322 else
1323 ftype = 'P';
1324 printf("frame_type=%c clock=%0.3f pts=%0.3f\n",
1325 ftype, pts, pts1);
1327 #endif
1328 return queue_picture(is, src_frame, pts);
1331 static uint64_t global_video_pkt_pts= AV_NOPTS_VALUE;
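/* pts tracking through the decoder: the pts of the packet that starts a frame
   is stored into the frame's opaque field from get_buffer(), so that when the
   decoder outputs (possibly reordered) frames, video_thread() can recover the
   pts that actually belongs to each frame */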
1333 static int my_get_buffer(struct AVCodecContext *c, AVFrame *pic){
1334 int ret= avcodec_default_get_buffer(c, pic);
1335 uint64_t *pts= av_malloc(sizeof(uint64_t));
1336 *pts= global_video_pkt_pts;
1337 pic->opaque= pts;
1338 return ret;
1341 static void my_release_buffer(struct AVCodecContext *c, AVFrame *pic){
1342 if(pic) av_freep(&pic->opaque);
1343 avcodec_default_release_buffer(c, pic);
1346 static int video_thread(void *arg)
1348 VideoState *is = arg;
1349 AVPacket pkt1, *pkt = &pkt1;
1350 int len1, got_picture;
1351 AVFrame *frame= avcodec_alloc_frame();
1352 double pts;
1354 for(;;) {
1355 while (is->paused && !is->videoq.abort_request) {
1356 SDL_Delay(10);
1358 if (packet_queue_get(&is->videoq, pkt, 1) < 0)
1359 break;
1361 if(pkt->data == flush_pkt.data){
1362 avcodec_flush_buffers(is->video_st->codec);
1363 continue;
1366 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1367 this packet, if any */
1368 global_video_pkt_pts= pkt->pts;
1369 len1 = avcodec_decode_video(is->video_st->codec,
1370 frame, &got_picture,
1371 pkt->data, pkt->size);
1373 if( (decoder_reorder_pts || pkt->dts == AV_NOPTS_VALUE)
1374 && frame->opaque && *(uint64_t*)frame->opaque != AV_NOPTS_VALUE)
1375 pts= *(uint64_t*)frame->opaque;
1376 else if(pkt->dts != AV_NOPTS_VALUE)
1377 pts= pkt->dts;
1378 else
1379 pts= 0;
1380 pts *= av_q2d(is->video_st->time_base);
1382 // if (len1 < 0)
1383 // break;
1384 if (got_picture) {
1385 if (output_picture2(is, frame, pts) < 0)
1386 goto the_end;
1388 av_free_packet(pkt);
1389 if (step)
1390 if (cur_stream)
1391 stream_pause(cur_stream);
1393 the_end:
1394 av_free(frame);
1395 return 0;
1398 static int subtitle_thread(void *arg)
1400 VideoState *is = arg;
1401 SubPicture *sp;
1402 AVPacket pkt1, *pkt = &pkt1;
1403 int len1, got_subtitle;
1404 double pts;
1405 int i, j;
1406 int r, g, b, y, u, v, a;
1408 for(;;) {
1409 while (is->paused && !is->subtitleq.abort_request) {
1410 SDL_Delay(10);
1412 if (packet_queue_get(&is->subtitleq, pkt, 1) < 0)
1413 break;
1415 if(pkt->data == flush_pkt.data){
1416 avcodec_flush_buffers(is->subtitle_st->codec);
1417 continue;
1419 SDL_LockMutex(is->subpq_mutex);
1420 while (is->subpq_size >= SUBPICTURE_QUEUE_SIZE &&
1421 !is->subtitleq.abort_request) {
1422 SDL_CondWait(is->subpq_cond, is->subpq_mutex);
1424 SDL_UnlockMutex(is->subpq_mutex);
1426 if (is->subtitleq.abort_request)
1427 goto the_end;
1429 sp = &is->subpq[is->subpq_windex];
1431 /* NOTE: ipts is the PTS of the _first_ picture beginning in
1432 this packet, if any */
1433 pts = 0;
1434 if (pkt->pts != AV_NOPTS_VALUE)
1435 pts = av_q2d(is->subtitle_st->time_base)*pkt->pts;
1437 len1 = avcodec_decode_subtitle(is->subtitle_st->codec,
1438 &sp->sub, &got_subtitle,
1439 pkt->data, pkt->size);
1440 // if (len1 < 0)
1441 // break;
1442 if (got_subtitle && sp->sub.format == 0) {
1443 sp->pts = pts;
1445 for (i = 0; i < sp->sub.num_rects; i++)
1447 for (j = 0; j < sp->sub.rects[i].nb_colors; j++)
1449 RGBA_IN(r, g, b, a, sp->sub.rects[i].rgba_palette + j);
1450 y = RGB_TO_Y_CCIR(r, g, b);
1451 u = RGB_TO_U_CCIR(r, g, b, 0);
1452 v = RGB_TO_V_CCIR(r, g, b, 0);
1453 YUVA_OUT(sp->sub.rects[i].rgba_palette + j, y, u, v, a);
1457 /* now we can update the picture count */
1458 if (++is->subpq_windex == SUBPICTURE_QUEUE_SIZE)
1459 is->subpq_windex = 0;
1460 SDL_LockMutex(is->subpq_mutex);
1461 is->subpq_size++;
1462 SDL_UnlockMutex(is->subpq_mutex);
1464 av_free_packet(pkt);
1465 // if (step)
1466 // if (cur_stream)
1467 // stream_pause(cur_stream);
1469 the_end:
1470 return 0;
1473 /* copy samples for viewing in editor window */
1474 static void update_sample_display(VideoState *is, short *samples, int samples_size)
1476 int size, len, channels;
1478 channels = is->audio_st->codec->channels;
1480 size = samples_size / sizeof(short);
1481 while (size > 0) {
1482 len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
1483 if (len > size)
1484 len = size;
1485 memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
1486 samples += len;
1487 is->sample_array_index += len;
1488 if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
1489 is->sample_array_index = 0;
1490 size -= len;
1494 /* return the new audio buffer size (samples can be added or deleted
1495 to get better sync if video or the external clock is the master) */
1496 static int synchronize_audio(VideoState *is, short *samples,
1497 int samples_size1, double pts)
1499 int n, samples_size;
1500 double ref_clock;
1502 n = 2 * is->audio_st->codec->channels;
1503 samples_size = samples_size1;
1505 /* if not master, then we try to remove or add samples to correct the clock */
1506 if (((is->av_sync_type == AV_SYNC_VIDEO_MASTER && is->video_st) ||
1507 is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK)) {
1508 double diff, avg_diff;
1509 int wanted_size, min_size, max_size, nb_samples;
1511 ref_clock = get_master_clock(is);
1512 diff = get_audio_clock(is) - ref_clock;
1514 if (diff < AV_NOSYNC_THRESHOLD) {
1515 is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
1516 if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
1517 /* not enough measurements to make a correct estimate */
1518 is->audio_diff_avg_count++;
1519 } else {
1520 /* estimate the A-V difference */
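/* audio_diff_cum is a geometric series of past differences; scaling it by
   (1 - audio_diff_avg_coef) turns the sum into a weighted average */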
1521 avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
1523 if (fabs(avg_diff) >= is->audio_diff_threshold) {
1524 wanted_size = samples_size + ((int)(diff * is->audio_st->codec->sample_rate) * n);
1525 nb_samples = samples_size / n;
1527 min_size = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1528 max_size = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX)) / 100) * n;
1529 if (wanted_size < min_size)
1530 wanted_size = min_size;
1531 else if (wanted_size > max_size)
1532 wanted_size = max_size;
1534 /* add or remove samples to correct the sync */
1535 if (wanted_size < samples_size) {
1536 /* remove samples */
1537 samples_size = wanted_size;
1538 } else if (wanted_size > samples_size) {
1539 uint8_t *samples_end, *q;
1540 int nb;
1542 /* add samples */
1543 nb = (wanted_size - samples_size); /* number of bytes to add */
1544 samples_end = (uint8_t *)samples + samples_size - n;
1545 q = samples_end + n;
1546 while (nb > 0) {
1547 memcpy(q, samples_end, n);
1548 q += n;
1549 nb -= n;
1551 samples_size = wanted_size;
1554 #if 0
1555 printf("diff=%f adiff=%f sample_diff=%d apts=%0.3f vpts=%0.3f %f\n",
1556 diff, avg_diff, samples_size - samples_size1,
1557 is->audio_clock, is->video_clock, is->audio_diff_threshold);
1558 #endif
1560 } else {
1561 /* the difference is too big: it may be caused by initial PTS errors, so
1562 reset the A-V filter */
1563 is->audio_diff_avg_count = 0;
1564 is->audio_diff_cum = 0;
1568 return samples_size;
1571 /* decode one audio frame and return its uncompressed size */
1572 static int audio_decode_frame(VideoState *is, uint8_t *audio_buf, int buf_size, double *pts_ptr)
1574 AVPacket *pkt = &is->audio_pkt;
1575 int n, len1, data_size;
1576 double pts;
1578 for(;;) {
1579 /* NOTE: the audio packet can contain several frames */
1580 while (is->audio_pkt_size > 0) {
1581 data_size = buf_size;
1582 len1 = avcodec_decode_audio2(is->audio_st->codec,
1583 (int16_t *)audio_buf, &data_size,
1584 is->audio_pkt_data, is->audio_pkt_size);
1585 if (len1 < 0) {
1586 /* if error, we skip the frame */
1587 is->audio_pkt_size = 0;
1588 break;
1591 is->audio_pkt_data += len1;
1592 is->audio_pkt_size -= len1;
1593 if (data_size <= 0)
1594 continue;
1595 /* if no pts, then compute it */
1596 pts = is->audio_clock;
1597 *pts_ptr = pts;
1598 n = 2 * is->audio_st->codec->channels;
1599 is->audio_clock += (double)data_size /
1600 (double)(n * is->audio_st->codec->sample_rate);
1601 #if defined(DEBUG_SYNC)
1603 static double last_clock;
1604 printf("audio: delay=%0.3f clock=%0.3f pts=%0.3f\n",
1605 is->audio_clock - last_clock,
1606 is->audio_clock, pts);
1607 last_clock = is->audio_clock;
1609 #endif
1610 return data_size;
1613 /* free the current packet */
1614 if (pkt->data)
1615 av_free_packet(pkt);
1617 if (is->paused || is->audioq.abort_request) {
1618 return -1;
1621 /* read next packet */
1622 if (packet_queue_get(&is->audioq, pkt, 1) < 0)
1623 return -1;
1624 if(pkt->data == flush_pkt.data){
1625 avcodec_flush_buffers(is->audio_st->codec);
1626 continue;
1629 is->audio_pkt_data = pkt->data;
1630 is->audio_pkt_size = pkt->size;
1632 /* update the audio clock with the pts, if present */
1633 if (pkt->pts != AV_NOPTS_VALUE) {
1634 is->audio_clock = av_q2d(is->audio_st->time_base)*pkt->pts;
1639 /* get the current audio output buffer size, in bytes. With SDL, we
1640 cannot get precise information about the hardware buffer */
1641 static int audio_write_get_buf_size(VideoState *is)
1643 return is->audio_buf_size - is->audio_buf_index;
1647 /* prepare a new audio buffer */
1648 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
1650 VideoState *is = opaque;
1651 int audio_size, len1;
1652 double pts;
1654 audio_callback_time = av_gettime();
1656 while (len > 0) {
1657 if (is->audio_buf_index >= is->audio_buf_size) {
1658 audio_size = audio_decode_frame(is, is->audio_buf, sizeof(is->audio_buf), &pts);
1659 if (audio_size < 0) {
1660 /* if error, just output silence */
1661 is->audio_buf_size = 1024;
1662 memset(is->audio_buf, 0, is->audio_buf_size);
1663 } else {
1664 if (is->show_audio)
1665 update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
1666 audio_size = synchronize_audio(is, (int16_t *)is->audio_buf, audio_size,
1667 pts);
1668 is->audio_buf_size = audio_size;
1670 is->audio_buf_index = 0;
1672 len1 = is->audio_buf_size - is->audio_buf_index;
1673 if (len1 > len)
1674 len1 = len;
1675 memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
1676 len -= len1;
1677 stream += len1;
1678 is->audio_buf_index += len1;
1682 /* open a given stream. Return 0 if OK */
1683 static int stream_component_open(VideoState *is, int stream_index)
1685 AVFormatContext *ic = is->ic;
1686 AVCodecContext *enc;
1687 AVCodec *codec;
1688 SDL_AudioSpec wanted_spec, spec;
1690 if (stream_index < 0 || stream_index >= ic->nb_streams)
1691 return -1;
1692 enc = ic->streams[stream_index]->codec;
1694 /* prepare audio output */
1695 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1696 if (enc->channels > 0) {
1697 enc->request_channels = FFMIN(2, enc->channels);
1698 } else {
1699 enc->request_channels = 2;
1703 codec = avcodec_find_decoder(enc->codec_id);
1704 enc->debug_mv = debug_mv;
1705 enc->debug = debug;
1706 enc->workaround_bugs = workaround_bugs;
1707 enc->lowres = lowres;
1708 if(lowres) enc->flags |= CODEC_FLAG_EMU_EDGE;
1709 enc->idct_algo= idct;
1710 if(fast) enc->flags2 |= CODEC_FLAG2_FAST;
1711 enc->skip_frame= skip_frame;
1712 enc->skip_idct= skip_idct;
1713 enc->skip_loop_filter= skip_loop_filter;
1714 enc->error_resilience= error_resilience;
1715 enc->error_concealment= error_concealment;
1716 if (!codec ||
1717 avcodec_open(enc, codec) < 0)
1718 return -1;
1720 /* prepare audio output */
1721 if (enc->codec_type == CODEC_TYPE_AUDIO) {
1722 wanted_spec.freq = enc->sample_rate;
1723 wanted_spec.format = AUDIO_S16SYS;
1724 wanted_spec.channels = enc->channels;
1725 wanted_spec.silence = 0;
1726 wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
1727 wanted_spec.callback = sdl_audio_callback;
1728 wanted_spec.userdata = is;
1729 if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
1730 fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
1731 return -1;
1733 is->audio_hw_buf_size = spec.size;
1736 if(thread_count>1)
1737 avcodec_thread_init(enc, thread_count);
1738 enc->thread_count= thread_count;
1739 switch(enc->codec_type) {
1740 case CODEC_TYPE_AUDIO:
1741 is->audio_stream = stream_index;
1742 is->audio_st = ic->streams[stream_index];
1743 is->audio_buf_size = 0;
1744 is->audio_buf_index = 0;
1746 /* init averaging filter */
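/* exponential averaging coefficient chosen so that after AUDIO_DIFF_AVG_NB
   measurements the oldest difference contributes only about 1% */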
1747 is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
1748 is->audio_diff_avg_count = 0;
1749 /* since we do not have a precise enough measure of the audio FIFO fullness,
1750 we only correct audio sync if the error is larger than this threshold */
1751 is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / enc->sample_rate;
1753 memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
1754 packet_queue_init(&is->audioq);
1755 SDL_PauseAudio(0);
1756 break;
1757 case CODEC_TYPE_VIDEO:
1758 is->video_stream = stream_index;
1759 is->video_st = ic->streams[stream_index];
1761 is->frame_last_delay = 40e-3;
1762 is->frame_timer = (double)av_gettime() / 1000000.0;
1763 is->video_current_pts_time = av_gettime();
1765 packet_queue_init(&is->videoq);
1766 is->video_tid = SDL_CreateThread(video_thread, is);
1768 enc-> get_buffer= my_get_buffer;
1769 enc->release_buffer= my_release_buffer;
1770 break;
1771 case CODEC_TYPE_SUBTITLE:
1772 is->subtitle_stream = stream_index;
1773 is->subtitle_st = ic->streams[stream_index];
1774 packet_queue_init(&is->subtitleq);
1776 is->subtitle_tid = SDL_CreateThread(subtitle_thread, is);
1777 break;
1778 default:
1779 break;
1781 return 0;
1784 static void stream_component_close(VideoState *is, int stream_index)
1786 AVFormatContext *ic = is->ic;
1787 AVCodecContext *enc;
1789 if (stream_index < 0 || stream_index >= ic->nb_streams)
1790 return;
1791 enc = ic->streams[stream_index]->codec;
1793 switch(enc->codec_type) {
1794 case CODEC_TYPE_AUDIO:
1795 packet_queue_abort(&is->audioq);
1797 SDL_CloseAudio();
1799 packet_queue_end(&is->audioq);
1800 break;
1801 case CODEC_TYPE_VIDEO:
1802 packet_queue_abort(&is->videoq);
1804 /* note: we also signal this condition to make sure we unblock the
1805 video thread in all cases */
1806 SDL_LockMutex(is->pictq_mutex);
1807 SDL_CondSignal(is->pictq_cond);
1808 SDL_UnlockMutex(is->pictq_mutex);
1810 SDL_WaitThread(is->video_tid, NULL);
1812 packet_queue_end(&is->videoq);
1813 break;
1814 case CODEC_TYPE_SUBTITLE:
1815 packet_queue_abort(&is->subtitleq);
1817 /* note: we also signal this condition to make sure we unblock the
1818 subtitle thread in all cases */
1819 SDL_LockMutex(is->subpq_mutex);
1820 is->subtitle_stream_changed = 1;
1822 SDL_CondSignal(is->subpq_cond);
1823 SDL_UnlockMutex(is->subpq_mutex);
1825 SDL_WaitThread(is->subtitle_tid, NULL);
1827 packet_queue_end(&is->subtitleq);
1828 break;
1829 default:
1830 break;
1833 avcodec_close(enc);
1834 switch(enc->codec_type) {
1835 case CODEC_TYPE_AUDIO:
1836 is->audio_st = NULL;
1837 is->audio_stream = -1;
1838 break;
1839 case CODEC_TYPE_VIDEO:
1840 is->video_st = NULL;
1841 is->video_stream = -1;
1842 break;
1843 case CODEC_TYPE_SUBTITLE:
1844 is->subtitle_st = NULL;
1845 is->subtitle_stream = -1;
1846 break;
1847 default:
1848 break;
1852 static void dump_stream_info(const AVFormatContext *s)
1854 if (s->track != 0)
1855 fprintf(stderr, "Track: %d\n", s->track);
1856 if (s->title[0] != '\0')
1857 fprintf(stderr, "Title: %s\n", s->title);
1858 if (s->author[0] != '\0')
1859 fprintf(stderr, "Author: %s\n", s->author);
1860 if (s->copyright[0] != '\0')
1861 fprintf(stderr, "Copyright: %s\n", s->copyright);
1862 if (s->comment[0] != '\0')
1863 fprintf(stderr, "Comment: %s\n", s->comment);
1864 if (s->album[0] != '\0')
1865 fprintf(stderr, "Album: %s\n", s->album);
1866 if (s->year != 0)
1867 fprintf(stderr, "Year: %d\n", s->year);
1868 if (s->genre[0] != '\0')
1869 fprintf(stderr, "Genre: %s\n", s->genre);
1872 /* since we have only one decoding thread, we can use a global
1873 variable instead of a thread local variable */
1874 static VideoState *global_video_state;
1876 static int decode_interrupt_cb(void)
1878 return (global_video_state && global_video_state->abort_request);
1881 /* this thread gets the stream from the disk or the network */
1882 static int decode_thread(void *arg)
1884 VideoState *is = arg;
1885 AVFormatContext *ic;
1886 int err, i, ret, video_index, audio_index;
1887 AVPacket pkt1, *pkt = &pkt1;
1888 AVFormatParameters params, *ap = &params;
1890 video_index = -1;
1891 audio_index = -1;
1892 is->video_stream = -1;
1893 is->audio_stream = -1;
1894 is->subtitle_stream = -1;
1896 global_video_state = is;
1897 url_set_interrupt_cb(decode_interrupt_cb);
1899 memset(ap, 0, sizeof(*ap));
1901 ap->width = frame_width;
1902 ap->height= frame_height;
1903 ap->time_base= (AVRational){1, 25};
1904 ap->pix_fmt = frame_pix_fmt;
1906 err = av_open_input_file(&ic, is->filename, is->iformat, 0, ap);
1907 if (err < 0) {
1908 print_error(is->filename, err);
1909 ret = -1;
1910 goto fail;
1912 is->ic = ic;
1914 if(genpts)
1915 ic->flags |= AVFMT_FLAG_GENPTS;
1917 err = av_find_stream_info(ic);
1918 if (err < 0) {
1919 fprintf(stderr, "%s: could not find codec parameters\n", is->filename);
1920 ret = -1;
1921 goto fail;
1923 if(ic->pb)
1924 ic->pb->eof_reached= 0; //FIXME hack, ffplay maybe should not use url_feof() to test for the end
1926 /* if a seek was requested, execute it */
1927 if (start_time != AV_NOPTS_VALUE) {
1928 int64_t timestamp;
1930 timestamp = start_time;
1931 /* add the stream start time */
1932 if (ic->start_time != AV_NOPTS_VALUE)
1933 timestamp += ic->start_time;
1934 ret = av_seek_frame(ic, -1, timestamp, AVSEEK_FLAG_BACKWARD);
1935 if (ret < 0) {
1936 fprintf(stderr, "%s: could not seek to position %0.3f\n",
1937 is->filename, (double)timestamp / AV_TIME_BASE);
1941 for(i = 0; i < ic->nb_streams; i++) {
1942 AVCodecContext *enc = ic->streams[i]->codec;
1943 switch(enc->codec_type) {
1944 case CODEC_TYPE_AUDIO:
1945 if ((audio_index < 0 || wanted_audio_stream-- > 0) && !audio_disable)
1946 audio_index = i;
1947 break;
1948 case CODEC_TYPE_VIDEO:
1949 if ((video_index < 0 || wanted_video_stream-- > 0) && !video_disable)
1950 video_index = i;
1951 break;
1952 default:
1953 break;
1956 if (show_status) {
1957 dump_format(ic, 0, is->filename, 0);
1958 dump_stream_info(ic);
1961 /* open the streams */
1962 if (audio_index >= 0) {
1963 stream_component_open(is, audio_index);
1966 if (video_index >= 0) {
1967 stream_component_open(is, video_index);
1968 } else {
1969 if (!display_disable)
1970 is->show_audio = 1;
1973 if (is->video_stream < 0 && is->audio_stream < 0) {
1974 fprintf(stderr, "%s: could not open codecs\n", is->filename);
1975 ret = -1;
1976 goto fail;
1979 for(;;) {
1980 if (is->abort_request)
1981 break;
1982 if (is->paused != is->last_paused) {
1983 is->last_paused = is->paused;
1984 if (is->paused)
1985 av_read_pause(ic);
1986 else
1987 av_read_play(ic);
1989 #if defined(CONFIG_RTSP_DEMUXER) || defined(CONFIG_MMSH_PROTOCOL)
1990 if (is->paused &&
1991 (!strcmp(ic->iformat->name, "rtsp") ||
1992 (ic->pb && !strcmp(url_fileno(ic->pb)->prot->name, "mmsh")))) {
1993 /* wait 10 ms to avoid trying to get another packet */
1994 /* XXX: horrible */
1995 SDL_Delay(10);
1996 continue;
1998 #endif
1999 if (is->seek_req) {
2000 int stream_index= -1;
2001 int64_t seek_target= is->seek_pos;
2003 if (is-> video_stream >= 0) stream_index= is-> video_stream;
2004 else if(is-> audio_stream >= 0) stream_index= is-> audio_stream;
2005 else if(is->subtitle_stream >= 0) stream_index= is->subtitle_stream;
2007 if(stream_index>=0){
2008 seek_target= av_rescale_q(seek_target, AV_TIME_BASE_Q, ic->streams[stream_index]->time_base);
2011 ret = av_seek_frame(is->ic, stream_index, seek_target, is->seek_flags);
2012 if (ret < 0) {
2013 fprintf(stderr, "%s: error while seeking\n", is->ic->filename);
2014 }else{
2015 if (is->audio_stream >= 0) {
2016 packet_queue_flush(&is->audioq);
2017 packet_queue_put(&is->audioq, &flush_pkt);
2019 if (is->subtitle_stream >= 0) {
2020 packet_queue_flush(&is->subtitleq);
2021 packet_queue_put(&is->subtitleq, &flush_pkt);
2023 if (is->video_stream >= 0) {
2024 packet_queue_flush(&is->videoq);
2025 packet_queue_put(&is->videoq, &flush_pkt);
2028 is->seek_req = 0;
2031 /* if the queues are full, no need to read more */
2032 if (is->audioq.size > MAX_AUDIOQ_SIZE ||
2033 is->videoq.size > MAX_VIDEOQ_SIZE ||
2034 is->subtitleq.size > MAX_SUBTITLEQ_SIZE ||
2035 url_feof(ic->pb)) {
2036 /* wait 10 ms */
2037 SDL_Delay(10);
2038 continue;
2040 ret = av_read_frame(ic, pkt);
2041 if (ret < 0) {
2042 if (url_ferror(ic->pb) == 0) {
2043 SDL_Delay(100); /* wait for user event */
2044 continue;
2045 } else
2046 break;
2048 if (pkt->stream_index == is->audio_stream) {
2049 packet_queue_put(&is->audioq, pkt);
2050 } else if (pkt->stream_index == is->video_stream) {
2051 packet_queue_put(&is->videoq, pkt);
2052 } else if (pkt->stream_index == is->subtitle_stream) {
2053 packet_queue_put(&is->subtitleq, pkt);
2054 } else {
2055 av_free_packet(pkt);
2058 /* wait until the end */
2059 while (!is->abort_request) {
2060 SDL_Delay(100);
2063 ret = 0;
2064 fail:
2065 /* disable interrupting */
2066 global_video_state = NULL;
2068 /* close each stream */
2069 if (is->audio_stream >= 0)
2070 stream_component_close(is, is->audio_stream);
2071 if (is->video_stream >= 0)
2072 stream_component_close(is, is->video_stream);
2073 if (is->subtitle_stream >= 0)
2074 stream_component_close(is, is->subtitle_stream);
2075 if (is->ic) {
2076 av_close_input_file(is->ic);
2077 is->ic = NULL; /* safety */
2079 url_set_interrupt_cb(NULL);
2081 if (ret != 0) {
2082 SDL_Event event;
2084 event.type = FF_QUIT_EVENT;
2085 event.user.data1 = is;
2086 SDL_PushEvent(&event);
2088 return 0;
2091 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
2093 VideoState *is;
2095 is = av_mallocz(sizeof(VideoState));
2096 if (!is)
2097 return NULL;
2098 av_strlcpy(is->filename, filename, sizeof(is->filename));
2099 is->iformat = iformat;
2100 is->ytop = 0;
2101 is->xleft = 0;
2103 /* start video display */
2104 is->pictq_mutex = SDL_CreateMutex();
2105 is->pictq_cond = SDL_CreateCond();
2107 is->subpq_mutex = SDL_CreateMutex();
2108 is->subpq_cond = SDL_CreateCond();
2110 /* add the refresh timer to draw the picture */
2111 schedule_refresh(is, 40);
2113 is->av_sync_type = av_sync_type;
2114 is->parse_tid = SDL_CreateThread(decode_thread, is);
2115 if (!is->parse_tid) {
2116 av_free(is);
2117 return NULL;
2119 return is;
2122 static void stream_close(VideoState *is)
2124 VideoPicture *vp;
2125 int i;
2126 /* XXX: use a special url_shutdown call to abort parse cleanly */
2127 is->abort_request = 1;
2128 SDL_WaitThread(is->parse_tid, NULL);
2130 /* free all pictures */
2131 for(i=0;i<VIDEO_PICTURE_QUEUE_SIZE; i++) {
2132 vp = &is->pictq[i];
2133 if (vp->bmp) {
2134 SDL_FreeYUVOverlay(vp->bmp);
2135 vp->bmp = NULL;
2136 }
2137 }
2138 SDL_DestroyMutex(is->pictq_mutex);
2139 SDL_DestroyCond(is->pictq_cond);
2140 SDL_DestroyMutex(is->subpq_mutex);
2141 SDL_DestroyCond(is->subpq_cond);
2142 }
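/* stream_cycle_channel(): switch to the next stream of the given codec
 * type, wrapping around the stream list. Audio streams are only accepted
 * when sample_rate and channels are set; for subtitles the search can wrap
 * to -1, which effectively turns the subtitle stream off. */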
2144 static void stream_cycle_channel(VideoState *is, int codec_type)
2145 {
2146 AVFormatContext *ic = is->ic;
2147 int start_index, stream_index;
2148 AVStream *st;
2150 if (codec_type == CODEC_TYPE_VIDEO)
2151 start_index = is->video_stream;
2152 else if (codec_type == CODEC_TYPE_AUDIO)
2153 start_index = is->audio_stream;
2154 else
2155 start_index = is->subtitle_stream;
2156 if (start_index < (codec_type == CODEC_TYPE_SUBTITLE ? -1 : 0))
2157 return;
2158 stream_index = start_index;
2159 for(;;) {
2160 if (++stream_index >= is->ic->nb_streams)
2161 {
2162 if (codec_type == CODEC_TYPE_SUBTITLE)
2163 {
2164 stream_index = -1;
2165 goto the_end;
2166 } else
2167 stream_index = 0;
2168 }
2169 if (stream_index == start_index)
2170 return;
2171 st = ic->streams[stream_index];
2172 if (st->codec->codec_type == codec_type) {
2173 /* check that parameters are OK */
2174 switch(codec_type) {
2175 case CODEC_TYPE_AUDIO:
2176 if (st->codec->sample_rate != 0 &&
2177 st->codec->channels != 0)
2178 goto the_end;
2179 break;
2180 case CODEC_TYPE_VIDEO:
2181 case CODEC_TYPE_SUBTITLE:
2182 goto the_end;
2183 default:
2184 break;
2185 }
2186 }
2187 }
2188 the_end:
2189 stream_component_close(is, start_index);
2190 stream_component_open(is, stream_index);
2191 }
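/* Small helpers bound to keyboard shortcuts in event_loop(): toggle full
 * screen, pause/unpause, single-frame stepping, clean shutdown, and
 * switching between video and audio-waveform display. */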
2194 static void toggle_full_screen(void)
2195 {
2196 is_full_screen = !is_full_screen;
2197 if (!fs_screen_width) {
2198 /* use default SDL method */
2199 // SDL_WM_ToggleFullScreen(screen);
2200 }
2201 video_open(cur_stream);
2202 }
2204 static void toggle_pause(void)
2205 {
2206 if (cur_stream)
2207 stream_pause(cur_stream);
2208 step = 0;
2209 }
2211 static void step_to_next_frame(void)
2212 {
2213 if (cur_stream) {
2214 /* if the stream is paused, unpause it, then step */
2215 if (cur_stream->paused)
2216 stream_pause(cur_stream);
2217 }
2218 step = 1;
2219 }
2221 static void do_exit(void)
2222 {
2223 if (cur_stream) {
2224 stream_close(cur_stream);
2225 cur_stream = NULL;
2226 }
2227 if (show_status)
2228 printf("\n");
2229 SDL_Quit();
2230 exit(0);
2231 }
2233 static void toggle_audio_display(void)
2234 {
2235 if (cur_stream) {
2236 cur_stream->show_audio = !cur_stream->show_audio;
2237 }
2238 }
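/* event_loop(): main SDL event loop. Handles the key bindings listed in
 * show_help() (quit, full screen, pause, frame step, stream cycling,
 * waveform toggle, relative seeks), mouse clicks that seek proportionally
 * to the click position, window resizing, and the FF_ALLOC, FF_REFRESH and
 * FF_QUIT events posted from the decoding and refresh code. */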
2240 /* handle an event sent by the GUI */
2241 static void event_loop(void)
2242 {
2243 SDL_Event event;
2244 double incr, pos, frac;
2246 for(;;) {
2247 SDL_WaitEvent(&event);
2248 switch(event.type) {
2249 case SDL_KEYDOWN:
2250 switch(event.key.keysym.sym) {
2251 case SDLK_ESCAPE:
2252 case SDLK_q:
2253 do_exit();
2254 break;
2255 case SDLK_f:
2256 toggle_full_screen();
2257 break;
2258 case SDLK_p:
2259 case SDLK_SPACE:
2260 toggle_pause();
2261 break;
2262 case SDLK_s: //S: Step to next frame
2263 step_to_next_frame();
2264 break;
2265 case SDLK_a:
2266 if (cur_stream)
2267 stream_cycle_channel(cur_stream, CODEC_TYPE_AUDIO);
2268 break;
2269 case SDLK_v:
2270 if (cur_stream)
2271 stream_cycle_channel(cur_stream, CODEC_TYPE_VIDEO);
2272 break;
2273 case SDLK_t:
2274 if (cur_stream)
2275 stream_cycle_channel(cur_stream, CODEC_TYPE_SUBTITLE);
2276 break;
2277 case SDLK_w:
2278 toggle_audio_display();
2279 break;
2280 case SDLK_LEFT:
2281 incr = -10.0;
2282 goto do_seek;
2283 case SDLK_RIGHT:
2284 incr = 10.0;
2285 goto do_seek;
2286 case SDLK_UP:
2287 incr = 60.0;
2288 goto do_seek;
2289 case SDLK_DOWN:
2290 incr = -60.0;
2291 do_seek:
2292 if (cur_stream) {
2293 if (seek_by_bytes) {
2294 pos = url_ftell(cur_stream->ic->pb);
2295 if (cur_stream->ic->bit_rate)
2296 incr *= cur_stream->ic->bit_rate / 60.0;
2297 else
2298 incr *= 180000.0;
2299 pos += incr;
2300 stream_seek(cur_stream, pos, incr);
2301 } else {
2302 pos = get_master_clock(cur_stream);
2303 pos += incr;
2304 stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), incr);
2305 }
2306 }
2307 break;
2308 default:
2309 break;
2310 }
2311 break;
2312 case SDL_MOUSEBUTTONDOWN:
2313 if (cur_stream) {
2314 int ns, hh, mm, ss;
2315 int tns, thh, tmm, tss;
2316 tns = cur_stream->ic->duration/1000000LL;
2317 thh = tns/3600;
2318 tmm = (tns%3600)/60;
2319 tss = (tns%60);
2320 frac = (double)event.button.x/(double)cur_stream->width;
2321 ns = frac*tns;
2322 hh = ns/3600;
2323 mm = (ns%3600)/60;
2324 ss = (ns%60);
2325 fprintf(stderr, "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
2326 hh, mm, ss, thh, tmm, tss);
2327 stream_seek(cur_stream, (int64_t)(cur_stream->ic->start_time+frac*cur_stream->ic->duration), 0);
2328 }
2329 break;
2330 case SDL_VIDEORESIZE:
2331 if (cur_stream) {
2332 screen = SDL_SetVideoMode(event.resize.w, event.resize.h, 0,
2333 SDL_HWSURFACE|SDL_RESIZABLE|SDL_ASYNCBLIT|SDL_HWACCEL);
2334 screen_width = cur_stream->width = event.resize.w;
2335 screen_height= cur_stream->height= event.resize.h;
2336 }
2337 break;
2338 case SDL_QUIT:
2339 case FF_QUIT_EVENT:
2340 do_exit();
2341 break;
2342 case FF_ALLOC_EVENT:
2343 video_open(event.user.data1);
2344 alloc_picture(event.user.data1);
2345 break;
2346 case FF_REFRESH_EVENT:
2347 video_refresh_timer(event.user.data1);
2348 break;
2349 default:
2350 break;
2351 }
2352 }
2353 }
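/* Command line handling: the opt_* callbacks below are invoked by
 * parse_options() (cmdutils) for the entries of the options[] table and
 * mostly validate their argument and store it in a global. */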
2355 static void opt_frame_size(const char *arg)
2356 {
2357 if (av_parse_video_frame_size(&frame_width, &frame_height, arg) < 0) {
2358 fprintf(stderr, "Incorrect frame size\n");
2359 exit(1);
2360 }
2361 if ((frame_width % 2) != 0 || (frame_height % 2) != 0) {
2362 fprintf(stderr, "Frame size must be a multiple of 2\n");
2363 exit(1);
2364 }
2365 }
2367 static int opt_width(const char *opt, const char *arg)
2368 {
2369 screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2370 return 0;
2371 }
2373 static int opt_height(const char *opt, const char *arg)
2374 {
2375 screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
2376 return 0;
2377 }
2379 static void opt_format(const char *arg)
2380 {
2381 file_iformat = av_find_input_format(arg);
2382 if (!file_iformat) {
2383 fprintf(stderr, "Unknown input format: %s\n", arg);
2384 exit(1);
2385 }
2386 }
2388 static void opt_frame_pix_fmt(const char *arg)
2389 {
2390 frame_pix_fmt = avcodec_get_pix_fmt(arg);
2391 }
2393 static void opt_sync(const char *arg)
2394 {
2395 if (!strcmp(arg, "audio"))
2396 av_sync_type = AV_SYNC_AUDIO_MASTER;
2397 else if (!strcmp(arg, "video"))
2398 av_sync_type = AV_SYNC_VIDEO_MASTER;
2399 else if (!strcmp(arg, "ext"))
2400 av_sync_type = AV_SYNC_EXTERNAL_CLOCK;
2401 else {
2402 show_help();
2403 exit(1);
2404 }
2405 }
2407 static int opt_seek(const char *opt, const char *arg)
2408 {
2409 start_time = parse_time_or_die(opt, arg, 1);
2410 return 0;
2411 }
2413 static int opt_debug(const char *opt, const char *arg)
2414 {
2415 av_log_set_level(99);
2416 debug = parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2417 return 0;
2418 }
2420 static int opt_vismv(const char *opt, const char *arg)
2421 {
2422 debug_mv = parse_number_or_die(opt, arg, OPT_INT64, INT_MIN, INT_MAX);
2423 return 0;
2424 }
2426 static int opt_thread_count(const char *opt, const char *arg)
2427 {
2428 thread_count= parse_number_or_die(opt, arg, OPT_INT64, 0, INT_MAX);
2429 #if !defined(HAVE_THREADS)
2430 fprintf(stderr, "Warning: not compiled with thread support, using thread emulation\n");
2431 #endif
2432 return 0;
2433 }
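/* Option table consumed by parse_options(): OPT_EXIT entries print
 * information and quit, OPT_BOOL entries toggle the int they point to,
 * OPT_INT entries store a number directly, and OPT_FUNC2 entries dispatch
 * to the opt_*(opt, arg) handlers above. */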
2435 static const OptionDef options[] = {
2436 { "h", OPT_EXIT, {(void*)show_help}, "show help" },
2437 { "version", OPT_EXIT, {(void*)show_version}, "show version" },
2438 { "L", OPT_EXIT, {(void*)show_license}, "show license" },
2439 { "formats", OPT_EXIT, {(void*)show_formats}, "show available formats, codecs, protocols, ..." },
2440 { "x", HAS_ARG | OPT_FUNC2, {(void*)opt_width}, "force displayed width", "width" },
2441 { "y", HAS_ARG | OPT_FUNC2, {(void*)opt_height}, "force displayed height", "height" },
2442 { "s", HAS_ARG | OPT_VIDEO, {(void*)opt_frame_size}, "set frame size (WxH or abbreviation)", "size" },
2443 { "fs", OPT_BOOL, {(void*)&is_full_screen}, "force full screen" },
2444 { "an", OPT_BOOL, {(void*)&audio_disable}, "disable audio" },
2445 { "vn", OPT_BOOL, {(void*)&video_disable}, "disable video" },
2446 { "ast", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_audio_stream}, "", "" },
2447 { "vst", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&wanted_video_stream}, "", "" },
2448 { "ss", HAS_ARG | OPT_FUNC2, {(void*)&opt_seek}, "seek to a given position in seconds", "pos" },
2449 { "bytes", OPT_BOOL, {(void*)&seek_by_bytes}, "seek by bytes" },
2450 { "nodisp", OPT_BOOL, {(void*)&display_disable}, "disable graphical display" },
2451 { "f", HAS_ARG, {(void*)opt_format}, "force format", "fmt" },
2452 { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, {(void*)opt_frame_pix_fmt}, "set pixel format", "format" },
2453 { "stats", OPT_BOOL | OPT_EXPERT, {(void*)&show_status}, "show status", "" },
2454 { "debug", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_debug}, "print specific debug info", "" },
2455 { "bug", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&workaround_bugs}, "workaround bugs", "" },
2456 { "vismv", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_vismv}, "visualize motion vectors", "" },
2457 { "fast", OPT_BOOL | OPT_EXPERT, {(void*)&fast}, "non spec compliant optimizations", "" },
2458 { "genpts", OPT_BOOL | OPT_EXPERT, {(void*)&genpts}, "generate pts", "" },
2459 { "drp", OPT_BOOL |OPT_EXPERT, {(void*)&decoder_reorder_pts}, "let decoder reorder pts", ""},
2460 { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&lowres}, "", "" },
2461 { "skiploop", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_loop_filter}, "", "" },
2462 { "skipframe", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_frame}, "", "" },
2463 { "skipidct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&skip_idct}, "", "" },
2464 { "idct", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&idct}, "set idct algo", "algo" },
2465 { "er", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_resilience}, "set error detection threshold (0-4)", "threshold" },
2466 { "ec", OPT_INT | HAS_ARG | OPT_EXPERT, {(void*)&error_concealment}, "set error concealment options", "bit_mask" },
2467 { "sync", HAS_ARG | OPT_EXPERT, {(void*)opt_sync}, "set audio-video sync. type (type=audio/video/ext)", "type" },
2468 { "threads", HAS_ARG | OPT_FUNC2 | OPT_EXPERT, {(void*)opt_thread_count}, "thread count", "count" },
2469 { NULL, },
2470 };
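/* show_help(): print usage, the main and expert option lists, and the
 * interactive key bindings handled in event_loop(). */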
2472 static void show_help(void)
2473 {
2474 printf("usage: ffplay [options] input_file\n"
2475 "Simple media player\n");
2476 printf("\n");
2477 show_help_options(options, "Main options:\n",
2478 OPT_EXPERT, 0);
2479 show_help_options(options, "\nAdvanced options:\n",
2480 OPT_EXPERT, OPT_EXPERT);
2481 printf("\nWhile playing:\n"
2482 "q, ESC quit\n"
2483 "f toggle full screen\n"
2484 "p, SPC pause\n"
2485 "a cycle audio channel\n"
2486 "v cycle video channel\n"
2487 "t cycle subtitle channel\n"
2488 "w show audio waves\n"
2489 "left/right seek backward/forward 10 seconds\n"
2490 "down/up seek backward/forward 1 minute\n"
2491 "mouse click seek to percentage in file corresponding to fraction of width\n"
2495 static void opt_input_file(const char *filename)
2496 {
2497 if (!strcmp(filename, "-"))
2498 filename = "pipe:";
2499 input_filename = filename;
2500 }
2502 /* Program entry point */
2503 int main(int argc, char **argv)
2504 {
2505 int flags;
2507 /* register all codecs, demuxers and protocols */
2508 avcodec_register_all();
2509 avdevice_register_all();
2510 av_register_all();
2512 show_banner(program_name, program_birth_year);
2514 parse_options(argc, argv, options, opt_input_file);
2516 if (!input_filename) {
2517 show_help();
2518 exit(1);
2519 }
2521 if (display_disable) {
2522 video_disable = 1;
2523 }
2524 flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
2525 #if !defined(__MINGW32__) && !defined(__APPLE__)
2526 flags |= SDL_INIT_EVENTTHREAD; /* Not supported on Windows or Mac OS X */
2527 #endif
2528 if (SDL_Init (flags)) {
2529 fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
2530 exit(1);
2531 }
2533 if (!display_disable) {
2534 #ifdef HAVE_SDL_VIDEO_SIZE
2535 const SDL_VideoInfo *vi = SDL_GetVideoInfo();
2536 fs_screen_width = vi->current_w;
2537 fs_screen_height = vi->current_h;
2538 #endif
2539 }
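/* Ignore the SDL event types that event_loop() does not handle so they do
 * not wake it up needlessly. */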
2541 SDL_EventState(SDL_ACTIVEEVENT, SDL_IGNORE);
2542 SDL_EventState(SDL_MOUSEMOTION, SDL_IGNORE);
2543 SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
2544 SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
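/* flush_pkt is a sentinel packet: it is queued after a seek (see
 * decode_thread) so the decoders know to flush their buffers before
 * decoding the newly demuxed packets. */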
2546 av_init_packet(&flush_pkt);
2547 flush_pkt.data= "FLUSH";
2549 cur_stream = stream_open(input_filename, file_iformat);
2551 event_loop();
2553 /* never returns */
2555 return 0;
2556 }