FFmpeg 4.0
ffplay.c
1 /*
2  * Copyright (c) 2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * simple media player based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <inttypes.h>
28 #include <math.h>
29 #include <limits.h>
30 #include <signal.h>
31 #include <stdint.h>
32 
33 #include "libavutil/avstring.h"
34 #include "libavutil/eval.h"
35 #include "libavutil/mathematics.h"
36 #include "libavutil/pixdesc.h"
37 #include "libavutil/imgutils.h"
38 #include "libavutil/dict.h"
39 #include "libavutil/parseutils.h"
40 #include "libavutil/samplefmt.h"
41 #include "libavutil/avassert.h"
42 #include "libavutil/time.h"
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
45 #include "libswscale/swscale.h"
46 #include "libavutil/opt.h"
47 #include "libavcodec/avfft.h"
48 #include "libswresample/swresample.h"
49 
50 #if CONFIG_AVFILTER
51 # include "libavfilter/avfilter.h"
52 # include "libavfilter/buffersink.h"
53 # include "libavfilter/buffersrc.h"
54 #endif
55 
56 #include <SDL.h>
57 #include <SDL_thread.h>
58 
59 #include "cmdutils.h"
60 
61 #include <assert.h>
62 
63 const char program_name[] = "ffplay";
64 const int program_birth_year = 2003;
65 
66 #define MAX_QUEUE_SIZE (15 * 1024 * 1024)
67 #define MIN_FRAMES 25
68 #define EXTERNAL_CLOCK_MIN_FRAMES 2
69 #define EXTERNAL_CLOCK_MAX_FRAMES 10
70 
71 /* Minimum SDL audio buffer size, in samples. */
72 #define SDL_AUDIO_MIN_BUFFER_SIZE 512
73 /* Calculate the actual buffer size, keeping in mind not to cause too frequent audio callbacks */
74 #define SDL_AUDIO_MAX_CALLBACKS_PER_SEC 30
75 
76 /* Step size for volume control in dB */
77 #define SDL_VOLUME_STEP (0.75)
78 
79 /* no AV sync correction is done if below the minimum AV sync threshold */
80 #define AV_SYNC_THRESHOLD_MIN 0.04
81 /* AV sync correction is done if above the maximum AV sync threshold */
82 #define AV_SYNC_THRESHOLD_MAX 0.1
83 /* If a frame duration is longer than this, it will not be duplicated to compensate AV sync */
84 #define AV_SYNC_FRAMEDUP_THRESHOLD 0.1
85 /* no AV correction is done if too big error */
86 #define AV_NOSYNC_THRESHOLD 10.0
87 
88 /* maximum audio speed change to get correct sync */
89 #define SAMPLE_CORRECTION_PERCENT_MAX 10
90 
91 /* external clock speed adjustment constants for realtime sources based on buffer fullness */
92 #define EXTERNAL_CLOCK_SPEED_MIN 0.900
93 #define EXTERNAL_CLOCK_SPEED_MAX 1.010
94 #define EXTERNAL_CLOCK_SPEED_STEP 0.001
95 
96 /* we use about AUDIO_DIFF_AVG_NB A-V differences to make the average */
97 #define AUDIO_DIFF_AVG_NB 20
98 
99 /* polls for possible required screen refresh at least this often, should be less than 1/fps */
100 #define REFRESH_RATE 0.01
101 
102 /* NOTE: the size must be big enough to compensate the hardware audio buffer size */
103 /* TODO: We assume that a decoded and resampled frame fits into this buffer */
104 #define SAMPLE_ARRAY_SIZE (8 * 65536)
105 
106 #define CURSOR_HIDE_DELAY 1000000
107 
108 #define USE_ONEPASS_SUBTITLE_RENDER 1
109 
110 static unsigned sws_flags = SWS_BICUBIC;
111 
112 typedef struct MyAVPacketList {
113  AVPacket pkt;
114  struct MyAVPacketList *next;
115  int serial;
116 } MyAVPacketList;
117 
118 typedef struct PacketQueue {
119  MyAVPacketList *first_pkt, *last_pkt;
120  int nb_packets;
121  int size;
122  int64_t duration;
123  int abort_request;
124  int serial;
125  SDL_mutex *mutex;
126  SDL_cond *cond;
127 } PacketQueue;
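The serial field is what makes seeking safe: packet_queue_put_private() (below) increments q->serial whenever the special flush_pkt is queued, and each MyAVPacketList remembers the serial that was current when it was added, so consumers can recognise and drop packets queued before the last flush. A minimal sketch of the producer side on a seek, assuming the flush-then-marker pattern used by the read thread (which is outside this excerpt):

    packet_queue_flush(&is->videoq);            /* drop everything queued so far          */
    packet_queue_put(&is->videoq, &flush_pkt);  /* put_private() sees flush_pkt: serial++ */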
128 
129 #define VIDEO_PICTURE_QUEUE_SIZE 3
130 #define SUBPICTURE_QUEUE_SIZE 16
131 #define SAMPLE_QUEUE_SIZE 9
132 #define FRAME_QUEUE_SIZE FFMAX(SAMPLE_QUEUE_SIZE, FFMAX(VIDEO_PICTURE_QUEUE_SIZE, SUBPICTURE_QUEUE_SIZE))
133 
134 typedef struct AudioParams {
135  int freq;
136  int channels;
137  int64_t channel_layout;
138  enum AVSampleFormat fmt;
139  int frame_size;
140  int bytes_per_sec;
141 } AudioParams;
142 
143 typedef struct Clock {
144  double pts; /* clock base */
145  double pts_drift; /* clock base minus time at which we updated the clock */
146  double last_updated;
147  double speed;
148  int serial; /* clock is based on a packet with this serial */
149  int paused;
150  int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */
151 } Clock;
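Storing pts_drift (the pts minus the wall-clock time of the last update) makes reading the clock a single addition: at speed 1.0 the current value is pts_drift plus the current wall-clock time. A small worked sketch using set_clock()/get_clock() defined further down; serial stands for the current packet queue serial:

    set_clock(&is->audclk, 10.0, serial);   /* clock now reads 10.0 s                        */
    /* 0.5 s of wall-clock time passes with no further update...                             */
    double t = get_clock(&is->audclk);      /* ~10.5 s: pts_drift + current wall-clock time  */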
152 
153 /* Common struct for handling all types of decoded data and allocated render buffers. */
154 typedef struct Frame {
155  AVFrame *frame;
156  AVSubtitle sub;
157  int serial;
158  double pts; /* presentation timestamp for the frame */
159  double duration; /* estimated duration of the frame */
160  int64_t pos; /* byte position of the frame in the input file */
161  int width;
162  int height;
163  int format;
164  AVRational sar;
165  int uploaded;
166  int flip_v;
167 } Frame;
168 
169 typedef struct FrameQueue {
170  Frame queue[FRAME_QUEUE_SIZE];
171  int rindex;
172  int windex;
173  int size;
174  int max_size;
175  int keep_last;
176  int rindex_shown;
177  SDL_mutex *mutex;
178  SDL_cond *cond;
179  PacketQueue *pktq;
180 } FrameQueue;
181 
182 enum {
183  AV_SYNC_AUDIO_MASTER, /* default choice */
184  AV_SYNC_VIDEO_MASTER,
185  AV_SYNC_EXTERNAL_CLOCK, /* synchronize to an external clock */
186 };
187 
188 typedef struct Decoder {
189  AVPacket pkt;
190  PacketQueue *queue;
191  AVCodecContext *avctx;
192  int pkt_serial;
193  int finished;
194  int packet_pending;
195  SDL_cond *empty_queue_cond;
196  int64_t start_pts;
197  AVRational start_pts_tb;
198  int64_t next_pts;
199  AVRational next_pts_tb;
200  SDL_Thread *decoder_tid;
201 } Decoder;
202 
203 typedef struct VideoState {
204  SDL_Thread *read_tid;
205  AVInputFormat *iformat;
206  int abort_request;
207  int force_refresh;
208  int paused;
209  int last_paused;
210  int queue_attachments_req;
211  int seek_req;
212  int seek_flags;
213  int64_t seek_pos;
214  int64_t seek_rel;
215  int read_pause_return;
216  AVFormatContext *ic;
217  int realtime;
218 
219  Clock audclk;
220  Clock vidclk;
221  Clock extclk;
222 
223  FrameQueue pictq;
224  FrameQueue subpq;
225  FrameQueue sampq;
226 
227  Decoder auddec;
228  Decoder viddec;
229  Decoder subdec;
230 
231  int audio_stream;
232 
233  int av_sync_type;
234 
235  double audio_clock;
236  int audio_clock_serial;
237  double audio_diff_cum; /* used for AV difference average computation */
238  double audio_diff_avg_coef;
239  double audio_diff_threshold;
240  int audio_diff_avg_count;
241  AVStream *audio_st;
242  PacketQueue audioq;
243  int audio_hw_buf_size;
244  uint8_t *audio_buf;
245  uint8_t *audio_buf1;
246  unsigned int audio_buf_size; /* in bytes */
247  unsigned int audio_buf1_size;
248  int audio_buf_index; /* in bytes */
249  int audio_write_buf_size;
250  int audio_volume;
251  int muted;
252  struct AudioParams audio_src;
253 #if CONFIG_AVFILTER
254  struct AudioParams audio_filter_src;
255 #endif
256  struct AudioParams audio_tgt;
257  struct SwrContext *swr_ctx;
258  int frame_drops_early;
259  int frame_drops_late;
260 
261  enum ShowMode {
262  SHOW_MODE_NONE = -1, SHOW_MODE_VIDEO = 0, SHOW_MODE_WAVES, SHOW_MODE_RDFT, SHOW_MODE_NB
263  } show_mode;
264  int16_t sample_array[SAMPLE_ARRAY_SIZE];
265  int sample_array_index;
266  int last_i_start;
267  RDFTContext *rdft;
268  int rdft_bits;
269  FFTSample *rdft_data;
270  int xpos;
271  double last_vis_time;
272  SDL_Texture *vis_texture;
273  SDL_Texture *sub_texture;
274  SDL_Texture *vid_texture;
275 
276  int subtitle_stream;
277  AVStream *subtitle_st;
278  PacketQueue subtitleq;
279 
280  double frame_timer;
281  double frame_last_returned_time;
282  double frame_last_filter_delay;
283  int video_stream;
284  AVStream *video_st;
285  PacketQueue videoq;
286  double max_frame_duration; // maximum duration of a frame - above this, we consider the jump a timestamp discontinuity
287  struct SwsContext *img_convert_ctx;
288  struct SwsContext *sub_convert_ctx;
289  int eof;
290 
291  char *filename;
292  int width, height, xleft, ytop;
293  int step;
294 
295 #if CONFIG_AVFILTER
296  int vfilter_idx;
297  AVFilterContext *in_video_filter; // the first filter in the video chain
298  AVFilterContext *out_video_filter; // the last filter in the video chain
299  AVFilterContext *in_audio_filter; // the first filter in the audio chain
300  AVFilterContext *out_audio_filter; // the last filter in the audio chain
301  AVFilterGraph *agraph; // audio filter graph
302 #endif
303 
304  int last_video_stream, last_audio_stream, last_subtitle_stream;
305 
306  SDL_cond *continue_read_thread;
307 } VideoState;
308 
309 /* options specified by the user */
310 static AVInputFormat *file_iformat;
311 static const char *input_filename;
312 static const char *window_title;
313 static int default_width = 640;
314 static int default_height = 480;
315 static int screen_width = 0;
316 static int screen_height = 0;
317 static int audio_disable;
318 static int video_disable;
319 static int subtitle_disable;
320 static const char* wanted_stream_spec[AVMEDIA_TYPE_NB] = {0};
321 static int seek_by_bytes = -1;
322 static int display_disable;
323 static int borderless;
324 static int startup_volume = 100;
325 static int show_status = 1;
326 static int av_sync_type = AV_SYNC_AUDIO_MASTER;
327 static int64_t start_time = AV_NOPTS_VALUE;
328 static int64_t duration = AV_NOPTS_VALUE;
329 static int fast = 0;
330 static int genpts = 0;
331 static int lowres = 0;
332 static int decoder_reorder_pts = -1;
333 static int autoexit;
334 static int exit_on_keydown;
335 static int exit_on_mousedown;
336 static int loop = 1;
337 static int framedrop = -1;
338 static int infinite_buffer = -1;
339 static enum ShowMode show_mode = SHOW_MODE_NONE;
340 static const char *audio_codec_name;
341 static const char *subtitle_codec_name;
342 static const char *video_codec_name;
343 double rdftspeed = 0.02;
344 static int64_t cursor_last_shown;
345 static int cursor_hidden = 0;
346 #if CONFIG_AVFILTER
347 static const char **vfilters_list = NULL;
348 static int nb_vfilters = 0;
349 static char *afilters = NULL;
350 #endif
351 static int autorotate = 1;
352 static int find_stream_info = 1;
353 
354 /* current context */
355 static int is_full_screen;
356 static int64_t audio_callback_time;
357 
358 static AVPacket flush_pkt;
359 
360 #define FF_QUIT_EVENT (SDL_USEREVENT + 2)
361 
362 static SDL_Window *window;
363 static SDL_Renderer *renderer;
364 static SDL_RendererInfo renderer_info = {0};
365 static SDL_AudioDeviceID audio_dev;
366 
367 static const struct TextureFormatEntry {
368  enum AVPixelFormat format;
369  int texture_fmt;
370 } sdl_texture_format_map[] = {
371  { AV_PIX_FMT_RGB8, SDL_PIXELFORMAT_RGB332 },
372  { AV_PIX_FMT_RGB444, SDL_PIXELFORMAT_RGB444 },
373  { AV_PIX_FMT_RGB555, SDL_PIXELFORMAT_RGB555 },
374  { AV_PIX_FMT_BGR555, SDL_PIXELFORMAT_BGR555 },
375  { AV_PIX_FMT_RGB565, SDL_PIXELFORMAT_RGB565 },
376  { AV_PIX_FMT_BGR565, SDL_PIXELFORMAT_BGR565 },
377  { AV_PIX_FMT_RGB24, SDL_PIXELFORMAT_RGB24 },
378  { AV_PIX_FMT_BGR24, SDL_PIXELFORMAT_BGR24 },
379  { AV_PIX_FMT_0RGB32, SDL_PIXELFORMAT_RGB888 },
380  { AV_PIX_FMT_0BGR32, SDL_PIXELFORMAT_BGR888 },
381  { AV_PIX_FMT_NE(RGB0, 0BGR), SDL_PIXELFORMAT_RGBX8888 },
382  { AV_PIX_FMT_NE(BGR0, 0RGB), SDL_PIXELFORMAT_BGRX8888 },
383  { AV_PIX_FMT_RGB32, SDL_PIXELFORMAT_ARGB8888 },
384  { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
385  { AV_PIX_FMT_BGR32, SDL_PIXELFORMAT_ABGR8888 },
386  { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
387  { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
388  { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
389  { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
390  { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_UNKNOWN },
391 };
392 
393 #if CONFIG_AVFILTER
394 static int opt_add_vfilter(void *optctx, const char *opt, const char *arg)
395 {
396  GROW_ARRAY(vfilters_list, nb_vfilters);
397  vfilters_list[nb_vfilters - 1] = arg;
398  return 0;
399 }
400 #endif
401 
402 static inline
403 int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1,
404  enum AVSampleFormat fmt2, int64_t channel_count2)
405 {
406  /* If channel count == 1, planar and non-planar formats are the same */
407  if (channel_count1 == 1 && channel_count2 == 1)
408  return av_get_packed_sample_fmt(fmt1) != av_get_packed_sample_fmt(fmt2);
409  else
410  return channel_count1 != channel_count2 || fmt1 != fmt2;
411 }
412 
413 static inline
414 int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
415 {
416  if (channel_layout && av_get_channel_layout_nb_channels(channel_layout) == channels)
417  return channel_layout;
418  else
419  return 0;
420 }
421 
422 static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
423 {
424  MyAVPacketList *pkt1;
425 
426  if (q->abort_request)
427  return -1;
428 
429  pkt1 = av_malloc(sizeof(MyAVPacketList));
430  if (!pkt1)
431  return -1;
432  pkt1->pkt = *pkt;
433  pkt1->next = NULL;
434  if (pkt == &flush_pkt)
435  q->serial++;
436  pkt1->serial = q->serial;
437 
438  if (!q->last_pkt)
439  q->first_pkt = pkt1;
440  else
441  q->last_pkt->next = pkt1;
442  q->last_pkt = pkt1;
443  q->nb_packets++;
444  q->size += pkt1->pkt.size + sizeof(*pkt1);
445  q->duration += pkt1->pkt.duration;
446  /* XXX: should duplicate packet data in DV case */
447  SDL_CondSignal(q->cond);
448  return 0;
449 }
450 
451 static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
452 {
453  int ret;
454 
455  SDL_LockMutex(q->mutex);
456  ret = packet_queue_put_private(q, pkt);
457  SDL_UnlockMutex(q->mutex);
458 
459  if (pkt != &flush_pkt && ret < 0)
460  av_packet_unref(pkt);
461 
462  return ret;
463 }
464 
465 static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
466 {
467  AVPacket pkt1, *pkt = &pkt1;
468  av_init_packet(pkt);
469  pkt->data = NULL;
470  pkt->size = 0;
471  pkt->stream_index = stream_index;
472  return packet_queue_put(q, pkt);
473 }
474 
475 /* packet queue handling */
476 static int packet_queue_init(PacketQueue *q)
477 {
478  memset(q, 0, sizeof(PacketQueue));
479  q->mutex = SDL_CreateMutex();
480  if (!q->mutex) {
481  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
482  return AVERROR(ENOMEM);
483  }
484  q->cond = SDL_CreateCond();
485  if (!q->cond) {
486  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
487  return AVERROR(ENOMEM);
488  }
489  q->abort_request = 1;
490  return 0;
491 }
492 
493 static void packet_queue_flush(PacketQueue *q)
494 {
495  MyAVPacketList *pkt, *pkt1;
496 
497  SDL_LockMutex(q->mutex);
498  for (pkt = q->first_pkt; pkt; pkt = pkt1) {
499  pkt1 = pkt->next;
500  av_packet_unref(&pkt->pkt);
501  av_freep(&pkt);
502  }
503  q->last_pkt = NULL;
504  q->first_pkt = NULL;
505  q->nb_packets = 0;
506  q->size = 0;
507  q->duration = 0;
508  SDL_UnlockMutex(q->mutex);
509 }
510 
511 static void packet_queue_destroy(PacketQueue *q)
512 {
513  packet_queue_flush(q);
514  SDL_DestroyMutex(q->mutex);
515  SDL_DestroyCond(q->cond);
516 }
517 
518 static void packet_queue_abort(PacketQueue *q)
519 {
520  SDL_LockMutex(q->mutex);
521 
522  q->abort_request = 1;
523 
524  SDL_CondSignal(q->cond);
525 
526  SDL_UnlockMutex(q->mutex);
527 }
528 
529 static void packet_queue_start(PacketQueue *q)
530 {
531  SDL_LockMutex(q->mutex);
532  q->abort_request = 0;
533  packet_queue_put_private(q, &flush_pkt);
534  SDL_UnlockMutex(q->mutex);
535 }
536 
537 /* return < 0 if aborted, 0 if no packet and > 0 if packet. */
538 static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
539 {
540  MyAVPacketList *pkt1;
541  int ret;
542 
543  SDL_LockMutex(q->mutex);
544 
545  for (;;) {
546  if (q->abort_request) {
547  ret = -1;
548  break;
549  }
550 
551  pkt1 = q->first_pkt;
552  if (pkt1) {
553  q->first_pkt = pkt1->next;
554  if (!q->first_pkt)
555  q->last_pkt = NULL;
556  q->nb_packets--;
557  q->size -= pkt1->pkt.size + sizeof(*pkt1);
558  q->duration -= pkt1->pkt.duration;
559  *pkt = pkt1->pkt;
560  if (serial)
561  *serial = pkt1->serial;
562  av_free(pkt1);
563  ret = 1;
564  break;
565  } else if (!block) {
566  ret = 0;
567  break;
568  } else {
569  SDL_CondWait(q->cond, q->mutex);
570  }
571  }
572  SDL_UnlockMutex(q->mutex);
573  return ret;
574 }
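A minimal consumer sketch, assuming avctx is the corresponding AVCodecContext; this is roughly the pattern that decoder_decode_frame() (below) implements with extra bookkeeping:

    AVPacket pkt;
    int serial;
    for (;;) {
        if (packet_queue_get(q, &pkt, 1, &serial) < 0)
            break;                                /* queue was aborted                    */
        if (pkt.data == flush_pkt.data) {         /* a seek happened                      */
            avcodec_flush_buffers(avctx);         /* drop codec-internal state            */
            continue;
        }
        if (serial != q->serial) {                /* queued before the last flush: stale  */
            av_packet_unref(&pkt);
            continue;
        }
        /* ...feed pkt to the decoder... */
        av_packet_unref(&pkt);
    }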
575 
576 static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond) {
577  memset(d, 0, sizeof(Decoder));
578  d->avctx = avctx;
579  d->queue = queue;
580  d->empty_queue_cond = empty_queue_cond;
581  d->start_pts = AV_NOPTS_VALUE;
582  d->pkt_serial = -1;
583 }
584 
585 static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) {
586  int ret = AVERROR(EAGAIN);
587 
588  for (;;) {
589  AVPacket pkt;
590 
591  if (d->queue->serial == d->pkt_serial) {
592  do {
593  if (d->queue->abort_request)
594  return -1;
595 
596  switch (d->avctx->codec_type) {
597  case AVMEDIA_TYPE_VIDEO:
598  ret = avcodec_receive_frame(d->avctx, frame);
599  if (ret >= 0) {
600  if (decoder_reorder_pts == -1) {
601  frame->pts = frame->best_effort_timestamp;
602  } else if (!decoder_reorder_pts) {
603  frame->pts = frame->pkt_dts;
604  }
605  }
606  break;
607  case AVMEDIA_TYPE_AUDIO:
608  ret = avcodec_receive_frame(d->avctx, frame);
609  if (ret >= 0) {
610  AVRational tb = (AVRational){1, frame->sample_rate};
611  if (frame->pts != AV_NOPTS_VALUE)
612  frame->pts = av_rescale_q(frame->pts, d->avctx->pkt_timebase, tb);
613  else if (d->next_pts != AV_NOPTS_VALUE)
614  frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb);
615  if (frame->pts != AV_NOPTS_VALUE) {
616  d->next_pts = frame->pts + frame->nb_samples;
617  d->next_pts_tb = tb;
618  }
619  }
620  break;
621  }
622  if (ret == AVERROR_EOF) {
623  d->finished = d->pkt_serial;
624  avcodec_flush_buffers(d->avctx);
625  return 0;
626  }
627  if (ret >= 0)
628  return 1;
629  } while (ret != AVERROR(EAGAIN));
630  }
631 
632  do {
633  if (d->queue->nb_packets == 0)
634  SDL_CondSignal(d->empty_queue_cond);
635  if (d->packet_pending) {
636  av_packet_move_ref(&pkt, &d->pkt);
637  d->packet_pending = 0;
638  } else {
639  if (packet_queue_get(d->queue, &pkt, 1, &d->pkt_serial) < 0)
640  return -1;
641  }
642  } while (d->queue->serial != d->pkt_serial);
643 
644  if (pkt.data == flush_pkt.data) {
645  avcodec_flush_buffers(d->avctx);
646  d->finished = 0;
647  d->next_pts = d->start_pts;
648  d->next_pts_tb = d->start_pts_tb;
649  } else {
650  if (d->avctx->codec_type == AVMEDIA_TYPE_SUBTITLE) {
651  int got_frame = 0;
652  ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &pkt);
653  if (ret < 0) {
654  ret = AVERROR(EAGAIN);
655  } else {
656  if (got_frame && !pkt.data) {
657  d->packet_pending = 1;
658  av_packet_move_ref(&d->pkt, &pkt);
659  }
660  ret = got_frame ? 0 : (pkt.data ? AVERROR(EAGAIN) : AVERROR_EOF);
661  }
662  } else {
663  if (avcodec_send_packet(d->avctx, &pkt) == AVERROR(EAGAIN)) {
664  av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n");
665  d->packet_pending = 1;
666  av_packet_move_ref(&d->pkt, &pkt);
667  }
668  }
669  av_packet_unref(&pkt);
670  }
671  }
672 }
673 
674 static void decoder_destroy(Decoder *d) {
675  av_packet_unref(&d->pkt);
676  avcodec_free_context(&d->avctx);
677 }
678 
679 static void frame_queue_unref_item(Frame *vp)
680 {
681  av_frame_unref(vp->frame);
682  avsubtitle_free(&vp->sub);
683 }
684 
685 static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
686 {
687  int i;
688  memset(f, 0, sizeof(FrameQueue));
689  if (!(f->mutex = SDL_CreateMutex())) {
690  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
691  return AVERROR(ENOMEM);
692  }
693  if (!(f->cond = SDL_CreateCond())) {
694  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
695  return AVERROR(ENOMEM);
696  }
697  f->pktq = pktq;
698  f->max_size = FFMIN(max_size, FRAME_QUEUE_SIZE);
699  f->keep_last = !!keep_last;
700  for (i = 0; i < f->max_size; i++)
701  if (!(f->queue[i].frame = av_frame_alloc()))
702  return AVERROR(ENOMEM);
703  return 0;
704 }
705 
706 static void frame_queue_destory(FrameQueue *f)
707 {
708  int i;
709  for (i = 0; i < f->max_size; i++) {
710  Frame *vp = &f->queue[i];
711  frame_queue_unref_item(vp);
712  av_frame_free(&vp->frame);
713  }
714  SDL_DestroyMutex(f->mutex);
715  SDL_DestroyCond(f->cond);
716 }
717 
718 static void frame_queue_signal(FrameQueue *f)
719 {
720  SDL_LockMutex(f->mutex);
721  SDL_CondSignal(f->cond);
722  SDL_UnlockMutex(f->mutex);
723 }
724 
725 static Frame *frame_queue_peek(FrameQueue *f)
726 {
727  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
728 }
729 
730 static Frame *frame_queue_peek_next(FrameQueue *f)
731 {
732  return &f->queue[(f->rindex + f->rindex_shown + 1) % f->max_size];
733 }
734 
735 static Frame *frame_queue_peek_last(FrameQueue *f)
736 {
737  return &f->queue[f->rindex];
738 }
739 
740 static Frame *frame_queue_peek_writable(FrameQueue *f)
741 {
742  /* wait until we have space to put a new frame */
743  SDL_LockMutex(f->mutex);
744  while (f->size >= f->max_size &&
745  !f->pktq->abort_request) {
746  SDL_CondWait(f->cond, f->mutex);
747  }
748  SDL_UnlockMutex(f->mutex);
749 
750  if (f->pktq->abort_request)
751  return NULL;
752 
753  return &f->queue[f->windex];
754 }
755 
756 static Frame *frame_queue_peek_readable(FrameQueue *f)
757 {
758  /* wait until we have a readable new frame */
759  SDL_LockMutex(f->mutex);
760  while (f->size - f->rindex_shown <= 0 &&
761  !f->pktq->abort_request) {
762  SDL_CondWait(f->cond, f->mutex);
763  }
764  SDL_UnlockMutex(f->mutex);
765 
766  if (f->pktq->abort_request)
767  return NULL;
768 
769  return &f->queue[(f->rindex + f->rindex_shown) % f->max_size];
770 }
771 
772 static void frame_queue_push(FrameQueue *f)
773 {
774  if (++f->windex == f->max_size)
775  f->windex = 0;
776  SDL_LockMutex(f->mutex);
777  f->size++;
778  SDL_CondSignal(f->cond);
779  SDL_UnlockMutex(f->mutex);
780 }
781 
782 static void frame_queue_next(FrameQueue *f)
783 {
784  if (f->keep_last && !f->rindex_shown) {
785  f->rindex_shown = 1;
786  return;
787  }
788  frame_queue_unref_item(&f->queue[f->rindex]);
789  if (++f->rindex == f->max_size)
790  f->rindex = 0;
791  SDL_LockMutex(f->mutex);
792  f->size--;
793  SDL_CondSignal(f->cond);
794  SDL_UnlockMutex(f->mutex);
795 }
796 
797 /* return the number of undisplayed frames in the queue */
798 static int frame_queue_nb_remaining(FrameQueue *f)
799 {
800  return f->size - f->rindex_shown;
801 }
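Together these functions form a bounded single-producer/single-consumer ring buffer. A minimal sketch of the two sides, assuming decoded is an AVFrame produced by a decoder thread:

    /* producer (decoder thread) */
    Frame *vp = frame_queue_peek_writable(&is->pictq);   /* blocks while the queue is full */
    if (!vp)
        return -1;                                       /* the packet queue was aborted   */
    av_frame_move_ref(vp->frame, decoded);
    frame_queue_push(&is->pictq);

    /* consumer (video refresh) */
    if (frame_queue_nb_remaining(&is->pictq) > 0) {
        Frame *next = frame_queue_peek(&is->pictq);      /* next undisplayed frame         */
        /* ...upload and render next->frame... */
        frame_queue_next(&is->pictq);                    /* unref it and advance rindex    */
    }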
802 
803 /* return last shown position */
804 static int64_t frame_queue_last_pos(FrameQueue *f)
805 {
806  Frame *fp = &f->queue[f->rindex];
807  if (f->rindex_shown && fp->serial == f->pktq->serial)
808  return fp->pos;
809  else
810  return -1;
811 }
812 
813 static void decoder_abort(Decoder *d, FrameQueue *fq)
814 {
815  packet_queue_abort(d->queue);
816  frame_queue_signal(fq);
817  SDL_WaitThread(d->decoder_tid, NULL);
818  d->decoder_tid = NULL;
819  packet_queue_flush(d->queue);
820 }
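decoder_init(), decoder_start() (at the end of this file), decoder_abort() and decoder_destroy() bracket a decoder's life. A minimal sketch of the audio case, an assumption about how stream_component_open (not in this excerpt) and stream_component_close (below) tie them together:

    decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
    decoder_start(&is->auddec, audio_thread, is);   /* start the queue and the SDL thread    */
    /* ...playback... */
    decoder_abort(&is->auddec, &is->sampq);         /* abort + flush the queue, join thread  */
    decoder_destroy(&is->auddec);                   /* unref pending packet, free the codec  */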
821 
822 static inline void fill_rectangle(int x, int y, int w, int h)
823 {
824  SDL_Rect rect;
825  rect.x = x;
826  rect.y = y;
827  rect.w = w;
828  rect.h = h;
829  if (w && h)
830  SDL_RenderFillRect(renderer, &rect);
831 }
832 
833 static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
834 {
835  Uint32 format;
836  int access, w, h;
837  if (SDL_QueryTexture(*texture, &format, &access, &w, &h) < 0 || new_width != w || new_height != h || new_format != format) {
838  void *pixels;
839  int pitch;
840  SDL_DestroyTexture(*texture);
841  if (!(*texture = SDL_CreateTexture(renderer, new_format, SDL_TEXTUREACCESS_STREAMING, new_width, new_height)))
842  return -1;
843  if (SDL_SetTextureBlendMode(*texture, blendmode) < 0)
844  return -1;
845  if (init_texture) {
846  if (SDL_LockTexture(*texture, NULL, &pixels, &pitch) < 0)
847  return -1;
848  memset(pixels, 0, pitch * new_height);
849  SDL_UnlockTexture(*texture);
850  }
851  av_log(NULL, AV_LOG_VERBOSE, "Created %dx%d texture with %s.\n", new_width, new_height, SDL_GetPixelFormatName(new_format));
852  }
853  return 0;
854 }
855 
856 static void calculate_display_rect(SDL_Rect *rect,
857  int scr_xleft, int scr_ytop, int scr_width, int scr_height,
858  int pic_width, int pic_height, AVRational pic_sar)
859 {
860  float aspect_ratio;
861  int width, height, x, y;
862 
863  if (pic_sar.num == 0)
864  aspect_ratio = 0;
865  else
866  aspect_ratio = av_q2d(pic_sar);
867 
868  if (aspect_ratio <= 0.0)
869  aspect_ratio = 1.0;
870  aspect_ratio *= (float)pic_width / (float)pic_height;
871 
872  /* XXX: we suppose the screen has a 1.0 pixel ratio */
873  height = scr_height;
874  width = lrint(height * aspect_ratio) & ~1;
875  if (width > scr_width) {
876  width = scr_width;
877  height = lrint(width / aspect_ratio) & ~1;
878  }
879  x = (scr_width - width) / 2;
880  y = (scr_height - height) / 2;
881  rect->x = scr_xleft + x;
882  rect->y = scr_ytop + y;
883  rect->w = FFMAX(width, 1);
884  rect->h = FFMAX(height, 1);
885 }
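A worked example of the fitting arithmetic, assuming a 1920x800 picture with square pixels (SAR 1:1) displayed in a 640x480 area: aspect_ratio = 1920/800 = 2.4, so a height of 480 would need width 1152; that exceeds 640, so the width stays 640 and the height becomes lrint(640 / 2.4) & ~1 = 266, centred at y = (480 - 266) / 2 = 107.

    SDL_Rect rect;
    calculate_display_rect(&rect, 0, 0, 640, 480, 1920, 800, (AVRational){1, 1});
    /* rect == { .x = 0, .y = 107, .w = 640, .h = 266 } */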
886 
887 static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
888 {
889  int i;
890  *sdl_blendmode = SDL_BLENDMODE_NONE;
891  *sdl_pix_fmt = SDL_PIXELFORMAT_UNKNOWN;
892  if (format == AV_PIX_FMT_RGB32 ||
893  format == AV_PIX_FMT_RGB32_1 ||
894  format == AV_PIX_FMT_BGR32 ||
895  format == AV_PIX_FMT_BGR32_1)
896  *sdl_blendmode = SDL_BLENDMODE_BLEND;
897  for (i = 0; i < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; i++) {
898  if (format == sdl_texture_format_map[i].format) {
899  *sdl_pix_fmt = sdl_texture_format_map[i].texture_fmt;
900  return;
901  }
902  }
903 }
904 
905 static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx) {
906  int ret = 0;
907  Uint32 sdl_pix_fmt;
908  SDL_BlendMode sdl_blendmode;
909  get_sdl_pix_fmt_and_blendmode(frame->format, &sdl_pix_fmt, &sdl_blendmode);
910  if (realloc_texture(tex, sdl_pix_fmt == SDL_PIXELFORMAT_UNKNOWN ? SDL_PIXELFORMAT_ARGB8888 : sdl_pix_fmt, frame->width, frame->height, sdl_blendmode, 0) < 0)
911  return -1;
912  switch (sdl_pix_fmt) {
913  case SDL_PIXELFORMAT_UNKNOWN:
914  /* This should only happen if we are not using avfilter... */
915  *img_convert_ctx = sws_getCachedContext(*img_convert_ctx,
916  frame->width, frame->height, frame->format, frame->width, frame->height,
917  AV_PIX_FMT_BGRA, sws_flags, NULL, NULL, NULL);
918  if (*img_convert_ctx != NULL) {
919  uint8_t *pixels[4];
920  int pitch[4];
921  if (!SDL_LockTexture(*tex, NULL, (void **)pixels, pitch)) {
922  sws_scale(*img_convert_ctx, (const uint8_t * const *)frame->data, frame->linesize,
923  0, frame->height, pixels, pitch);
924  SDL_UnlockTexture(*tex);
925  }
926  } else {
927  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
928  ret = -1;
929  }
930  break;
931  case SDL_PIXELFORMAT_IYUV:
932  if (frame->linesize[0] > 0 && frame->linesize[1] > 0 && frame->linesize[2] > 0) {
933  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0], frame->linesize[0],
934  frame->data[1], frame->linesize[1],
935  frame->data[2], frame->linesize[2]);
936  } else if (frame->linesize[0] < 0 && frame->linesize[1] < 0 && frame->linesize[2] < 0) {
937  ret = SDL_UpdateYUVTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0],
938  frame->data[1] + frame->linesize[1] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[1],
939  frame->data[2] + frame->linesize[2] * (AV_CEIL_RSHIFT(frame->height, 1) - 1), -frame->linesize[2]);
940  } else {
941  av_log(NULL, AV_LOG_ERROR, "Mixed negative and positive linesizes are not supported.\n");
942  return -1;
943  }
944  break;
945  default:
946  if (frame->linesize[0] < 0) {
947  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0] + frame->linesize[0] * (frame->height - 1), -frame->linesize[0]);
948  } else {
949  ret = SDL_UpdateTexture(*tex, NULL, frame->data[0], frame->linesize[0]);
950  }
951  break;
952  }
953  return ret;
954 }
955 
956 static void video_image_display(VideoState *is)
957 {
958  Frame *vp;
959  Frame *sp = NULL;
960  SDL_Rect rect;
961 
962  vp = frame_queue_peek_last(&is->pictq);
963  if (is->subtitle_st) {
964  if (frame_queue_nb_remaining(&is->subpq) > 0) {
965  sp = frame_queue_peek(&is->subpq);
966 
967  if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
968  if (!sp->uploaded) {
969  uint8_t* pixels[4];
970  int pitch[4];
971  int i;
972  if (!sp->width || !sp->height) {
973  sp->width = vp->width;
974  sp->height = vp->height;
975  }
976  if (realloc_texture(&is->sub_texture, SDL_PIXELFORMAT_ARGB8888, sp->width, sp->height, SDL_BLENDMODE_BLEND, 1) < 0)
977  return;
978 
979  for (i = 0; i < sp->sub.num_rects; i++) {
980  AVSubtitleRect *sub_rect = sp->sub.rects[i];
981 
982  sub_rect->x = av_clip(sub_rect->x, 0, sp->width );
983  sub_rect->y = av_clip(sub_rect->y, 0, sp->height);
984  sub_rect->w = av_clip(sub_rect->w, 0, sp->width - sub_rect->x);
985  sub_rect->h = av_clip(sub_rect->h, 0, sp->height - sub_rect->y);
986 
987  is->sub_convert_ctx = sws_getCachedContext(is->sub_convert_ctx,
988  sub_rect->w, sub_rect->h, AV_PIX_FMT_PAL8,
989  sub_rect->w, sub_rect->h, AV_PIX_FMT_BGRA,
990  0, NULL, NULL, NULL);
991  if (!is->sub_convert_ctx) {
992  av_log(NULL, AV_LOG_FATAL, "Cannot initialize the conversion context\n");
993  return;
994  }
995  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)pixels, pitch)) {
996  sws_scale(is->sub_convert_ctx, (const uint8_t * const *)sub_rect->data, sub_rect->linesize,
997  0, sub_rect->h, pixels, pitch);
998  SDL_UnlockTexture(is->sub_texture);
999  }
1000  }
1001  sp->uploaded = 1;
1002  }
1003  } else
1004  sp = NULL;
1005  }
1006  }
1007 
1008  calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
1009 
1010  if (!vp->uploaded) {
1011  if (upload_texture(&is->vid_texture, vp->frame, &is->img_convert_ctx) < 0)
1012  return;
1013  vp->uploaded = 1;
1014  vp->flip_v = vp->frame->linesize[0] < 0;
1015  }
1016 
1017  SDL_RenderCopyEx(renderer, is->vid_texture, NULL, &rect, 0, NULL, vp->flip_v ? SDL_FLIP_VERTICAL : 0);
1018  if (sp) {
1019 #if USE_ONEPASS_SUBTITLE_RENDER
1020  SDL_RenderCopy(renderer, is->sub_texture, NULL, &rect);
1021 #else
1022  int i;
1023  double xratio = (double)rect.w / (double)sp->width;
1024  double yratio = (double)rect.h / (double)sp->height;
1025  for (i = 0; i < sp->sub.num_rects; i++) {
1026  SDL_Rect *sub_rect = (SDL_Rect*)sp->sub.rects[i];
1027  SDL_Rect target = {.x = rect.x + sub_rect->x * xratio,
1028  .y = rect.y + sub_rect->y * yratio,
1029  .w = sub_rect->w * xratio,
1030  .h = sub_rect->h * yratio};
1031  SDL_RenderCopy(renderer, is->sub_texture, sub_rect, &target);
1032  }
1033 #endif
1034  }
1035 }
1036 
1037 static inline int compute_mod(int a, int b)
1038 {
1039  return a < 0 ? a%b + b : a%b;
1040 }
1041 
1042 static void video_audio_display(VideoState *s)
1043 {
1044  int i, i_start, x, y1, y, ys, delay, n, nb_display_channels;
1045  int ch, channels, h, h2;
1046  int64_t time_diff;
1047  int rdft_bits, nb_freq;
1048 
1049  for (rdft_bits = 1; (1 << rdft_bits) < 2 * s->height; rdft_bits++)
1050  ;
1051  nb_freq = 1 << (rdft_bits - 1);
1052 
1053  /* compute display index : center on currently output samples */
1054  channels = s->audio_tgt.channels;
1055  nb_display_channels = channels;
1056  if (!s->paused) {
1057  int data_used= s->show_mode == SHOW_MODE_WAVES ? s->width : (2*nb_freq);
1058  n = 2 * channels;
1059  delay = s->audio_write_buf_size;
1060  delay /= n;
1061 
1062  /* to be more precise, we take into account the time spent since
1063  the last buffer computation */
1064  if (audio_callback_time) {
1065  time_diff = av_gettime_relative() - audio_callback_time;
1066  delay -= (time_diff * s->audio_tgt.freq) / 1000000;
1067  }
1068 
1069  delay += 2 * data_used;
1070  if (delay < data_used)
1071  delay = data_used;
1072 
1073  i_start= x = compute_mod(s->sample_array_index - delay * channels, SAMPLE_ARRAY_SIZE);
1074  if (s->show_mode == SHOW_MODE_WAVES) {
1075  h = INT_MIN;
1076  for (i = 0; i < 1000; i += channels) {
1077  int idx = (SAMPLE_ARRAY_SIZE + x - i) % SAMPLE_ARRAY_SIZE;
1078  int a = s->sample_array[idx];
1079  int b = s->sample_array[(idx + 4 * channels) % SAMPLE_ARRAY_SIZE];
1080  int c = s->sample_array[(idx + 5 * channels) % SAMPLE_ARRAY_SIZE];
1081  int d = s->sample_array[(idx + 9 * channels) % SAMPLE_ARRAY_SIZE];
1082  int score = a - d;
1083  if (h < score && (b ^ c) < 0) {
1084  h = score;
1085  i_start = idx;
1086  }
1087  }
1088  }
1089 
1090  s->last_i_start = i_start;
1091  } else {
1092  i_start = s->last_i_start;
1093  }
1094 
1095  if (s->show_mode == SHOW_MODE_WAVES) {
1096  SDL_SetRenderDrawColor(renderer, 255, 255, 255, 255);
1097 
1098  /* total height for one channel */
1099  h = s->height / nb_display_channels;
1100  /* graph height / 2 */
1101  h2 = (h * 9) / 20;
1102  for (ch = 0; ch < nb_display_channels; ch++) {
1103  i = i_start + ch;
1104  y1 = s->ytop + ch * h + (h / 2); /* position of center line */
1105  for (x = 0; x < s->width; x++) {
1106  y = (s->sample_array[i] * h2) >> 15;
1107  if (y < 0) {
1108  y = -y;
1109  ys = y1 - y;
1110  } else {
1111  ys = y1;
1112  }
1113  fill_rectangle(s->xleft + x, ys, 1, y);
1114  i += channels;
1115  if (i >= SAMPLE_ARRAY_SIZE)
1116  i -= SAMPLE_ARRAY_SIZE;
1117  }
1118  }
1119 
1120  SDL_SetRenderDrawColor(renderer, 0, 0, 255, 255);
1121 
1122  for (ch = 1; ch < nb_display_channels; ch++) {
1123  y = s->ytop + ch * h;
1124  fill_rectangle(s->xleft, y, s->width, 1);
1125  }
1126  } else {
1127  if (realloc_texture(&s->vis_texture, SDL_PIXELFORMAT_ARGB8888, s->width, s->height, SDL_BLENDMODE_NONE, 1) < 0)
1128  return;
1129 
1130  nb_display_channels= FFMIN(nb_display_channels, 2);
1131  if (rdft_bits != s->rdft_bits) {
1132  av_rdft_end(s->rdft);
1133  av_free(s->rdft_data);
1134  s->rdft = av_rdft_init(rdft_bits, DFT_R2C);
1135  s->rdft_bits = rdft_bits;
1136  s->rdft_data = av_malloc_array(nb_freq, 4 *sizeof(*s->rdft_data));
1137  }
1138  if (!s->rdft || !s->rdft_data){
1139  av_log(NULL, AV_LOG_ERROR, "Failed to allocate buffers for RDFT, switching to waves display\n");
1140  s->show_mode = SHOW_MODE_WAVES;
1141  } else {
1142  FFTSample *data[2];
1143  SDL_Rect rect = {.x = s->xpos, .y = 0, .w = 1, .h = s->height};
1144  uint32_t *pixels;
1145  int pitch;
1146  for (ch = 0; ch < nb_display_channels; ch++) {
1147  data[ch] = s->rdft_data + 2 * nb_freq * ch;
1148  i = i_start + ch;
1149  for (x = 0; x < 2 * nb_freq; x++) {
1150  double w = (x-nb_freq) * (1.0 / nb_freq);
1151  data[ch][x] = s->sample_array[i] * (1.0 - w * w);
1152  i += channels;
1153  if (i >= SAMPLE_ARRAY_SIZE)
1154  i -= SAMPLE_ARRAY_SIZE;
1155  }
1156  av_rdft_calc(s->rdft, data[ch]);
1157  }
1158  /* Least efficient way to do this, we should of course
1159  * directly access it but it is more than fast enough. */
1160  if (!SDL_LockTexture(s->vis_texture, &rect, (void **)&pixels, &pitch)) {
1161  pitch >>= 2;
1162  pixels += pitch * s->height;
1163  for (y = 0; y < s->height; y++) {
1164  double w = 1 / sqrt(nb_freq);
1165  int a = sqrt(w * sqrt(data[0][2 * y + 0] * data[0][2 * y + 0] + data[0][2 * y + 1] * data[0][2 * y + 1]));
1166  int b = (nb_display_channels == 2 ) ? sqrt(w * hypot(data[1][2 * y + 0], data[1][2 * y + 1]))
1167  : a;
1168  a = FFMIN(a, 255);
1169  b = FFMIN(b, 255);
1170  pixels -= pitch;
1171  *pixels = (a << 16) + (b << 8) + ((a+b) >> 1);
1172  }
1173  SDL_UnlockTexture(s->vis_texture);
1174  }
1175  SDL_RenderCopy(renderer, s->vis_texture, NULL, NULL);
1176  }
1177  if (!s->paused)
1178  s->xpos++;
1179  if (s->xpos >= s->width)
1180  s->xpos= s->xleft;
1181  }
1182 }
1183 
1184 static void stream_component_close(VideoState *is, int stream_index)
1185 {
1186  AVFormatContext *ic = is->ic;
1187  AVCodecParameters *codecpar;
1188 
1189  if (stream_index < 0 || stream_index >= ic->nb_streams)
1190  return;
1191  codecpar = ic->streams[stream_index]->codecpar;
1192 
1193  switch (codecpar->codec_type) {
1194  case AVMEDIA_TYPE_AUDIO:
1195  decoder_abort(&is->auddec, &is->sampq);
1196  SDL_CloseAudioDevice(audio_dev);
1197  decoder_destroy(&is->auddec);
1198  swr_free(&is->swr_ctx);
1199  av_freep(&is->audio_buf1);
1200  is->audio_buf1_size = 0;
1201  is->audio_buf = NULL;
1202 
1203  if (is->rdft) {
1204  av_rdft_end(is->rdft);
1205  av_freep(&is->rdft_data);
1206  is->rdft = NULL;
1207  is->rdft_bits = 0;
1208  }
1209  break;
1210  case AVMEDIA_TYPE_VIDEO:
1211  decoder_abort(&is->viddec, &is->pictq);
1212  decoder_destroy(&is->viddec);
1213  break;
1214  case AVMEDIA_TYPE_SUBTITLE:
1215  decoder_abort(&is->subdec, &is->subpq);
1216  decoder_destroy(&is->subdec);
1217  break;
1218  default:
1219  break;
1220  }
1221 
1222  ic->streams[stream_index]->discard = AVDISCARD_ALL;
1223  switch (codecpar->codec_type) {
1224  case AVMEDIA_TYPE_AUDIO:
1225  is->audio_st = NULL;
1226  is->audio_stream = -1;
1227  break;
1228  case AVMEDIA_TYPE_VIDEO:
1229  is->video_st = NULL;
1230  is->video_stream = -1;
1231  break;
1232  case AVMEDIA_TYPE_SUBTITLE:
1233  is->subtitle_st = NULL;
1234  is->subtitle_stream = -1;
1235  break;
1236  default:
1237  break;
1238  }
1239 }
1240 
1241 static void stream_close(VideoState *is)
1242 {
1243  /* XXX: use a special url_shutdown call to abort parse cleanly */
1244  is->abort_request = 1;
1245  SDL_WaitThread(is->read_tid, NULL);
1246 
1247  /* close each stream */
1248  if (is->audio_stream >= 0)
1249  stream_component_close(is, is->audio_stream);
1250  if (is->video_stream >= 0)
1251  stream_component_close(is, is->video_stream);
1252  if (is->subtitle_stream >= 0)
1253  stream_component_close(is, is->subtitle_stream);
1254 
1255  avformat_close_input(&is->ic);
1256 
1257  packet_queue_destroy(&is->videoq);
1258  packet_queue_destroy(&is->audioq);
1259  packet_queue_destroy(&is->subtitleq);
1260 
1261  /* free all pictures */
1262  frame_queue_destory(&is->pictq);
1263  frame_queue_destory(&is->sampq);
1264  frame_queue_destory(&is->subpq);
1265  SDL_DestroyCond(is->continue_read_thread);
1266  sws_freeContext(is->img_convert_ctx);
1267  sws_freeContext(is->sub_convert_ctx);
1268  av_free(is->filename);
1269  if (is->vis_texture)
1270  SDL_DestroyTexture(is->vis_texture);
1271  if (is->vid_texture)
1272  SDL_DestroyTexture(is->vid_texture);
1273  if (is->sub_texture)
1274  SDL_DestroyTexture(is->sub_texture);
1275  av_free(is);
1276 }
1277 
1278 static void do_exit(VideoState *is)
1279 {
1280  if (is) {
1281  stream_close(is);
1282  }
1283  if (renderer)
1284  SDL_DestroyRenderer(renderer);
1285  if (window)
1286  SDL_DestroyWindow(window);
1287  uninit_opts();
1288 #if CONFIG_AVFILTER
1289  av_freep(&vfilters_list);
1290 #endif
1291  avformat_network_deinit();
1292  if (show_status)
1293  printf("\n");
1294  SDL_Quit();
1295  av_log(NULL, AV_LOG_QUIET, "%s", "");
1296  exit(0);
1297 }
1298 
1299 static void sigterm_handler(int sig)
1300 {
1301  exit(123);
1302 }
1303 
1304 static void set_default_window_size(int width, int height, AVRational sar)
1305 {
1306  SDL_Rect rect;
1307  calculate_display_rect(&rect, 0, 0, INT_MAX, height, width, height, sar);
1308  default_width = rect.w;
1309  default_height = rect.h;
1310 }
1311 
1312 static int video_open(VideoState *is)
1313 {
1314  int w,h;
1315 
1316  if (screen_width) {
1317  w = screen_width;
1318  h = screen_height;
1319  } else {
1320  w = default_width;
1321  h = default_height;
1322  }
1323 
1324  if (!window_title)
1325  window_title = input_filename;
1326  SDL_SetWindowTitle(window, window_title);
1327 
1328  SDL_SetWindowSize(window, w, h);
1329  SDL_SetWindowPosition(window, SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED);
1330  if (is_full_screen)
1331  SDL_SetWindowFullscreen(window, SDL_WINDOW_FULLSCREEN_DESKTOP);
1332  SDL_ShowWindow(window);
1333 
1334  is->width = w;
1335  is->height = h;
1336 
1337  return 0;
1338 }
1339 
1340 /* display the current picture, if any */
1341 static void video_display(VideoState *is)
1342 {
1343  if (!is->width)
1344  video_open(is);
1345 
1346  SDL_SetRenderDrawColor(renderer, 0, 0, 0, 255);
1347  SDL_RenderClear(renderer);
1348  if (is->audio_st && is->show_mode != SHOW_MODE_VIDEO)
1349  video_audio_display(is);
1350  else if (is->video_st)
1351  video_image_display(is);
1352  SDL_RenderPresent(renderer);
1353 }
1354 
1355 static double get_clock(Clock *c)
1356 {
1357  if (*c->queue_serial != c->serial)
1358  return NAN;
1359  if (c->paused) {
1360  return c->pts;
1361  } else {
1362  double time = av_gettime_relative() / 1000000.0;
1363  return c->pts_drift + time - (time - c->last_updated) * (1.0 - c->speed);
1364  }
1365 }
1366 
1367 static void set_clock_at(Clock *c, double pts, int serial, double time)
1368 {
1369  c->pts = pts;
1370  c->last_updated = time;
1371  c->pts_drift = c->pts - time;
1372  c->serial = serial;
1373 }
1374 
1375 static void set_clock(Clock *c, double pts, int serial)
1376 {
1377  double time = av_gettime_relative() / 1000000.0;
1378  set_clock_at(c, pts, serial, time);
1379 }
1380 
1381 static void set_clock_speed(Clock *c, double speed)
1382 {
1383  set_clock(c, get_clock(c), c->serial);
1384  c->speed = speed;
1385 }
1386 
1387 static void init_clock(Clock *c, int *queue_serial)
1388 {
1389  c->speed = 1.0;
1390  c->paused = 0;
1391  c->queue_serial = queue_serial;
1392  set_clock(c, NAN, -1);
1393 }
1394 
1395 static void sync_clock_to_slave(Clock *c, Clock *slave)
1396 {
1397  double clock = get_clock(c);
1398  double slave_clock = get_clock(slave);
1399  if (!isnan(slave_clock) && (isnan(clock) || fabs(clock - slave_clock) > AV_NOSYNC_THRESHOLD))
1400  set_clock(c, slave_clock, slave->serial);
1401 }
1402 
1403 static int get_master_sync_type(VideoState *is) {
1404  if (is->av_sync_type == AV_SYNC_VIDEO_MASTER) {
1405  if (is->video_st)
1406  return AV_SYNC_VIDEO_MASTER;
1407  else
1408  return AV_SYNC_AUDIO_MASTER;
1409  } else if (is->av_sync_type == AV_SYNC_AUDIO_MASTER) {
1410  if (is->audio_st)
1411  return AV_SYNC_AUDIO_MASTER;
1412  else
1413  return AV_SYNC_EXTERNAL_CLOCK;
1414  } else {
1415  return AV_SYNC_EXTERNAL_CLOCK;
1416  }
1417 }
1418 
1419 /* get the current master clock value */
1420 static double get_master_clock(VideoState *is)
1421 {
1422  double val;
1423 
1424  switch (get_master_sync_type(is)) {
1425  case AV_SYNC_VIDEO_MASTER:
1426  val = get_clock(&is->vidclk);
1427  break;
1428  case AV_SYNC_AUDIO_MASTER:
1429  val = get_clock(&is->audclk);
1430  break;
1431  default:
1432  val = get_clock(&is->extclk);
1433  break;
1434  }
1435  return val;
1436 }
1437 
1438 static void check_external_clock_speed(VideoState *is) {
1439  if (is->video_stream >= 0 && is->videoq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES ||
1440  is->audio_stream >= 0 && is->audioq.nb_packets <= EXTERNAL_CLOCK_MIN_FRAMES) {
1441  set_clock_speed(&is->extclk, FFMAX(EXTERNAL_CLOCK_SPEED_MIN, is->extclk.speed - EXTERNAL_CLOCK_SPEED_STEP));
1442  } else if ((is->video_stream < 0 || is->videoq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES) &&
1443  (is->audio_stream < 0 || is->audioq.nb_packets > EXTERNAL_CLOCK_MAX_FRAMES)) {
1444  set_clock_speed(&is->extclk, FFMIN(EXTERNAL_CLOCK_SPEED_MAX, is->extclk.speed + EXTERNAL_CLOCK_SPEED_STEP));
1445  } else {
1446  double speed = is->extclk.speed;
1447  if (speed != 1.0)
1448  set_clock_speed(&is->extclk, speed + EXTERNAL_CLOCK_SPEED_STEP * (1.0 - speed) / fabs(1.0 - speed));
1449  }
1450 }
1451 
1452 /* seek in the stream */
1453 static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
1454 {
1455  if (!is->seek_req) {
1456  is->seek_pos = pos;
1457  is->seek_rel = rel;
1458  is->seek_flags &= ~AVSEEK_FLAG_BYTE;
1459  if (seek_by_bytes)
1460  is->seek_flags |= AVSEEK_FLAG_BYTE;
1461  is->seek_req = 1;
1462  SDL_CondSignal(is->continue_read_thread);
1463  }
1464 }
1465 
1466 /* pause or resume the video */
1467 static void stream_toggle_pause(VideoState *is)
1468 {
1469  if (is->paused) {
1470  is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated;
1471  if (is->read_pause_return != AVERROR(ENOSYS)) {
1472  is->vidclk.paused = 0;
1473  }
1474  set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial);
1475  }
1476  set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial);
1477  is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = !is->paused;
1478 }
1479 
1480 static void toggle_pause(VideoState *is)
1481 {
1482  stream_toggle_pause(is);
1483  is->step = 0;
1484 }
1485 
1486 static void toggle_mute(VideoState *is)
1487 {
1488  is->muted = !is->muted;
1489 }
1490 
1491 static void update_volume(VideoState *is, int sign, double step)
1492 {
1493  double volume_level = is->audio_volume ? (20 * log(is->audio_volume / (double)SDL_MIX_MAXVOLUME) / log(10)) : -1000.0;
1494  int new_volume = lrint(SDL_MIX_MAXVOLUME * pow(10.0, (volume_level + sign * step) / 20.0));
1495  is->audio_volume = av_clip(is->audio_volume == new_volume ? (is->audio_volume + sign) : new_volume, 0, SDL_MIX_MAXVOLUME);
1496 }
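A worked example, assuming SDL_MIX_MAXVOLUME is 128 and audio_volume is currently 64 (about -6.02 dB): one step up computes 128 * 10^((-6.02 + 0.75) / 20) ≈ 70, so roughly eight 0.75 dB steps return to full volume; the "+ sign" fallback guarantees progress even when the rounded value would not change.

    /* hypothetical starting point: is->audio_volume == 64, SDL_MIX_MAXVOLUME == 128 */
    update_volume(is, 1, SDL_VOLUME_STEP);    /* 64 -> ~70  (+0.75 dB) */
    update_volume(is, -1, SDL_VOLUME_STEP);   /* ~70 -> ~64 (-0.75 dB) */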
1497 
1498 static void step_to_next_frame(VideoState *is)
1499 {
1500  /* if the stream is paused unpause it, then step */
1501  if (is->paused)
1502  stream_toggle_pause(is);
1503  is->step = 1;
1504 }
1505 
1506 static double compute_target_delay(double delay, VideoState *is)
1507 {
1508  double sync_threshold, diff = 0;
1509 
1510  /* update delay to follow master synchronisation source */
1511  if (get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER) {
1512  /* if video is slave, we try to correct big delays by
1513  duplicating or deleting a frame */
1514  diff = get_clock(&is->vidclk) - get_master_clock(is);
1515 
1516  /* skip or repeat frame. We take into account the
1517  delay to compute the threshold. I still don't know
1518  if it is the best guess */
1519  sync_threshold = FFMAX(AV_SYNC_THRESHOLD_MIN, FFMIN(AV_SYNC_THRESHOLD_MAX, delay));
1520  if (!isnan(diff) && fabs(diff) < is->max_frame_duration) {
1521  if (diff <= -sync_threshold)
1522  delay = FFMAX(0, delay + diff);
1523  else if (diff >= sync_threshold && delay > AV_SYNC_FRAMEDUP_THRESHOLD)
1524  delay = delay + diff;
1525  else if (diff >= sync_threshold)
1526  delay = 2 * delay;
1527  }
1528  }
1529 
1530  av_log(NULL, AV_LOG_TRACE, "video: delay=%0.3f A-V=%f\n",
1531  delay, -diff);
1532 
1533  return delay;
1534 }
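A worked example, assuming video is not the master clock and the nominal frame delay is 0.040 s: sync_threshold clamps to 0.040 s; if the video clock runs 0.1 s behind the master (diff = -0.1) the returned delay collapses to FFMAX(0, 0.040 - 0.1) = 0 so the next frame is shown immediately, while if it runs 0.1 s ahead (diff = +0.1, with delay below AV_SYNC_FRAMEDUP_THRESHOLD) the delay is doubled to 0.080 s, effectively repeating the current frame.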
1535 
1536 static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp) {
1537  if (vp->serial == nextvp->serial) {
1538  double duration = nextvp->pts - vp->pts;
1539  if (isnan(duration) || duration <= 0 || duration > is->max_frame_duration)
1540  return vp->duration;
1541  else
1542  return duration;
1543  } else {
1544  return 0.0;
1545  }
1546 }
1547 
1548 static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial) {
1549  /* update current video pts */
1550  set_clock(&is->vidclk, pts, serial);
1551  sync_clock_to_slave(&is->extclk, &is->vidclk);
1552 }
1553 
1554 /* called to display each frame */
1555 static void video_refresh(void *opaque, double *remaining_time)
1556 {
1557  VideoState *is = opaque;
1558  double time;
1559 
1560  Frame *sp, *sp2;
1561 
1562  if (!is->paused && get_master_sync_type(is) == AV_SYNC_EXTERNAL_CLOCK && is->realtime)
1563  check_external_clock_speed(is);
1564 
1565  if (!display_disable && is->show_mode != SHOW_MODE_VIDEO && is->audio_st) {
1566  time = av_gettime_relative() / 1000000.0;
1567  if (is->force_refresh || is->last_vis_time + rdftspeed < time) {
1568  video_display(is);
1569  is->last_vis_time = time;
1570  }
1571  *remaining_time = FFMIN(*remaining_time, is->last_vis_time + rdftspeed - time);
1572  }
1573 
1574  if (is->video_st) {
1575 retry:
1576  if (frame_queue_nb_remaining(&is->pictq) == 0) {
1577  // nothing to do, no picture to display in the queue
1578  } else {
1579  double last_duration, duration, delay;
1580  Frame *vp, *lastvp;
1581 
1582  /* dequeue the picture */
1583  lastvp = frame_queue_peek_last(&is->pictq);
1584  vp = frame_queue_peek(&is->pictq);
1585 
1586  if (vp->serial != is->videoq.serial) {
1587  frame_queue_next(&is->pictq);
1588  goto retry;
1589  }
1590 
1591  if (lastvp->serial != vp->serial)
1592  is->frame_timer = av_gettime_relative() / 1000000.0;
1593 
1594  if (is->paused)
1595  goto display;
1596 
1597  /* compute nominal last_duration */
1598  last_duration = vp_duration(is, lastvp, vp);
1599  delay = compute_target_delay(last_duration, is);
1600 
1601  time= av_gettime_relative()/1000000.0;
1602  if (time < is->frame_timer + delay) {
1603  *remaining_time = FFMIN(is->frame_timer + delay - time, *remaining_time);
1604  goto display;
1605  }
1606 
1607  is->frame_timer += delay;
1608  if (delay > 0 && time - is->frame_timer > AV_SYNC_THRESHOLD_MAX)
1609  is->frame_timer = time;
1610 
1611  SDL_LockMutex(is->pictq.mutex);
1612  if (!isnan(vp->pts))
1613  update_video_pts(is, vp->pts, vp->pos, vp->serial);
1614  SDL_UnlockMutex(is->pictq.mutex);
1615 
1616  if (frame_queue_nb_remaining(&is->pictq) > 1) {
1617  Frame *nextvp = frame_queue_peek_next(&is->pictq);
1618  duration = vp_duration(is, vp, nextvp);
1619  if(!is->step && (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) && time > is->frame_timer + duration){
1620  is->frame_drops_late++;
1621  frame_queue_next(&is->pictq);
1622  goto retry;
1623  }
1624  }
1625 
1626  if (is->subtitle_st) {
1627  while (frame_queue_nb_remaining(&is->subpq) > 0) {
1628  sp = frame_queue_peek(&is->subpq);
1629 
1630  if (frame_queue_nb_remaining(&is->subpq) > 1)
1631  sp2 = frame_queue_peek_next(&is->subpq);
1632  else
1633  sp2 = NULL;
1634 
1635  if (sp->serial != is->subtitleq.serial
1636  || (is->vidclk.pts > (sp->pts + ((float) sp->sub.end_display_time / 1000)))
1637  || (sp2 && is->vidclk.pts > (sp2->pts + ((float) sp2->sub.start_display_time / 1000))))
1638  {
1639  if (sp->uploaded) {
1640  int i;
1641  for (i = 0; i < sp->sub.num_rects; i++) {
1642  AVSubtitleRect *sub_rect = sp->sub.rects[i];
1643  uint8_t *pixels;
1644  int pitch, j;
1645 
1646  if (!SDL_LockTexture(is->sub_texture, (SDL_Rect *)sub_rect, (void **)&pixels, &pitch)) {
1647  for (j = 0; j < sub_rect->h; j++, pixels += pitch)
1648  memset(pixels, 0, sub_rect->w << 2);
1649  SDL_UnlockTexture(is->sub_texture);
1650  }
1651  }
1652  }
1653  frame_queue_next(&is->subpq);
1654  } else {
1655  break;
1656  }
1657  }
1658  }
1659 
1660  frame_queue_next(&is->pictq);
1661  is->force_refresh = 1;
1662 
1663  if (is->step && !is->paused)
1664  stream_toggle_pause(is);
1665  }
1666 display:
1667  /* display picture */
1668  if (!display_disable && is->force_refresh && is->show_mode == SHOW_MODE_VIDEO && is->pictq.rindex_shown)
1669  video_display(is);
1670  }
1671  is->force_refresh = 0;
1672  if (show_status) {
1673  static int64_t last_time;
1674  int64_t cur_time;
1675  int aqsize, vqsize, sqsize;
1676  double av_diff;
1677 
1678  cur_time = av_gettime_relative();
1679  if (!last_time || (cur_time - last_time) >= 30000) {
1680  aqsize = 0;
1681  vqsize = 0;
1682  sqsize = 0;
1683  if (is->audio_st)
1684  aqsize = is->audioq.size;
1685  if (is->video_st)
1686  vqsize = is->videoq.size;
1687  if (is->subtitle_st)
1688  sqsize = is->subtitleq.size;
1689  av_diff = 0;
1690  if (is->audio_st && is->video_st)
1691  av_diff = get_clock(&is->audclk) - get_clock(&is->vidclk);
1692  else if (is->video_st)
1693  av_diff = get_master_clock(is) - get_clock(&is->vidclk);
1694  else if (is->audio_st)
1695  av_diff = get_master_clock(is) - get_clock(&is->audclk);
1696  av_log(NULL, AV_LOG_INFO,
1697  "%7.2f %s:%7.3f fd=%4d aq=%5dKB vq=%5dKB sq=%5dB f=%"PRId64"/%"PRId64" \r",
1698  get_master_clock(is),
1699  (is->audio_st && is->video_st) ? "A-V" : (is->video_st ? "M-V" : (is->audio_st ? "M-A" : " ")),
1700  av_diff,
1701  is->frame_drops_early + is->frame_drops_late,
1702  aqsize / 1024,
1703  vqsize / 1024,
1704  sqsize,
1705  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_dts : 0,
1706  is->video_st ? is->viddec.avctx->pts_correction_num_faulty_pts : 0);
1707  fflush(stdout);
1708  last_time = cur_time;
1709  }
1710  }
1711 }
1712 
1713 static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
1714 {
1715  Frame *vp;
1716 
1717 #if defined(DEBUG_SYNC)
1718  printf("frame_type=%c pts=%0.3f\n",
1719  av_get_picture_type_char(src_frame->pict_type), pts);
1720 #endif
1721 
1722  if (!(vp = frame_queue_peek_writable(&is->pictq)))
1723  return -1;
1724 
1725  vp->sar = src_frame->sample_aspect_ratio;
1726  vp->uploaded = 0;
1727 
1728  vp->width = src_frame->width;
1729  vp->height = src_frame->height;
1730  vp->format = src_frame->format;
1731 
1732  vp->pts = pts;
1733  vp->duration = duration;
1734  vp->pos = pos;
1735  vp->serial = serial;
1736 
1737  set_default_window_size(vp->width, vp->height, vp->sar);
1738 
1739  av_frame_move_ref(vp->frame, src_frame);
1740  frame_queue_push(&is->pictq);
1741  return 0;
1742 }
1743 
1744 static int get_video_frame(VideoState *is, AVFrame *frame)
1745 {
1746  int got_picture;
1747 
1748  if ((got_picture = decoder_decode_frame(&is->viddec, frame, NULL)) < 0)
1749  return -1;
1750 
1751  if (got_picture) {
1752  double dpts = NAN;
1753 
1754  if (frame->pts != AV_NOPTS_VALUE)
1755  dpts = av_q2d(is->video_st->time_base) * frame->pts;
1756 
1757  frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
1758 
1759  if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
1760  if (frame->pts != AV_NOPTS_VALUE) {
1761  double diff = dpts - get_master_clock(is);
1762  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD &&
1763  diff - is->frame_last_filter_delay < 0 &&
1764  is->viddec.pkt_serial == is->vidclk.serial &&
1765  is->videoq.nb_packets) {
1766  is->frame_drops_early++;
1767  av_frame_unref(frame);
1768  got_picture = 0;
1769  }
1770  }
1771  }
1772  }
1773 
1774  return got_picture;
1775 }
1776 
1777 #if CONFIG_AVFILTER
1778 static int configure_filtergraph(AVFilterGraph *graph, const char *filtergraph,
1779  AVFilterContext *source_ctx, AVFilterContext *sink_ctx)
1780 {
1781  int ret, i;
1782  int nb_filters = graph->nb_filters;
1783  AVFilterInOut *outputs = NULL, *inputs = NULL;
1784 
1785  if (filtergraph) {
1786  outputs = avfilter_inout_alloc();
1787  inputs = avfilter_inout_alloc();
1788  if (!outputs || !inputs) {
1789  ret = AVERROR(ENOMEM);
1790  goto fail;
1791  }
1792 
1793  outputs->name = av_strdup("in");
1794  outputs->filter_ctx = source_ctx;
1795  outputs->pad_idx = 0;
1796  outputs->next = NULL;
1797 
1798  inputs->name = av_strdup("out");
1799  inputs->filter_ctx = sink_ctx;
1800  inputs->pad_idx = 0;
1801  inputs->next = NULL;
1802 
1803  if ((ret = avfilter_graph_parse_ptr(graph, filtergraph, &inputs, &outputs, NULL)) < 0)
1804  goto fail;
1805  } else {
1806  if ((ret = avfilter_link(source_ctx, 0, sink_ctx, 0)) < 0)
1807  goto fail;
1808  }
1809 
1810  /* Reorder the filters to ensure that inputs of the custom filters are merged first */
1811  for (i = 0; i < graph->nb_filters - nb_filters; i++)
1812  FFSWAP(AVFilterContext*, graph->filters[i], graph->filters[i + nb_filters]);
1813 
1814  ret = avfilter_graph_config(graph, NULL);
1815 fail:
1816  avfilter_inout_free(&outputs);
1817  avfilter_inout_free(&inputs);
1818  return ret;
1819 }
1820 
1821 static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters, AVFrame *frame)
1822 {
1823  enum AVPixelFormat pix_fmts[FF_ARRAY_ELEMS(sdl_texture_format_map)];
1824  char sws_flags_str[512] = "";
1825  char buffersrc_args[256];
1826  int ret;
1827  AVFilterContext *filt_src = NULL, *filt_out = NULL, *last_filter = NULL;
1828  AVCodecParameters *codecpar = is->video_st->codecpar;
1829  AVRational fr = av_guess_frame_rate(is->ic, is->video_st, NULL);
1830  AVDictionaryEntry *e = NULL;
1831  int nb_pix_fmts = 0;
1832  int i, j;
1833 
1834  for (i = 0; i < renderer_info.num_texture_formats; i++) {
1835  for (j = 0; j < FF_ARRAY_ELEMS(sdl_texture_format_map) - 1; j++) {
1836  if (renderer_info.texture_formats[i] == sdl_texture_format_map[j].texture_fmt) {
1837  pix_fmts[nb_pix_fmts++] = sdl_texture_format_map[j].format;
1838  break;
1839  }
1840  }
1841  }
1842  pix_fmts[nb_pix_fmts] = AV_PIX_FMT_NONE;
1843 
1844  while ((e = av_dict_get(sws_dict, "", e, AV_DICT_IGNORE_SUFFIX))) {
1845  if (!strcmp(e->key, "sws_flags")) {
1846  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", "flags", e->value);
1847  } else
1848  av_strlcatf(sws_flags_str, sizeof(sws_flags_str), "%s=%s:", e->key, e->value);
1849  }
1850  if (strlen(sws_flags_str))
1851  sws_flags_str[strlen(sws_flags_str)-1] = '\0';
1852 
1853  graph->scale_sws_opts = av_strdup(sws_flags_str);
1854 
1855  snprintf(buffersrc_args, sizeof(buffersrc_args),
1856  "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
1857  frame->width, frame->height, frame->format,
1858  is->video_st->time_base.num, is->video_st->time_base.den,
1859  codecpar->sample_aspect_ratio.num, FFMAX(codecpar->sample_aspect_ratio.den, 1));
1860  if (fr.num && fr.den)
1861  av_strlcatf(buffersrc_args, sizeof(buffersrc_args), ":frame_rate=%d/%d", fr.num, fr.den);
1862 
1863  if ((ret = avfilter_graph_create_filter(&filt_src,
1864  avfilter_get_by_name("buffer"),
1865  "ffplay_buffer", buffersrc_args, NULL,
1866  graph)) < 0)
1867  goto fail;
1868 
1869  ret = avfilter_graph_create_filter(&filt_out,
1870  avfilter_get_by_name("buffersink"),
1871  "ffplay_buffersink", NULL, NULL, graph);
1872  if (ret < 0)
1873  goto fail;
1874 
1875  if ((ret = av_opt_set_int_list(filt_out, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1876  goto fail;
1877 
1878  last_filter = filt_out;
1879 
1880 /* Note: this macro adds a filter before the last added filter, so the
1881  * processing order of the filters is reversed */
1882 #define INSERT_FILT(name, arg) do { \
1883  AVFilterContext *filt_ctx; \
1884  \
1885  ret = avfilter_graph_create_filter(&filt_ctx, \
1886  avfilter_get_by_name(name), \
1887  "ffplay_" name, arg, NULL, graph); \
1888  if (ret < 0) \
1889  goto fail; \
1890  \
1891  ret = avfilter_link(filt_ctx, 0, last_filter, 0); \
1892  if (ret < 0) \
1893  goto fail; \
1894  \
1895  last_filter = filt_ctx; \
1896 } while (0)
1897 
1898  if (autorotate) {
1899  double theta = get_rotation(is->video_st);
1900 
1901  if (fabs(theta - 90) < 1.0) {
1902  INSERT_FILT("transpose", "clock");
1903  } else if (fabs(theta - 180) < 1.0) {
1904  INSERT_FILT("hflip", NULL);
1905  INSERT_FILT("vflip", NULL);
1906  } else if (fabs(theta - 270) < 1.0) {
1907  INSERT_FILT("transpose", "cclock");
1908  } else if (fabs(theta) > 1.0) {
1909  char rotate_buf[64];
1910  snprintf(rotate_buf, sizeof(rotate_buf), "%f*PI/180", theta);
1911  INSERT_FILT("rotate", rotate_buf);
1912  }
1913  }
1914 
1915  if ((ret = configure_filtergraph(graph, vfilters, filt_src, last_filter)) < 0)
1916  goto fail;
1917 
1918  is->in_video_filter = filt_src;
1919  is->out_video_filter = filt_out;
1920 
1921 fail:
1922  return ret;
1923 }
1924 
1925 static int configure_audio_filters(VideoState *is, const char *afilters, int force_output_format)
1926 {
1927  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
1928  int sample_rates[2] = { 0, -1 };
1929  int64_t channel_layouts[2] = { 0, -1 };
1930  int channels[2] = { 0, -1 };
1931  AVFilterContext *filt_asrc = NULL, *filt_asink = NULL;
1932  char aresample_swr_opts[512] = "";
1933  AVDictionaryEntry *e = NULL;
1934  char asrc_args[256];
1935  int ret;
1936 
1937  avfilter_graph_free(&is->agraph);
1938  if (!(is->agraph = avfilter_graph_alloc()))
1939  return AVERROR(ENOMEM);
1940 
1941  while ((e = av_dict_get(swr_opts, "", e, AV_DICT_IGNORE_SUFFIX)))
1942  av_strlcatf(aresample_swr_opts, sizeof(aresample_swr_opts), "%s=%s:", e->key, e->value);
1943  if (strlen(aresample_swr_opts))
1944  aresample_swr_opts[strlen(aresample_swr_opts)-1] = '\0';
1945  av_opt_set(is->agraph, "aresample_swr_opts", aresample_swr_opts, 0);
1946 
1947  ret = snprintf(asrc_args, sizeof(asrc_args),
1948  "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
1949  is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
1950  is->audio_filter_src.channels,
1951  1, is->audio_filter_src.freq);
1952  if (is->audio_filter_src.channel_layout)
1953  snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
1954  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
1955 
1956  ret = avfilter_graph_create_filter(&filt_asrc,
1957  avfilter_get_by_name("abuffer"), "ffplay_abuffer",
1958  asrc_args, NULL, is->agraph);
1959  if (ret < 0)
1960  goto end;
1961 
1962 
1963  ret = avfilter_graph_create_filter(&filt_asink,
1964  avfilter_get_by_name("abuffersink"), "ffplay_abuffersink",
1965  NULL, NULL, is->agraph);
1966  if (ret < 0)
1967  goto end;
1968 
1969  if ((ret = av_opt_set_int_list(filt_asink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
1970  goto end;
1971  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 1, AV_OPT_SEARCH_CHILDREN)) < 0)
1972  goto end;
1973 
1974  if (force_output_format) {
1975  channel_layouts[0] = is->audio_tgt.channel_layout;
1976  channels [0] = is->audio_tgt.channels;
1977  sample_rates [0] = is->audio_tgt.freq;
1978  if ((ret = av_opt_set_int(filt_asink, "all_channel_counts", 0, AV_OPT_SEARCH_CHILDREN)) < 0)
1979  goto end;
1980  if ((ret = av_opt_set_int_list(filt_asink, "channel_layouts", channel_layouts, -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1981  goto end;
1982  if ((ret = av_opt_set_int_list(filt_asink, "channel_counts" , channels , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1983  goto end;
1984  if ((ret = av_opt_set_int_list(filt_asink, "sample_rates" , sample_rates , -1, AV_OPT_SEARCH_CHILDREN)) < 0)
1985  goto end;
1986  }
1987 
1988 
1989  if ((ret = configure_filtergraph(is->agraph, afilters, filt_asrc, filt_asink)) < 0)
1990  goto end;
1991 
1992  is->in_audio_filter = filt_asrc;
1993  is->out_audio_filter = filt_asink;
1994 
1995 end:
1996  if (ret < 0)
1997  avfilter_graph_free(&is->agraph);
1998  return ret;
1999 }
2000 #endif /* CONFIG_AVFILTER */
2001 
2002 static int audio_thread(void *arg)
2003 {
2004  VideoState *is = arg;
2005  AVFrame *frame = av_frame_alloc();
2006  Frame *af;
2007 #if CONFIG_AVFILTER
2008  int last_serial = -1;
2009  int64_t dec_channel_layout;
2010  int reconfigure;
2011 #endif
2012  int got_frame = 0;
2013  AVRational tb;
2014  int ret = 0;
2015 
2016  if (!frame)
2017  return AVERROR(ENOMEM);
2018 
2019  do {
2020  if ((got_frame = decoder_decode_frame(&is->auddec, frame, NULL)) < 0)
2021  goto the_end;
2022 
2023  if (got_frame) {
2024  tb = (AVRational){1, frame->sample_rate};
2025 
2026 #if CONFIG_AVFILTER
2027  dec_channel_layout = get_valid_channel_layout(frame->channel_layout, frame->channels);
2028 
2029  reconfigure =
2030  cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
2031  frame->format, frame->channels) ||
2032  is->audio_filter_src.channel_layout != dec_channel_layout ||
2033  is->audio_filter_src.freq != frame->sample_rate ||
2034  is->auddec.pkt_serial != last_serial;
2035 
2036  if (reconfigure) {
2037  char buf1[1024], buf2[1024];
2038  av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
2039  av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
2041  "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
2042  is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
2043  frame->sample_rate, frame->channels, av_get_sample_fmt_name(frame->format), buf2, is->auddec.pkt_serial);
2044 
2045  is->audio_filter_src.fmt = frame->format;
2046  is->audio_filter_src.channels = frame->channels;
2047  is->audio_filter_src.channel_layout = dec_channel_layout;
2048  is->audio_filter_src.freq = frame->sample_rate;
2049  last_serial = is->auddec.pkt_serial;
2050 
2051  if ((ret = configure_audio_filters(is, afilters, 1)) < 0)
2052  goto the_end;
2053  }
2054 
2055  if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
2056  goto the_end;
2057 
2058  while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
2059  tb = av_buffersink_get_time_base(is->out_audio_filter);
2060 #endif
2061  if (!(af = frame_queue_peek_writable(&is->sampq)))
2062  goto the_end;
2063 
2064  af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2065  af->pos = frame->pkt_pos;
2066  af->serial = is->auddec.pkt_serial;
2067  af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});
2068 
2069  av_frame_move_ref(af->frame, frame);
2070  frame_queue_push(&is->sampq);
2071 
2072 #if CONFIG_AVFILTER
2073  if (is->audioq.serial != is->auddec.pkt_serial)
2074  break;
2075  }
2076  if (ret == AVERROR_EOF)
2077  is->auddec.finished = is->auddec.pkt_serial;
2078 #endif
2079  }
2080  } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
2081  the_end:
2082 #if CONFIG_AVFILTER
2083  avfilter_graph_free(&is->agraph);
2084 #endif
2085  av_frame_free(&frame);
2086  return ret;
2087 }
2088 
2089 static int decoder_start(Decoder *d, int (*fn)(void *), void *arg)
2090 {
2091  packet_queue_start(d->queue);
2092  d->decoder_tid = SDL_CreateThread(fn, "decoder", arg);
2093  if (!d->decoder_tid) {
2094  av_log(NULL, AV_LOG_ERROR, "SDL_CreateThread(): %s\n", SDL_GetError());
2095  return AVERROR(ENOMEM);
2096  }
2097  return 0;
2098 }
2099 
2100 static int video_thread(void *arg)
2101 {
2102  VideoState *is = arg;
2103  AVFrame *frame = av_frame_alloc();
2104  double pts;
2105  double duration;
2106  int ret;
2107  AVRational tb = is->video_st->time_base;
2108  AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
2109 
2110 #if CONFIG_AVFILTER
2111  AVFilterGraph *graph = avfilter_graph_alloc();
2112  AVFilterContext *filt_out = NULL, *filt_in = NULL;
2113  int last_w = 0;
2114  int last_h = 0;
2115  enum AVPixelFormat last_format = -2;
2116  int last_serial = -1;
2117  int last_vfilter_idx = 0;
2118  if (!graph) {
2119  av_frame_free(&frame);
2120  return AVERROR(ENOMEM);
2121  }
2122 
2123 #endif
2124 
2125  if (!frame) {
2126 #if CONFIG_AVFILTER
2127  avfilter_graph_free(&graph);
2128 #endif
2129  return AVERROR(ENOMEM);
2130  }
2131 
2132  for (;;) {
2133  ret = get_video_frame(is, frame);
2134  if (ret < 0)
2135  goto the_end;
2136  if (!ret)
2137  continue;
2138 
2139 #if CONFIG_AVFILTER
2140  if ( last_w != frame->width
2141  || last_h != frame->height
2142  || last_format != frame->format
2143  || last_serial != is->viddec.pkt_serial
2144  || last_vfilter_idx != is->vfilter_idx) {
2146  "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
2147  last_w, last_h,
2148  (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
2149  frame->width, frame->height,
2150  (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
2151  avfilter_graph_free(&graph);
2152  graph = avfilter_graph_alloc();
2153  if ((ret = configure_video_filters(graph, is, vfilters_list ? vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
2154  SDL_Event event;
2155  event.type = FF_QUIT_EVENT;
2156  event.user.data1 = is;
2157  SDL_PushEvent(&event);
2158  goto the_end;
2159  }
2160  filt_in = is->in_video_filter;
2161  filt_out = is->out_video_filter;
2162  last_w = frame->width;
2163  last_h = frame->height;
2164  last_format = frame->format;
2165  last_serial = is->viddec.pkt_serial;
2166  last_vfilter_idx = is->vfilter_idx;
2167  frame_rate = av_buffersink_get_frame_rate(filt_out);
2168  }
2169 
2170  ret = av_buffersrc_add_frame(filt_in, frame);
2171  if (ret < 0)
2172  goto the_end;
2173 
2174  while (ret >= 0) {
2175  is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
2176 
2177  ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
2178  if (ret < 0) {
2179  if (ret == AVERROR_EOF)
2180  is->viddec.finished = is->viddec.pkt_serial;
2181  ret = 0;
2182  break;
2183  }
2184 
2185  is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
2186  if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
2187  is->frame_last_filter_delay = 0;
2188  tb = av_buffersink_get_time_base(filt_out);
2189 #endif
2190  duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
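 /* Editorial note (not part of the original source): duration here is simply the
  * nominal frame period, e.g. a 25/1 frame rate yields 1/25 = 0.04 s per frame;
  * an unknown frame rate leaves the duration at 0. */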
2191  pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
2192  ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial);
2193  av_frame_unref(frame);
2194 #if CONFIG_AVFILTER
2195  }
2196 #endif
2197 
2198  if (ret < 0)
2199  goto the_end;
2200  }
2201  the_end:
2202 #if CONFIG_AVFILTER
2203  avfilter_graph_free(&graph);
2204 #endif
2205  av_frame_free(&frame);
2206  return 0;
2207 }
2208 
2209 static int subtitle_thread(void *arg)
2210 {
2211  VideoState *is = arg;
2212  Frame *sp;
2213  int got_subtitle;
2214  double pts;
2215 
2216  for (;;) {
2217  if (!(sp = frame_queue_peek_writable(&is->subpq)))
2218  return 0;
2219 
2220  if ((got_subtitle = decoder_decode_frame(&is->subdec, NULL, &sp->sub)) < 0)
2221  break;
2222 
2223  pts = 0;
2224 
2225  if (got_subtitle && sp->sub.format == 0) {
2226  if (sp->sub.pts != AV_NOPTS_VALUE)
2227  pts = sp->sub.pts / (double)AV_TIME_BASE;
2228  sp->pts = pts;
2229  sp->serial = is->subdec.pkt_serial;
2230  sp->width = is->subdec.avctx->width;
2231  sp->height = is->subdec.avctx->height;
2232  sp->uploaded = 0;
2233 
2234  /* now we can update the picture count */
2235  frame_queue_push(&is->subpq);
2236  } else if (got_subtitle) {
2237  avsubtitle_free(&sp->sub);
2238  }
2239  }
2240  return 0;
2241 }
2242 
2243 /* copy samples into the buffer used by the audio visualization display */
2244 static void update_sample_display(VideoState *is, short *samples, int samples_size)
2245 {
2246  int size, len;
2247 
2248  size = samples_size / sizeof(short);
2249  while (size > 0) {
2250  len = SAMPLE_ARRAY_SIZE - is->sample_array_index;
2251  if (len > size)
2252  len = size;
2253  memcpy(is->sample_array + is->sample_array_index, samples, len * sizeof(short));
2254  samples += len;
2255  is->sample_array_index += len;
2256  if (is->sample_array_index >= SAMPLE_ARRAY_SIZE)
2257  is->sample_array_index = 0;
2258  size -= len;
2259  }
2260 }
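 /* Editorial note (not in the original source): sample_array acts as a circular
  * buffer; the copy above wraps the write index back to 0 once it reaches
  * SAMPLE_ARRAY_SIZE, and the waveform/RDFT display later reads from it. */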
2261 
2262 /* return the wanted number of samples to get better sync if sync_type is video
2263  * or external master clock */
2264 static int synchronize_audio(VideoState *is, int nb_samples)
2265 {
2266  int wanted_nb_samples = nb_samples;
2267 
2268  /* if not master, then we try to remove or add samples to correct the clock */
2269  if (get_master_sync_type(is) != AV_SYNC_AUDIO_MASTER) {
2270  double diff, avg_diff;
2271  int min_nb_samples, max_nb_samples;
2272 
2273  diff = get_clock(&is->audclk) - get_master_clock(is);
2274 
2275  if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD) {
2276  is->audio_diff_cum = diff + is->audio_diff_avg_coef * is->audio_diff_cum;
2277  if (is->audio_diff_avg_count < AUDIO_DIFF_AVG_NB) {
2278  /* not enough measures to have a correct estimate */
2279  is->audio_diff_avg_count++;
2280  } else {
2281  /* estimate the A-V difference */
2282  avg_diff = is->audio_diff_cum * (1.0 - is->audio_diff_avg_coef);
2283 
2284  if (fabs(avg_diff) >= is->audio_diff_threshold) {
2285  wanted_nb_samples = nb_samples + (int)(diff * is->audio_src.freq);
2286  min_nb_samples = ((nb_samples * (100 - SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2287  max_nb_samples = ((nb_samples * (100 + SAMPLE_CORRECTION_PERCENT_MAX) / 100));
2288  wanted_nb_samples = av_clip(wanted_nb_samples, min_nb_samples, max_nb_samples);
2289  }
2290  av_log(NULL, AV_LOG_TRACE, "diff=%f adiff=%f sample_diff=%d apts=%0.3f %f\n",
2291  diff, avg_diff, wanted_nb_samples - nb_samples,
2292  is->audio_clock, is->audio_diff_threshold);
2293  }
2294  } else {
2295  /* the difference is too large: probably initial PTS errors, so
2296  reset the A-V filter */
2297  is->audio_diff_avg_count = 0;
2298  is->audio_diff_cum = 0;
2299  }
2300  }
2301 
2302  return wanted_nb_samples;
2303 }
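 /* Worked example (editorial, not in the original source): with
  * SAMPLE_CORRECTION_PERCENT_MAX = 10 and nb_samples = 1024, the corrected
  * request is clipped to the range [921, 1126] samples, so resampling never
  * stretches or squeezes a single frame by more than about 10%. */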
2304 
2305 /**
2306  * Decode one audio frame and return its uncompressed size.
2307  *
2308  * The processed audio frame is decoded, converted if required, and
2309  * stored in is->audio_buf, with size in bytes given by the return
2310  * value.
2311  */
2312 static int audio_decode_frame(VideoState *is)
2313 {
2314  int data_size, resampled_data_size;
2315  int64_t dec_channel_layout;
2316  av_unused double audio_clock0;
2317  int wanted_nb_samples;
2318  Frame *af;
2319 
2320  if (is->paused)
2321  return -1;
2322 
2323  do {
2324 #if defined(_WIN32)
2325  while (frame_queue_nb_remaining(&is->sampq) == 0) {
2326  if ((av_gettime_relative() - audio_callback_time) > 1000000LL * is->audio_hw_buf_size / is->audio_tgt.bytes_per_sec / 2)
2327  return -1;
2328  av_usleep (1000);
2329  }
2330 #endif
2331  if (!(af = frame_queue_peek_readable(&is->sampq)))
2332  return -1;
2333  frame_queue_next(&is->sampq);
2334  } while (af->serial != is->audioq.serial);
2335 
2336  data_size = av_samples_get_buffer_size(NULL, af->frame->channels,
2337  af->frame->nb_samples,
2338  af->frame->format, 1);
2339 
2340  dec_channel_layout =
2341  (af->frame->channel_layout && af->frame->channels == av_get_channel_layout_nb_channels(af->frame->channel_layout)) ?
2342  af->frame->channel_layout : av_get_default_channel_layout(af->frame->channels);
2343  wanted_nb_samples = synchronize_audio(is, af->frame->nb_samples);
2344 
2345  if (af->frame->format != is->audio_src.fmt ||
2346  dec_channel_layout != is->audio_src.channel_layout ||
2347  af->frame->sample_rate != is->audio_src.freq ||
2348  (wanted_nb_samples != af->frame->nb_samples && !is->swr_ctx)) {
2349  swr_free(&is->swr_ctx);
2350  is->swr_ctx = swr_alloc_set_opts(NULL,
2351  is->audio_tgt.channel_layout, is->audio_tgt.fmt, is->audio_tgt.freq,
2352  dec_channel_layout, af->frame->format, af->frame->sample_rate,
2353  0, NULL);
2354  if (!is->swr_ctx || swr_init(is->swr_ctx) < 0) {
2356  "Cannot create sample rate converter for conversion of %d Hz %s %d channels to %d Hz %s %d channels!\n",
2359  swr_free(&is->swr_ctx);
2360  return -1;
2361  }
2362  is->audio_src.channel_layout = dec_channel_layout;
2363  is->audio_src.channels = af->frame->channels;
2364  is->audio_src.freq = af->frame->sample_rate;
2365  is->audio_src.fmt = af->frame->format;
2366  }
2367 
2368  if (is->swr_ctx) {
2369  const uint8_t **in = (const uint8_t **)af->frame->extended_data;
2370  uint8_t **out = &is->audio_buf1;
2371  int out_count = (int64_t)wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate + 256;
2372  int out_size = av_samples_get_buffer_size(NULL, is->audio_tgt.channels, out_count, is->audio_tgt.fmt, 0);
2373  int len2;
2374  if (out_size < 0) {
2375  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size() failed\n");
2376  return -1;
2377  }
2378  if (wanted_nb_samples != af->frame->nb_samples) {
2379  if (swr_set_compensation(is->swr_ctx, (wanted_nb_samples - af->frame->nb_samples) * is->audio_tgt.freq / af->frame->sample_rate,
2380  wanted_nb_samples * is->audio_tgt.freq / af->frame->sample_rate) < 0) {
2381  av_log(NULL, AV_LOG_ERROR, "swr_set_compensation() failed\n");
2382  return -1;
2383  }
2384  }
2385  av_fast_malloc(&is->audio_buf1, &is->audio_buf1_size, out_size);
2386  if (!is->audio_buf1)
2387  return AVERROR(ENOMEM);
2388  len2 = swr_convert(is->swr_ctx, out, out_count, in, af->frame->nb_samples);
2389  if (len2 < 0) {
2390  av_log(NULL, AV_LOG_ERROR, "swr_convert() failed\n");
2391  return -1;
2392  }
2393  if (len2 == out_count) {
2394  av_log(NULL, AV_LOG_WARNING, "audio buffer is probably too small\n");
2395  if (swr_init(is->swr_ctx) < 0)
2396  swr_free(&is->swr_ctx);
2397  }
2398  is->audio_buf = is->audio_buf1;
2399  resampled_data_size = len2 * is->audio_tgt.channels * av_get_bytes_per_sample(is->audio_tgt.fmt);
2400  } else {
2401  is->audio_buf = af->frame->data[0];
2402  resampled_data_size = data_size;
2403  }
2404 
2405  audio_clock0 = is->audio_clock;
2406  /* update the audio clock with the pts */
2407  if (!isnan(af->pts))
2408  is->audio_clock = af->pts + (double) af->frame->nb_samples / af->frame->sample_rate;
2409  else
2410  is->audio_clock = NAN;
2411  is->audio_clock_serial = af->serial;
2412 #ifdef DEBUG
2413  {
2414  static double last_clock;
2415  printf("audio: delay=%0.3f clock=%0.3f clock0=%0.3f\n",
2416  is->audio_clock - last_clock,
2417  is->audio_clock, audio_clock0);
2418  last_clock = is->audio_clock;
2419  }
2420 #endif
2421  return resampled_data_size;
2422 }
2423 
2424 /* prepare a new audio buffer */
2425 static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
2426 {
2427  VideoState *is = opaque;
2428  int audio_size, len1;
2429 
2429 
2430  audio_callback_time = av_gettime_relative();
2431 
2432  while (len > 0) {
2433  if (is->audio_buf_index >= is->audio_buf_size) {
2434  audio_size = audio_decode_frame(is);
2435  if (audio_size < 0) {
2436  /* if error, just output silence */
2437  is->audio_buf = NULL;
2438  is->audio_buf_size = SDL_AUDIO_MIN_BUFFER_SIZE / is->audio_tgt.frame_size * is->audio_tgt.frame_size;
2439  } else {
2440  if (is->show_mode != SHOW_MODE_VIDEO)
2441  update_sample_display(is, (int16_t *)is->audio_buf, audio_size);
2442  is->audio_buf_size = audio_size;
2443  }
2444  is->audio_buf_index = 0;
2445  }
2446  len1 = is->audio_buf_size - is->audio_buf_index;
2447  if (len1 > len)
2448  len1 = len;
2449  if (!is->muted && is->audio_buf && is->audio_volume == SDL_MIX_MAXVOLUME)
2450  memcpy(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, len1);
2451  else {
2452  memset(stream, 0, len1);
2453  if (!is->muted && is->audio_buf)
2454  SDL_MixAudioFormat(stream, (uint8_t *)is->audio_buf + is->audio_buf_index, AUDIO_S16SYS, len1, is->audio_volume);
2455  }
2456  len -= len1;
2457  stream += len1;
2458  is->audio_buf_index += len1;
2459  }
2460  is->audio_write_buf_size = is->audio_buf_size - is->audio_buf_index;
2461  /* Let's assume the audio driver that is used by SDL has two periods. */
2462  if (!isnan(is->audio_clock)) {
2463  set_clock_at(&is->audclk, is->audio_clock - (double)(2 * is->audio_hw_buf_size + is->audio_write_buf_size) / is->audio_tgt.bytes_per_sec, is->audio_clock_serial, audio_callback_time / 1000000.0);
2464  sync_clock_to_slave(&is->extclk, &is->audclk);
2465  }
2466 }
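 /* Editorial note (not in the original source): the clock update above assumes
  * roughly two hardware periods of already-queued audio, so the audio clock is
  * set to the pts of the last decoded sample minus the latency implied by
  * (2 * audio_hw_buf_size + audio_write_buf_size) bytes at bytes_per_sec. */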
2467 
2468 static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
2469 {
2470  SDL_AudioSpec wanted_spec, spec;
2471  const char *env;
2472  static const int next_nb_channels[] = {0, 0, 1, 6, 2, 6, 4, 6};
2473  static const int next_sample_rates[] = {0, 44100, 48000, 96000, 192000};
2474  int next_sample_rate_idx = FF_ARRAY_ELEMS(next_sample_rates) - 1;
2475 
2476  env = SDL_getenv("SDL_AUDIO_CHANNELS");
2477  if (env) {
2478  wanted_nb_channels = atoi(env);
2479  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2480  }
2481  if (!wanted_channel_layout || wanted_nb_channels != av_get_channel_layout_nb_channels(wanted_channel_layout)) {
2482  wanted_channel_layout = av_get_default_channel_layout(wanted_nb_channels);
2483  wanted_channel_layout &= ~AV_CH_LAYOUT_STEREO_DOWNMIX;
2484  }
2485  wanted_nb_channels = av_get_channel_layout_nb_channels(wanted_channel_layout);
2486  wanted_spec.channels = wanted_nb_channels;
2487  wanted_spec.freq = wanted_sample_rate;
2488  if (wanted_spec.freq <= 0 || wanted_spec.channels <= 0) {
2489  av_log(NULL, AV_LOG_ERROR, "Invalid sample rate or channel count!\n");
2490  return -1;
2491  }
2492  while (next_sample_rate_idx && next_sample_rates[next_sample_rate_idx] >= wanted_spec.freq)
2493  next_sample_rate_idx--;
2494  wanted_spec.format = AUDIO_S16SYS;
2495  wanted_spec.silence = 0;
2496  wanted_spec.samples = FFMAX(SDL_AUDIO_MIN_BUFFER_SIZE, 2 << av_log2(wanted_spec.freq / SDL_AUDIO_MAX_CALLBACKS_PER_SEC));
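 /* Worked example (editorial, not in the original source): at 48000 Hz with
  * SDL_AUDIO_MAX_CALLBACKS_PER_SEC = 30, 48000/30 = 1600 and
  * 2 << av_log2(1600) = 2048, so SDL is asked for 2048-sample buffers,
  * never fewer than SDL_AUDIO_MIN_BUFFER_SIZE = 512. */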
2497  wanted_spec.callback = sdl_audio_callback;
2498  wanted_spec.userdata = opaque;
2499  while (!(audio_dev = SDL_OpenAudioDevice(NULL, 0, &wanted_spec, &spec, SDL_AUDIO_ALLOW_FREQUENCY_CHANGE | SDL_AUDIO_ALLOW_CHANNELS_CHANGE))) {
2500  av_log(NULL, AV_LOG_WARNING, "SDL_OpenAudio (%d channels, %d Hz): %s\n",
2501  wanted_spec.channels, wanted_spec.freq, SDL_GetError());
2502  wanted_spec.channels = next_nb_channels[FFMIN(7, wanted_spec.channels)];
2503  if (!wanted_spec.channels) {
2504  wanted_spec.freq = next_sample_rates[next_sample_rate_idx--];
2505  wanted_spec.channels = wanted_nb_channels;
2506  if (!wanted_spec.freq) {
2508  "No more combinations to try, audio open failed\n");
2509  return -1;
2510  }
2511  }
2512  wanted_channel_layout = av_get_default_channel_layout(wanted_spec.channels);
2513  }
2514  if (spec.format != AUDIO_S16SYS) {
2516  "SDL advised audio format %d is not supported!\n", spec.format);
2517  return -1;
2518  }
2519  if (spec.channels != wanted_spec.channels) {
2520  wanted_channel_layout = av_get_default_channel_layout(spec.channels);
2521  if (!wanted_channel_layout) {
2523  "SDL advised channel count %d is not supported!\n", spec.channels);
2524  return -1;
2525  }
2526  }
2527 
2528  audio_hw_params->fmt = AV_SAMPLE_FMT_S16;
2529  audio_hw_params->freq = spec.freq;
2530  audio_hw_params->channel_layout = wanted_channel_layout;
2531  audio_hw_params->channels = spec.channels;
2532  audio_hw_params->frame_size = av_samples_get_buffer_size(NULL, audio_hw_params->channels, 1, audio_hw_params->fmt, 1);
2533  audio_hw_params->bytes_per_sec = av_samples_get_buffer_size(NULL, audio_hw_params->channels, audio_hw_params->freq, audio_hw_params->fmt, 1);
2534  if (audio_hw_params->bytes_per_sec <= 0 || audio_hw_params->frame_size <= 0) {
2535  av_log(NULL, AV_LOG_ERROR, "av_samples_get_buffer_size failed\n");
2536  return -1;
2537  }
2538  return spec.size;
2539 }
2540 
2541 /* open a given stream. Return 0 if OK */
2542 static int stream_component_open(VideoState *is, int stream_index)
2543 {
2544  AVFormatContext *ic = is->ic;
2545  AVCodecContext *avctx;
2546  AVCodec *codec;
2547  const char *forced_codec_name = NULL;
2548  AVDictionary *opts = NULL;
2549  AVDictionaryEntry *t = NULL;
2550  int sample_rate, nb_channels;
2551  int64_t channel_layout;
2552  int ret = 0;
2553  int stream_lowres = lowres;
2554 
2555  if (stream_index < 0 || stream_index >= ic->nb_streams)
2556  return -1;
2557 
2558  avctx = avcodec_alloc_context3(NULL);
2559  if (!avctx)
2560  return AVERROR(ENOMEM);
2561 
2562  ret = avcodec_parameters_to_context(avctx, ic->streams[stream_index]->codecpar);
2563  if (ret < 0)
2564  goto fail;
2565  avctx->pkt_timebase = ic->streams[stream_index]->time_base;
2566 
2567  codec = avcodec_find_decoder(avctx->codec_id);
2568 
2569  switch(avctx->codec_type){
2570  case AVMEDIA_TYPE_AUDIO : is->last_audio_stream = stream_index; forced_codec_name = audio_codec_name; break;
2571  case AVMEDIA_TYPE_SUBTITLE: is->last_subtitle_stream = stream_index; forced_codec_name = subtitle_codec_name; break;
2572  case AVMEDIA_TYPE_VIDEO : is->last_video_stream = stream_index; forced_codec_name = video_codec_name; break;
2573  }
2574  if (forced_codec_name)
2575  codec = avcodec_find_decoder_by_name(forced_codec_name);
2576  if (!codec) {
2577  if (forced_codec_name) av_log(NULL, AV_LOG_WARNING,
2578  "No codec could be found with name '%s'\n", forced_codec_name);
2579  else av_log(NULL, AV_LOG_WARNING,
2580  "No codec could be found with id %d\n", avctx->codec_id);
2581  ret = AVERROR(EINVAL);
2582  goto fail;
2583  }
2584 
2585  avctx->codec_id = codec->id;
2586  if (stream_lowres > codec->max_lowres) {
2587  av_log(avctx, AV_LOG_WARNING, "The maximum value for lowres supported by the decoder is %d\n",
2588  codec->max_lowres);
2589  stream_lowres = codec->max_lowres;
2590  }
2591  avctx->lowres = stream_lowres;
2592 
2593  if (fast)
2594  avctx->flags2 |= AV_CODEC_FLAG2_FAST;
2595 
2596  opts = filter_codec_opts(codec_opts, avctx->codec_id, ic, ic->streams[stream_index], codec);
2597  if (!av_dict_get(opts, "threads", NULL, 0))
2598  av_dict_set(&opts, "threads", "auto", 0);
2599  if (stream_lowres)
2600  av_dict_set_int(&opts, "lowres", stream_lowres, 0);
2601  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO || avctx->codec_type == AVMEDIA_TYPE_AUDIO)
2602  av_dict_set(&opts, "refcounted_frames", "1", 0);
2603  if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) {
2604  goto fail;
2605  }
2606  if ((t = av_dict_get(opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2607  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2608  ret = AVERROR_OPTION_NOT_FOUND;
2609  goto fail;
2610  }
2611 
2612  is->eof = 0;
2613  ic->streams[stream_index]->discard = AVDISCARD_DEFAULT;
2614  switch (avctx->codec_type) {
2615  case AVMEDIA_TYPE_AUDIO:
2616 #if CONFIG_AVFILTER
2617  {
2618  AVFilterContext *sink;
2619 
2620  is->audio_filter_src.freq = avctx->sample_rate;
2621  is->audio_filter_src.channels = avctx->channels;
2622  is->audio_filter_src.channel_layout = get_valid_channel_layout(avctx->channel_layout, avctx->channels);
2623  is->audio_filter_src.fmt = avctx->sample_fmt;
2624  if ((ret = configure_audio_filters(is, afilters, 0)) < 0)
2625  goto fail;
2626  sink = is->out_audio_filter;
2627  sample_rate = av_buffersink_get_sample_rate(sink);
2628  nb_channels = av_buffersink_get_channels(sink);
2629  channel_layout = av_buffersink_get_channel_layout(sink);
2630  }
2631 #else
2632  sample_rate = avctx->sample_rate;
2633  nb_channels = avctx->channels;
2634  channel_layout = avctx->channel_layout;
2635 #endif
2636 
2637  /* prepare audio output */
2638  if ((ret = audio_open(is, channel_layout, nb_channels, sample_rate, &is->audio_tgt)) < 0)
2639  goto fail;
2640  is->audio_hw_buf_size = ret;
2641  is->audio_src = is->audio_tgt;
2642  is->audio_buf_size = 0;
2643  is->audio_buf_index = 0;
2644 
2645  /* init averaging filter */
2646  is->audio_diff_avg_coef = exp(log(0.01) / AUDIO_DIFF_AVG_NB);
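 /* Editorial note (not in the original source): exp(log(0.01) / 20) is roughly
  * 0.794, so each new A-V difference is folded into a running sum in which the
  * 20th-oldest measurement has decayed to about 1% of its original weight. */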
2647  is->audio_diff_avg_count = 0;
2648  /* since we do not have a precise enough audio FIFO fullness,
2649  we correct audio sync only if larger than this threshold */
2650  is->audio_diff_threshold = (double)(is->audio_hw_buf_size) / is->audio_tgt.bytes_per_sec;
2651 
2652  is->audio_stream = stream_index;
2653  is->audio_st = ic->streams[stream_index];
2654 
2655  decoder_init(&is->auddec, avctx, &is->audioq, is->continue_read_thread);
2656  if ((is->ic->iformat->flags & (AVFMT_NOBINSEARCH | AVFMT_NOGENSEARCH | AVFMT_NO_BYTE_SEEK)) && !is->ic->iformat->read_seek) {
2657  is->auddec.start_pts = is->audio_st->start_time;
2658  is->auddec.start_pts_tb = is->audio_st->time_base;
2659  }
2660  if ((ret = decoder_start(&is->auddec, audio_thread, is)) < 0)
2661  goto out;
2662  SDL_PauseAudioDevice(audio_dev, 0);
2663  break;
2664  case AVMEDIA_TYPE_VIDEO:
2665  is->video_stream = stream_index;
2666  is->video_st = ic->streams[stream_index];
2667 
2668  decoder_init(&is->viddec, avctx, &is->videoq, is->continue_read_thread);
2669  if ((ret = decoder_start(&is->viddec, video_thread, is)) < 0)
2670  goto out;
2671  is->queue_attachments_req = 1;
2672  break;
2673  case AVMEDIA_TYPE_SUBTITLE:
2674  is->subtitle_stream = stream_index;
2675  is->subtitle_st = ic->streams[stream_index];
2676 
2677  decoder_init(&is->subdec, avctx, &is->subtitleq, is->continue_read_thread);
2678  if ((ret = decoder_start(&is->subdec, subtitle_thread, is)) < 0)
2679  goto out;
2680  break;
2681  default:
2682  break;
2683  }
2684  goto out;
2685 
2686 fail:
2687  avcodec_free_context(&avctx);
2688 out:
2689  av_dict_free(&opts);
2690 
2691  return ret;
2692 }
2693 
2694 static int decode_interrupt_cb(void *ctx)
2695 {
2696  VideoState *is = ctx;
2697  return is->abort_request;
2698 }
2699 
2700 static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue) {
2701  return stream_id < 0 ||
2702  queue->abort_request ||
2703  (st->disposition & AV_DISPOSITION_ATTACHED_PIC) ||
2704  queue->nb_packets > MIN_FRAMES && (!queue->duration || av_q2d(st->time_base) * queue->duration > 1.0);
2705 }
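/* Editorial note (not in the original source): a queue counts as "full enough"
 * once it holds more than MIN_FRAMES (25) packets and, when packet durations
 * are known, more than one second of data; aborted queues and attached-picture
 * streams are always treated as sufficient. */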
2706 
2707 static int is_realtime(AVFormatContext *s)
2708 {
2709  if( !strcmp(s->iformat->name, "rtp")
2710  || !strcmp(s->iformat->name, "rtsp")
2711  || !strcmp(s->iformat->name, "sdp")
2712  )
2713  return 1;
2714 
2715  if(s->pb && ( !strncmp(s->url, "rtp:", 4)
2716  || !strncmp(s->url, "udp:", 4)
2717  )
2718  )
2719  return 1;
2720  return 0;
2721 }
2722 
2723 /* this thread gets the stream from the disk or the network */
2724 static int read_thread(void *arg)
2725 {
2726  VideoState *is = arg;
2727  AVFormatContext *ic = NULL;
2728  int err, i, ret;
2729  int st_index[AVMEDIA_TYPE_NB];
2730  AVPacket pkt1, *pkt = &pkt1;
2731  int64_t stream_start_time;
2732  int pkt_in_play_range = 0;
2733  AVDictionaryEntry *t;
2734  SDL_mutex *wait_mutex = SDL_CreateMutex();
2735  int scan_all_pmts_set = 0;
2736  int64_t pkt_ts;
2737 
2738  if (!wait_mutex) {
2739  av_log(NULL, AV_LOG_FATAL, "SDL_CreateMutex(): %s\n", SDL_GetError());
2740  ret = AVERROR(ENOMEM);
2741  goto fail;
2742  }
2743 
2744  memset(st_index, -1, sizeof(st_index));
2745  is->last_video_stream = is->video_stream = -1;
2746  is->last_audio_stream = is->audio_stream = -1;
2747  is->last_subtitle_stream = is->subtitle_stream = -1;
2748  is->eof = 0;
2749 
2750  ic = avformat_alloc_context();
2751  if (!ic) {
2752  av_log(NULL, AV_LOG_FATAL, "Could not allocate context.\n");
2753  ret = AVERROR(ENOMEM);
2754  goto fail;
2755  }
2756  ic->interrupt_callback.callback = decode_interrupt_cb;
2757  ic->interrupt_callback.opaque = is;
2758  if (!av_dict_get(format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE)) {
2759  av_dict_set(&format_opts, "scan_all_pmts", "1", AV_DICT_DONT_OVERWRITE);
2760  scan_all_pmts_set = 1;
2761  }
2762  err = avformat_open_input(&ic, is->filename, is->iformat, &format_opts);
2763  if (err < 0) {
2764  print_error(is->filename, err);
2765  ret = -1;
2766  goto fail;
2767  }
2768  if (scan_all_pmts_set)
2769  av_dict_set(&format_opts, "scan_all_pmts", NULL, AV_DICT_MATCH_CASE);
2770 
2771  if ((t = av_dict_get(format_opts, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
2772  av_log(NULL, AV_LOG_ERROR, "Option %s not found.\n", t->key);
2773  ret = AVERROR_OPTION_NOT_FOUND;
2774  goto fail;
2775  }
2776  is->ic = ic;
2777 
2778  if (genpts)
2779  ic->flags |= AVFMT_FLAG_GENPTS;
2780 
2781  av_format_inject_global_side_data(ic);
2782 
2783  if (find_stream_info) {
2784  AVDictionary **opts = setup_find_stream_info_opts(ic, codec_opts);
2785  int orig_nb_streams = ic->nb_streams;
2786 
2787  err = avformat_find_stream_info(ic, opts);
2788 
2789  for (i = 0; i < orig_nb_streams; i++)
2790  av_dict_free(&opts[i]);
2791  av_freep(&opts);
2792 
2793  if (err < 0) {
2795  "%s: could not find codec parameters\n", is->filename);
2796  ret = -1;
2797  goto fail;
2798  }
2799  }
2800 
2801  if (ic->pb)
2802  ic->pb->eof_reached = 0; // FIXME hack, ffplay maybe should not use avio_feof() to test for the end
2803 
2804  if (seek_by_bytes < 0)
2805  seek_by_bytes = !!(ic->iformat->flags & AVFMT_TS_DISCONT) && strcmp("ogg", ic->iformat->name);
2806 
2807  is->max_frame_duration = (ic->iformat->flags & AVFMT_TS_DISCONT) ? 10.0 : 3600.0;
2808 
2809  if (!window_title && (t = av_dict_get(ic->metadata, "title", NULL, 0)))
2810  window_title = av_asprintf("%s - %s", t->value, input_filename);
2811 
2812  /* if seeking requested, we execute it */
2813  if (start_time != AV_NOPTS_VALUE) {
2814  int64_t timestamp;
2815 
2816  timestamp = start_time;
2817  /* add the stream start time */
2818  if (ic->start_time != AV_NOPTS_VALUE)
2819  timestamp += ic->start_time;
2820  ret = avformat_seek_file(ic, -1, INT64_MIN, timestamp, INT64_MAX, 0);
2821  if (ret < 0) {
2822  av_log(NULL, AV_LOG_WARNING, "%s: could not seek to position %0.3f\n",
2823  is->filename, (double)timestamp / AV_TIME_BASE);
2824  }
2825  }
2826 
2827  is->realtime = is_realtime(ic);
2828 
2829  if (show_status)
2830  av_dump_format(ic, 0, is->filename, 0);
2831 
2832  for (i = 0; i < ic->nb_streams; i++) {
2833  AVStream *st = ic->streams[i];
2834  enum AVMediaType type = st->codecpar->codec_type;
2835  st->discard = AVDISCARD_ALL;
2836  if (type >= 0 && wanted_stream_spec[type] && st_index[type] == -1)
2837  if (avformat_match_stream_specifier(ic, st, wanted_stream_spec[type]) > 0)
2838  st_index[type] = i;
2839  }
2840  for (i = 0; i < AVMEDIA_TYPE_NB; i++) {
2841  if (wanted_stream_spec[i] && st_index[i] == -1) {
2842  av_log(NULL, AV_LOG_ERROR, "Stream specifier %s does not match any %s stream\n", wanted_stream_spec[i], av_get_media_type_string(i));
2843  st_index[i] = INT_MAX;
2844  }
2845  }
2846 
2847  if (!video_disable)
2848  st_index[AVMEDIA_TYPE_VIDEO] =
2849  av_find_best_stream(ic, AVMEDIA_TYPE_VIDEO,
2850  st_index[AVMEDIA_TYPE_VIDEO], -1, NULL, 0);
2851  if (!audio_disable)
2852  st_index[AVMEDIA_TYPE_AUDIO] =
2853  av_find_best_stream(ic, AVMEDIA_TYPE_AUDIO,
2854  st_index[AVMEDIA_TYPE_AUDIO],
2855  st_index[AVMEDIA_TYPE_VIDEO],
2856  NULL, 0);
2857  if (!video_disable && !subtitle_disable)
2858  st_index[AVMEDIA_TYPE_SUBTITLE] =
2859  av_find_best_stream(ic, AVMEDIA_TYPE_SUBTITLE,
2860  st_index[AVMEDIA_TYPE_SUBTITLE],
2861  (st_index[AVMEDIA_TYPE_AUDIO] >= 0 ?
2862  st_index[AVMEDIA_TYPE_AUDIO] :
2863  st_index[AVMEDIA_TYPE_VIDEO]),
2864  NULL, 0);
2865 
2866  is->show_mode = show_mode;
2867  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2868  AVStream *st = ic->streams[st_index[AVMEDIA_TYPE_VIDEO]];
2869  AVCodecParameters *codecpar = st->codecpar;
2870  AVRational sar = av_guess_sample_aspect_ratio(ic, st, NULL);
2871  if (codecpar->width)
2872  set_default_window_size(codecpar->width, codecpar->height, sar);
2873  }
2874 
2875  /* open the streams */
2876  if (st_index[AVMEDIA_TYPE_AUDIO] >= 0) {
2877  stream_component_open(is, st_index[AVMEDIA_TYPE_AUDIO]);
2878  }
2879 
2880  ret = -1;
2881  if (st_index[AVMEDIA_TYPE_VIDEO] >= 0) {
2882  ret = stream_component_open(is, st_index[AVMEDIA_TYPE_VIDEO]);
2883  }
2884  if (is->show_mode == SHOW_MODE_NONE)
2885  is->show_mode = ret >= 0 ? SHOW_MODE_VIDEO : SHOW_MODE_RDFT;
2886 
2887  if (st_index[AVMEDIA_TYPE_SUBTITLE] >= 0) {
2888  stream_component_open(is, st_index[AVMEDIA_TYPE_SUBTITLE]);
2889  }
2890 
2891  if (is->video_stream < 0 && is->audio_stream < 0) {
2892  av_log(NULL, AV_LOG_FATAL, "Failed to open file '%s' or configure filtergraph\n",
2893  is->filename);
2894  ret = -1;
2895  goto fail;
2896  }
2897 
2898  if (infinite_buffer < 0 && is->realtime)
2899  infinite_buffer = 1;
2900 
2901  for (;;) {
2902  if (is->abort_request)
2903  break;
2904  if (is->paused != is->last_paused) {
2905  is->last_paused = is->paused;
2906  if (is->paused)
2907  is->read_pause_return = av_read_pause(ic);
2908  else
2909  av_read_play(ic);
2910  }
2911 #if CONFIG_RTSP_DEMUXER || CONFIG_MMSH_PROTOCOL
2912  if (is->paused &&
2913  (!strcmp(ic->iformat->name, "rtsp") ||
2914  (ic->pb && !strncmp(input_filename, "mmsh:", 5)))) {
2915  /* wait 10 ms to avoid trying to get another packet */
2916  /* XXX: horrible */
2917  SDL_Delay(10);
2918  continue;
2919  }
2920 #endif
2921  if (is->seek_req) {
2922  int64_t seek_target = is->seek_pos;
2923  int64_t seek_min = is->seek_rel > 0 ? seek_target - is->seek_rel + 2: INT64_MIN;
2924  int64_t seek_max = is->seek_rel < 0 ? seek_target - is->seek_rel - 2: INT64_MAX;
2925 // FIXME the +-2 is due to rounding being not done in the correct direction in generation
2926 // of the seek_pos/seek_rel variables
2927 
2928  ret = avformat_seek_file(is->ic, -1, seek_min, seek_target, seek_max, is->seek_flags);
2929  if (ret < 0) {
2931  "%s: error while seeking\n", is->ic->url);
2932  } else {
2933  if (is->audio_stream >= 0) {
2934  packet_queue_flush(&is->audioq);
2935  packet_queue_put(&is->audioq, &flush_pkt);
2936  }
2937  if (is->subtitle_stream >= 0) {
2938  packet_queue_flush(&is->subtitleq);
2939  packet_queue_put(&is->subtitleq, &flush_pkt);
2940  }
2941  if (is->video_stream >= 0) {
2942  packet_queue_flush(&is->videoq);
2943  packet_queue_put(&is->videoq, &flush_pkt);
2944  }
2945  if (is->seek_flags & AVSEEK_FLAG_BYTE) {
2946  set_clock(&is->extclk, NAN, 0);
2947  } else {
2948  set_clock(&is->extclk, seek_target / (double)AV_TIME_BASE, 0);
2949  }
2950  }
2951  is->seek_req = 0;
2952  is->queue_attachments_req = 1;
2953  is->eof = 0;
2954  if (is->paused)
2955  step_to_next_frame(is);
2956  }
2957  if (is->queue_attachments_req) {
2958  if (is->video_st && is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC) {
2959  AVPacket copy = { 0 };
2960  if ((ret = av_packet_ref(&copy, &is->video_st->attached_pic)) < 0)
2961  goto fail;
2962  packet_queue_put(&is->videoq, &copy);
2963  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
2964  }
2965  is->queue_attachments_req = 0;
2966  }
2967 
2968  /* if the queues are full, no need to read more */
2969  if (infinite_buffer<1 &&
2970  (is->audioq.size + is->videoq.size + is->subtitleq.size > MAX_QUEUE_SIZE
2971  || (stream_has_enough_packets(is->audio_st, is->audio_stream, &is->audioq) &&
2972  stream_has_enough_packets(is->video_st, is->video_stream, &is->videoq) &&
2973  stream_has_enough_packets(is->subtitle_st, is->subtitle_stream, &is->subtitleq)))) {
2974  /* wait 10 ms */
2975  SDL_LockMutex(wait_mutex);
2976  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
2977  SDL_UnlockMutex(wait_mutex);
2978  continue;
2979  }
2980  if (!is->paused &&
2981  (!is->audio_st || (is->auddec.finished == is->audioq.serial && frame_queue_nb_remaining(&is->sampq) == 0)) &&
2982  (!is->video_st || (is->viddec.finished == is->videoq.serial && frame_queue_nb_remaining(&is->pictq) == 0))) {
2983  if (loop != 1 && (!loop || --loop)) {
2984  stream_seek(is, start_time != AV_NOPTS_VALUE ? start_time : 0, 0, 0);
2985  } else if (autoexit) {
2986  ret = AVERROR_EOF;
2987  goto fail;
2988  }
2989  }
2990  ret = av_read_frame(ic, pkt);
2991  if (ret < 0) {
2992  if ((ret == AVERROR_EOF || avio_feof(ic->pb)) && !is->eof) {
2993  if (is->video_stream >= 0)
2994  packet_queue_put_nullpacket(&is->videoq, is->video_stream);
2995  if (is->audio_stream >= 0)
2996  packet_queue_put_nullpacket(&is->audioq, is->audio_stream);
2997  if (is->subtitle_stream >= 0)
2998  packet_queue_put_nullpacket(&is->subtitleq, is->subtitle_stream);
2999  is->eof = 1;
3000  }
3001  if (ic->pb && ic->pb->error)
3002  break;
3003  SDL_LockMutex(wait_mutex);
3004  SDL_CondWaitTimeout(is->continue_read_thread, wait_mutex, 10);
3005  SDL_UnlockMutex(wait_mutex);
3006  continue;
3007  } else {
3008  is->eof = 0;
3009  }
3010  /* check if packet is in play range specified by user, then queue, otherwise discard */
3011  stream_start_time = ic->streams[pkt->stream_index]->start_time;
3012  pkt_ts = pkt->pts == AV_NOPTS_VALUE ? pkt->dts : pkt->pts;
3013  pkt_in_play_range = duration == AV_NOPTS_VALUE ||
3014  (pkt_ts - (stream_start_time != AV_NOPTS_VALUE ? stream_start_time : 0)) *
3015  av_q2d(ic->streams[pkt->stream_index]->time_base) -
3016  (double)(start_time != AV_NOPTS_VALUE ? start_time : 0) / 1000000
3017  <= ((double)duration / 1000000);
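 /* Editorial note (not in the original source): the expression above converts
  * the packet timestamp to seconds relative to the stream start and keeps the
  * packet only while it falls inside the user-supplied -ss/-t play range;
  * duration == AV_NOPTS_VALUE disables the check entirely. */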
3018  if (pkt->stream_index == is->audio_stream && pkt_in_play_range) {
3019  packet_queue_put(&is->audioq, pkt);
3020  } else if (pkt->stream_index == is->video_stream && pkt_in_play_range
3021  && !(is->video_st->disposition & AV_DISPOSITION_ATTACHED_PIC)) {
3022  packet_queue_put(&is->videoq, pkt);
3023  } else if (pkt->stream_index == is->subtitle_stream && pkt_in_play_range) {
3024  packet_queue_put(&is->subtitleq, pkt);
3025  } else {
3026  av_packet_unref(pkt);
3027  }
3028  }
3029 
3030  ret = 0;
3031  fail:
3032  if (ic && !is->ic)
3033  avformat_close_input(&ic);
3034 
3035  if (ret != 0) {
3036  SDL_Event event;
3037 
3038  event.type = FF_QUIT_EVENT;
3039  event.user.data1 = is;
3040  SDL_PushEvent(&event);
3041  }
3042  SDL_DestroyMutex(wait_mutex);
3043  return 0;
3044 }
3045 
3046 static VideoState *stream_open(const char *filename, AVInputFormat *iformat)
3047 {
3048  VideoState *is;
3049 
3050  is = av_mallocz(sizeof(VideoState));
3051  if (!is)
3052  return NULL;
3053  is->filename = av_strdup(filename);
3054  if (!is->filename)
3055  goto fail;
3056  is->iformat = iformat;
3057  is->ytop = 0;
3058  is->xleft = 0;
3059 
3060  /* start video display */
3061  if (frame_queue_init(&is->pictq, &is->videoq, VIDEO_PICTURE_QUEUE_SIZE, 1) < 0)
3062  goto fail;
3063  if (frame_queue_init(&is->subpq, &is->subtitleq, SUBPICTURE_QUEUE_SIZE, 0) < 0)
3064  goto fail;
3065  if (frame_queue_init(&is->sampq, &is->audioq, SAMPLE_QUEUE_SIZE, 1) < 0)
3066  goto fail;
3067 
3068  if (packet_queue_init(&is->videoq) < 0 ||
3069  packet_queue_init(&is->audioq) < 0 ||
3070  packet_queue_init(&is->subtitleq) < 0)
3071  goto fail;
3072 
3073  if (!(is->continue_read_thread = SDL_CreateCond())) {
3074  av_log(NULL, AV_LOG_FATAL, "SDL_CreateCond(): %s\n", SDL_GetError());
3075  goto fail;
3076  }
3077 
3078  init_clock(&is->vidclk, &is->videoq.serial);
3079  init_clock(&is->audclk, &is->audioq.serial);
3080  init_clock(&is->extclk, &is->extclk.serial);
3081  is->audio_clock_serial = -1;
3082  if (startup_volume < 0)
3083  av_log(NULL, AV_LOG_WARNING, "-volume=%d < 0, setting to 0\n", startup_volume);
3084  if (startup_volume > 100)
3085  av_log(NULL, AV_LOG_WARNING, "-volume=%d > 100, setting to 100\n", startup_volume);
3086  startup_volume = av_clip(startup_volume, 0, 100);
3087  startup_volume = av_clip(SDL_MIX_MAXVOLUME * startup_volume / 100, 0, SDL_MIX_MAXVOLUME);
3088  is->audio_volume = startup_volume;
3089  is->muted = 0;
3090  is->av_sync_type = av_sync_type;
3091  is->read_tid = SDL_CreateThread(read_thread, "read_thread", is);
3092  if (!is->read_tid) {
3093  av_log(NULL, AV_LOG_FATAL, "SDL_CreateThread(): %s\n", SDL_GetError());
3094 fail:
3095  stream_close(is);
3096  return NULL;
3097  }
3098  return is;
3099 }
3100 
3101 static void stream_cycle_channel(VideoState *is, int codec_type)
3102 {
3103  AVFormatContext *ic = is->ic;
3104  int start_index, stream_index;
3105  int old_index;
3106  AVStream *st;
3107  AVProgram *p = NULL;
3108  int nb_streams = is->ic->nb_streams;
3109 
3110  if (codec_type == AVMEDIA_TYPE_VIDEO) {
3111  start_index = is->last_video_stream;
3112  old_index = is->video_stream;
3113  } else if (codec_type == AVMEDIA_TYPE_AUDIO) {
3114  start_index = is->last_audio_stream;
3115  old_index = is->audio_stream;
3116  } else {
3117  start_index = is->last_subtitle_stream;
3118  old_index = is->subtitle_stream;
3119  }
3120  stream_index = start_index;
3121 
3122  if (codec_type != AVMEDIA_TYPE_VIDEO && is->video_stream != -1) {
3123  p = av_find_program_from_stream(ic, NULL, is->video_stream);
3124  if (p) {
3125  nb_streams = p->nb_stream_indexes;
3126  for (start_index = 0; start_index < nb_streams; start_index++)
3127  if (p->stream_index[start_index] == stream_index)
3128  break;
3129  if (start_index == nb_streams)
3130  start_index = -1;
3131  stream_index = start_index;
3132  }
3133  }
3134 
3135  for (;;) {
3136  if (++stream_index >= nb_streams)
3137  {
3138  if (codec_type == AVMEDIA_TYPE_SUBTITLE)
3139  {
3140  stream_index = -1;
3141  is->last_subtitle_stream = -1;
3142  goto the_end;
3143  }
3144  if (start_index == -1)
3145  return;
3146  stream_index = 0;
3147  }
3148  if (stream_index == start_index)
3149  return;
3150  st = is->ic->streams[p ? p->stream_index[stream_index] : stream_index];
3151  if (st->codecpar->codec_type == codec_type) {
3152  /* check that parameters are OK */
3153  switch (codec_type) {
3154  case AVMEDIA_TYPE_AUDIO:
3155  if (st->codecpar->sample_rate != 0 &&
3156  st->codecpar->channels != 0)
3157  goto the_end;
3158  break;
3159  case AVMEDIA_TYPE_VIDEO:
3160  case AVMEDIA_TYPE_SUBTITLE:
3161  goto the_end;
3162  default:
3163  break;
3164  }
3165  }
3166  }
3167  the_end:
3168  if (p && stream_index != -1)
3169  stream_index = p->stream_index[stream_index];
3170  av_log(NULL, AV_LOG_INFO, "Switch %s stream from #%d to #%d\n",
3171  av_get_media_type_string(codec_type),
3172  old_index,
3173  stream_index);
3174 
3175  stream_component_close(is, old_index);
3176  stream_component_open(is, stream_index);
3177 }
3178 
3179 
3180 static void toggle_full_screen(VideoState *is)
3181 {
3182  is_full_screen = !is_full_screen;
3183  SDL_SetWindowFullscreen(window, is_full_screen ? SDL_WINDOW_FULLSCREEN_DESKTOP : 0);
3184 }
3185 
3186 static void toggle_audio_display(VideoState *is)
3187 {
3188  int next = is->show_mode;
3189  do {
3190  next = (next + 1) % SHOW_MODE_NB;
3191  } while (next != is->show_mode && (next == SHOW_MODE_VIDEO && !is->video_st || next != SHOW_MODE_VIDEO && !is->audio_st));
3192  if (is->show_mode != next) {
3193  is->force_refresh = 1;
3194  is->show_mode = next;
3195  }
3196 }
3197 
3198 static void refresh_loop_wait_event(VideoState *is, SDL_Event *event) {
3199  double remaining_time = 0.0;
3200  SDL_PumpEvents();
3201  while (!SDL_PeepEvents(event, 1, SDL_GETEVENT, SDL_FIRSTEVENT, SDL_LASTEVENT)) {
3202  if (!cursor_hidden && av_gettime_relative() - cursor_last_shown > CURSOR_HIDE_DELAY) {
3203  SDL_ShowCursor(0);
3204  cursor_hidden = 1;
3205  }
3206  if (remaining_time > 0.0)
3207  av_usleep((int64_t)(remaining_time * 1000000.0));
3208  remaining_time = REFRESH_RATE;
3209  if (is->show_mode != SHOW_MODE_NONE && (!is->paused || is->force_refresh))
3210  video_refresh(is, &remaining_time);
3211  SDL_PumpEvents();
3212  }
3213 }
3214 
3215 static void seek_chapter(VideoState *is, int incr)
3216 {
3217  int64_t pos = get_master_clock(is) * AV_TIME_BASE;
3218  int i;
3219 
3220  if (!is->ic->nb_chapters)
3221  return;
3222 
3223  /* find the current chapter */
3224  for (i = 0; i < is->ic->nb_chapters; i++) {
3225  AVChapter *ch = is->ic->chapters[i];
3226  if (av_compare_ts(pos, AV_TIME_BASE_Q, ch->start, ch->time_base) < 0) {
3227  i--;
3228  break;
3229  }
3230  }
3231 
3232  i += incr;
3233  i = FFMAX(i, 0);
3234  if (i >= is->ic->nb_chapters)
3235  return;
3236 
3237  av_log(NULL, AV_LOG_VERBOSE, "Seeking to chapter %d.\n", i);
3238  stream_seek(is, av_rescale_q(is->ic->chapters[i]->start, is->ic->chapters[i]->time_base,
3239  AV_TIME_BASE_Q), 0, 0);
3240 }
3241 
3242 /* handle an event sent by the GUI */
3243 static void event_loop(VideoState *cur_stream)
3244 {
3245  SDL_Event event;
3246  double incr, pos, frac;
3247 
3248  for (;;) {
3249  double x;
3250  refresh_loop_wait_event(cur_stream, &event);
3251  switch (event.type) {
3252  case SDL_KEYDOWN:
3253  if (exit_on_keydown) {
3254  do_exit(cur_stream);
3255  break;
3256  }
3257  switch (event.key.keysym.sym) {
3258  case SDLK_ESCAPE:
3259  case SDLK_q:
3260  do_exit(cur_stream);
3261  break;
3262  case SDLK_f:
3263  toggle_full_screen(cur_stream);
3264  cur_stream->force_refresh = 1;
3265  break;
3266  case SDLK_p:
3267  case SDLK_SPACE:
3268  toggle_pause(cur_stream);
3269  break;
3270  case SDLK_m:
3271  toggle_mute(cur_stream);
3272  break;
3273  case SDLK_KP_MULTIPLY:
3274  case SDLK_0:
3275  update_volume(cur_stream, 1, SDL_VOLUME_STEP);
3276  break;
3277  case SDLK_KP_DIVIDE:
3278  case SDLK_9:
3279  update_volume(cur_stream, -1, SDL_VOLUME_STEP);
3280  break;
3281  case SDLK_s: // S: Step to next frame
3282  step_to_next_frame(cur_stream);
3283  break;
3284  case SDLK_a:
3285  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3286  break;
3287  case SDLK_v:
3288  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3289  break;
3290  case SDLK_c:
3291  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_VIDEO);
3292  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_AUDIO);
3293  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3294  break;
3295  case SDLK_t:
3296  stream_cycle_channel(cur_stream, AVMEDIA_TYPE_SUBTITLE);
3297  break;
3298  case SDLK_w:
3299 #if CONFIG_AVFILTER
3300  if (cur_stream->show_mode == SHOW_MODE_VIDEO && cur_stream->vfilter_idx < nb_vfilters - 1) {
3301  if (++cur_stream->vfilter_idx >= nb_vfilters)
3302  cur_stream->vfilter_idx = 0;
3303  } else {
3304  cur_stream->vfilter_idx = 0;
3305  toggle_audio_display(cur_stream);
3306  }
3307 #else
3308  toggle_audio_display(cur_stream);
3309 #endif
3310  break;
3311  case SDLK_PAGEUP:
3312  if (cur_stream->ic->nb_chapters <= 1) {
3313  incr = 600.0;
3314  goto do_seek;
3315  }
3316  seek_chapter(cur_stream, 1);
3317  break;
3318  case SDLK_PAGEDOWN:
3319  if (cur_stream->ic->nb_chapters <= 1) {
3320  incr = -600.0;
3321  goto do_seek;
3322  }
3323  seek_chapter(cur_stream, -1);
3324  break;
3325  case SDLK_LEFT:
3326  incr = -10.0;
3327  goto do_seek;
3328  case SDLK_RIGHT:
3329  incr = 10.0;
3330  goto do_seek;
3331  case SDLK_UP:
3332  incr = 60.0;
3333  goto do_seek;
3334  case SDLK_DOWN:
3335  incr = -60.0;
3336  do_seek:
3337  if (seek_by_bytes) {
3338  pos = -1;
3339  if (pos < 0 && cur_stream->video_stream >= 0)
3340  pos = frame_queue_last_pos(&cur_stream->pictq);
3341  if (pos < 0 && cur_stream->audio_stream >= 0)
3342  pos = frame_queue_last_pos(&cur_stream->sampq);
3343  if (pos < 0)
3344  pos = avio_tell(cur_stream->ic->pb);
3345  if (cur_stream->ic->bit_rate)
3346  incr *= cur_stream->ic->bit_rate / 8.0;
3347  else
3348  incr *= 180000.0;
3349  pos += incr;
3350  stream_seek(cur_stream, pos, incr, 1);
3351  } else {
3352  pos = get_master_clock(cur_stream);
3353  if (isnan(pos))
3354  pos = (double)cur_stream->seek_pos / AV_TIME_BASE;
3355  pos += incr;
3356  if (cur_stream->ic->start_time != AV_NOPTS_VALUE && pos < cur_stream->ic->start_time / (double)AV_TIME_BASE)
3357  pos = cur_stream->ic->start_time / (double)AV_TIME_BASE;
3358  stream_seek(cur_stream, (int64_t)(pos * AV_TIME_BASE), (int64_t)(incr * AV_TIME_BASE), 0);
3359  }
3360  break;
3361  default:
3362  break;
3363  }
3364  break;
3365  case SDL_MOUSEBUTTONDOWN:
3366  if (exit_on_mousedown) {
3367  do_exit(cur_stream);
3368  break;
3369  }
3370  if (event.button.button == SDL_BUTTON_LEFT) {
3371  static int64_t last_mouse_left_click = 0;
3372  if (av_gettime_relative() - last_mouse_left_click <= 500000) {
3373  toggle_full_screen(cur_stream);
3374  cur_stream->force_refresh = 1;
3375  last_mouse_left_click = 0;
3376  } else {
3377  last_mouse_left_click = av_gettime_relative();
3378  }
3379  }
3380  case SDL_MOUSEMOTION:
3381  if (cursor_hidden) {
3382  SDL_ShowCursor(1);
3383  cursor_hidden = 0;
3384  }
3385  cursor_last_shown = av_gettime_relative();
3386  if (event.type == SDL_MOUSEBUTTONDOWN) {
3387  if (event.button.button != SDL_BUTTON_RIGHT)
3388  break;
3389  x = event.button.x;
3390  } else {
3391  if (!(event.motion.state & SDL_BUTTON_RMASK))
3392  break;
3393  x = event.motion.x;
3394  }
3395  if (seek_by_bytes || cur_stream->ic->duration <= 0) {
3396  uint64_t size = avio_size(cur_stream->ic->pb);
3397  stream_seek(cur_stream, size*x/cur_stream->width, 0, 1);
3398  } else {
3399  int64_t ts;
3400  int ns, hh, mm, ss;
3401  int tns, thh, tmm, tss;
3402  tns = cur_stream->ic->duration / 1000000LL;
3403  thh = tns / 3600;
3404  tmm = (tns % 3600) / 60;
3405  tss = (tns % 60);
3406  frac = x / cur_stream->width;
3407  ns = frac * tns;
3408  hh = ns / 3600;
3409  mm = (ns % 3600) / 60;
3410  ss = (ns % 60);
3412  "Seek to %2.0f%% (%2d:%02d:%02d) of total duration (%2d:%02d:%02d) \n", frac*100,
3413  hh, mm, ss, thh, tmm, tss);
3414  ts = frac * cur_stream->ic->duration;
3415  if (cur_stream->ic->start_time != AV_NOPTS_VALUE)
3416  ts += cur_stream->ic->start_time;
3417  stream_seek(cur_stream, ts, 0, 0);
3418  }
3419  break;
3420  case SDL_WINDOWEVENT:
3421  switch (event.window.event) {
3422  case SDL_WINDOWEVENT_RESIZED:
3423  screen_width = cur_stream->width = event.window.data1;
3424  screen_height = cur_stream->height = event.window.data2;
3425  if (cur_stream->vis_texture) {
3426  SDL_DestroyTexture(cur_stream->vis_texture);
3427  cur_stream->vis_texture = NULL;
3428  }
3429  case SDL_WINDOWEVENT_EXPOSED:
3430  cur_stream->force_refresh = 1;
3431  }
3432  break;
3433  case SDL_QUIT:
3434  case FF_QUIT_EVENT:
3435  do_exit(cur_stream);
3436  break;
3437  default:
3438  break;
3439  }
3440  }
3441 }
3442 
3443 static int opt_frame_size(void *optctx, const char *opt, const char *arg)
3444 {
3445  av_log(NULL, AV_LOG_WARNING, "Option -s is deprecated, use -video_size.\n");
3446  return opt_default(NULL, "video_size", arg);
3447 }
3448 
3449 static int opt_width(void *optctx, const char *opt, const char *arg)
3450 {
3451  screen_width = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3452  return 0;
3453 }
3454 
3455 static int opt_height(void *optctx, const char *opt, const char *arg)
3456 {
3457  screen_height = parse_number_or_die(opt, arg, OPT_INT64, 1, INT_MAX);
3458  return 0;
3459 }
3460 
3461 static int opt_format(void *optctx, const char *opt, const char *arg)
3462 {
3463  file_iformat = av_find_input_format(arg);
3464  if (!file_iformat) {
3465  av_log(NULL, AV_LOG_FATAL, "Unknown input format: %s\n", arg);
3466  return AVERROR(EINVAL);
3467  }
3468  return 0;
3469 }
3470 
3471 static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
3472 {
3473  av_log(NULL, AV_LOG_WARNING, "Option -pix_fmt is deprecated, use -pixel_format.\n");
3474  return opt_default(NULL, "pixel_format", arg);
3475 }
3476 
3477 static int opt_sync(void *optctx, const char *opt, const char *arg)
3478 {
3479  if (!strcmp(arg, "audio"))
3481  else if (!strcmp(arg, "video"))
3483  else if (!strcmp(arg, "ext"))
3485  else {
3486  av_log(NULL, AV_LOG_ERROR, "Unknown value for %s: %s\n", opt, arg);
3487  exit(1);
3488  }
3489  return 0;
3490 }
3491 
3492 static int opt_seek(void *optctx, const char *opt, const char *arg)
3493 {
3494  start_time = parse_time_or_die(opt, arg, 1);
3495  return 0;
3496 }
3497 
3498 static int opt_duration(void *optctx, const char *opt, const char *arg)
3499 {
3500  duration = parse_time_or_die(opt, arg, 1);
3501  return 0;
3502 }
3503 
3504 static int opt_show_mode(void *optctx, const char *opt, const char *arg)
3505 {
3506  show_mode = !strcmp(arg, "video") ? SHOW_MODE_VIDEO :
3507  !strcmp(arg, "waves") ? SHOW_MODE_WAVES :
3508  !strcmp(arg, "rdft" ) ? SHOW_MODE_RDFT :
3509  parse_number_or_die(opt, arg, OPT_INT, 0, SHOW_MODE_NB-1);
3510  return 0;
3511 }
3512 
3513 static void opt_input_file(void *optctx, const char *filename)
3514 {
3515  if (input_filename) {
3517  "Argument '%s' provided as input filename, but '%s' was already specified.\n",
3518  filename, input_filename);
3519  exit(1);
3520  }
3521  if (!strcmp(filename, "-"))
3522  filename = "pipe:";
3523  input_filename = filename;
3524 }
3525 
3526 static int opt_codec(void *optctx, const char *opt, const char *arg)
3527 {
3528  const char *spec = strchr(opt, ':');
3529  if (!spec) {
3531  "No media specifier was specified in '%s' in option '%s'\n",
3532  arg, opt);
3533  return AVERROR(EINVAL);
3534  }
3535  spec++;
3536  switch (spec[0]) {
3537  case 'a' : audio_codec_name = arg; break;
3538  case 's' : subtitle_codec_name = arg; break;
3539  case 'v' : video_codec_name = arg; break;
3540  default:
3542  "Invalid media specifier '%s' in option '%s'\n", spec, opt);
3543  return AVERROR(EINVAL);
3544  }
3545  return 0;
3546 }
3547 
3548 static int dummy;
3549 
3550 static const OptionDef options[] = {
3552  { "x", HAS_ARG, { .func_arg = opt_width }, "force displayed width", "width" },
3553  { "y", HAS_ARG, { .func_arg = opt_height }, "force displayed height", "height" },
3554  { "s", HAS_ARG | OPT_VIDEO, { .func_arg = opt_frame_size }, "set frame size (WxH or abbreviation)", "size" },
3555  { "fs", OPT_BOOL, { &is_full_screen }, "force full screen" },
3556  { "an", OPT_BOOL, { &audio_disable }, "disable audio" },
3557  { "vn", OPT_BOOL, { &video_disable }, "disable video" },
3558  { "sn", OPT_BOOL, { &subtitle_disable }, "disable subtitling" },
3559  { "ast", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_AUDIO] }, "select desired audio stream", "stream_specifier" },
3560  { "vst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_VIDEO] }, "select desired video stream", "stream_specifier" },
3561  { "sst", OPT_STRING | HAS_ARG | OPT_EXPERT, { &wanted_stream_spec[AVMEDIA_TYPE_SUBTITLE] }, "select desired subtitle stream", "stream_specifier" },
3562  { "ss", HAS_ARG, { .func_arg = opt_seek }, "seek to a given position in seconds", "pos" },
3563  { "t", HAS_ARG, { .func_arg = opt_duration }, "play \"duration\" seconds of audio/video", "duration" },
3564  { "bytes", OPT_INT | HAS_ARG, { &seek_by_bytes }, "seek by bytes 0=off 1=on -1=auto", "val" },
3565  { "nodisp", OPT_BOOL, { &display_disable }, "disable graphical display" },
3566  { "noborder", OPT_BOOL, { &borderless }, "borderless window" },
3567  { "volume", OPT_INT | HAS_ARG, { &startup_volume}, "set startup volume 0=min 100=max", "volume" },
3568  { "f", HAS_ARG, { .func_arg = opt_format }, "force format", "fmt" },
3569  { "pix_fmt", HAS_ARG | OPT_EXPERT | OPT_VIDEO, { .func_arg = opt_frame_pix_fmt }, "set pixel format", "format" },
3570  { "stats", OPT_BOOL | OPT_EXPERT, { &show_status }, "show status", "" },
3571  { "fast", OPT_BOOL | OPT_EXPERT, { &fast }, "non spec compliant optimizations", "" },
3572  { "genpts", OPT_BOOL | OPT_EXPERT, { &genpts }, "generate pts", "" },
3573  { "drp", OPT_INT | HAS_ARG | OPT_EXPERT, { &decoder_reorder_pts }, "let decoder reorder pts 0=off 1=on -1=auto", ""},
3574  { "lowres", OPT_INT | HAS_ARG | OPT_EXPERT, { &lowres }, "", "" },
3575  { "sync", HAS_ARG | OPT_EXPERT, { .func_arg = opt_sync }, "set audio-video sync. type (type=audio/video/ext)", "type" },
3576  { "autoexit", OPT_BOOL | OPT_EXPERT, { &autoexit }, "exit at the end", "" },
3577  { "exitonkeydown", OPT_BOOL | OPT_EXPERT, { &exit_on_keydown }, "exit on key down", "" },
3578  { "exitonmousedown", OPT_BOOL | OPT_EXPERT, { &exit_on_mousedown }, "exit on mouse down", "" },
3579  { "loop", OPT_INT | HAS_ARG | OPT_EXPERT, { &loop }, "set number of times the playback shall be looped", "loop count" },
3580  { "framedrop", OPT_BOOL | OPT_EXPERT, { &framedrop }, "drop frames when cpu is too slow", "" },
3581  { "infbuf", OPT_BOOL | OPT_EXPERT, { &infinite_buffer }, "don't limit the input buffer size (useful with realtime streams)", "" },
3582  { "window_title", OPT_STRING | HAS_ARG, { &window_title }, "set window title", "window title" },
3583 #if CONFIG_AVFILTER
3584  { "vf", OPT_EXPERT | HAS_ARG, { .func_arg = opt_add_vfilter }, "set video filters", "filter_graph" },
3585  { "af", OPT_STRING | HAS_ARG, { &afilters }, "set audio filters", "filter_graph" },
3586 #endif
3587  { "rdftspeed", OPT_INT | HAS_ARG| OPT_AUDIO | OPT_EXPERT, { &rdftspeed }, "rdft speed", "msecs" },
3588  { "showmode", HAS_ARG, { .func_arg = opt_show_mode}, "select show mode (0 = video, 1 = waves, 2 = RDFT)", "mode" },
3589  { "default", HAS_ARG | OPT_AUDIO | OPT_VIDEO | OPT_EXPERT, { .func_arg = opt_default }, "generic catch all option", "" },
3590  { "i", OPT_BOOL, { &dummy}, "read specified file", "input_file"},
3591  { "codec", HAS_ARG, { .func_arg = opt_codec}, "force decoder", "decoder_name" },
3592  { "acodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &audio_codec_name }, "force audio decoder", "decoder_name" },
3593  { "scodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &subtitle_codec_name }, "force subtitle decoder", "decoder_name" },
3594  { "vcodec", HAS_ARG | OPT_STRING | OPT_EXPERT, { &video_codec_name }, "force video decoder", "decoder_name" },
3595  { "autorotate", OPT_BOOL, { &autorotate }, "automatically rotate video", "" },
3596  { "find_stream_info", OPT_BOOL | OPT_INPUT | OPT_EXPERT, { &find_stream_info },
3597  "read and decode the streams to fill missing information with heuristics" },
3598  { NULL, },
3599 };
3600 
3601 static void show_usage(void)
3602 {
3603  av_log(NULL, AV_LOG_INFO, "Simple media player\n");
3604  av_log(NULL, AV_LOG_INFO, "usage: %s [options] input_file\n", program_name);
3605  av_log(NULL, AV_LOG_INFO, "\n");
3606 }
3607 
3608 void show_help_default(const char *opt, const char *arg)
3609 {
3610  av_log_set_callback(log_callback_help);
3611  show_usage();
3612  show_help_options(options, "Main options:", 0, OPT_EXPERT, 0);
3613  show_help_options(options, "Advanced options:", OPT_EXPERT, 0, 0);
3614  printf("\n");
3617 #if !CONFIG_AVFILTER
3619 #else
3621 #endif
3622  printf("\nWhile playing:\n"
3623  "q, ESC quit\n"
3624  "f toggle full screen\n"
3625  "p, SPC pause\n"
3626  "m toggle mute\n"
3627  "9, 0 decrease and increase volume respectively\n"
3628  "/, * decrease and increase volume respectively\n"
3629  "a cycle audio channel in the current program\n"
3630  "v cycle video channel\n"
3631  "t cycle subtitle channel in the current program\n"
3632  "c cycle program\n"
3633  "w cycle video filters or show modes\n"
3634  "s activate frame-step mode\n"
3635  "left/right seek backward/forward 10 seconds\n"
3636  "down/up seek backward/forward 1 minute\n"
3637  "page down/page up seek backward/forward 10 minutes\n"
3638  "right mouse click seek to percentage in file corresponding to fraction of width\n"
3639  "left double-click toggle full screen\n"
3640  );
3641 }
3642 
3643 /* Called from the main */
3644 int main(int argc, char **argv)
3645 {
3646  int flags;
3647  VideoState *is;
3648 
3649  init_dynload();
3650 
3651  av_log_set_flags(AV_LOG_SKIP_REPEATED);
3652  parse_loglevel(argc, argv, options);
3653 
3654  /* register all codecs, demux and protocols */
3655 #if CONFIG_AVDEVICE
3656  avdevice_register_all();
3657 #endif
3658  avformat_network_init();
3659 
3660  init_opts();
3661 
3662  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
3663  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
3664 
3665  show_banner(argc, argv, options);
3666 
3667  parse_options(NULL, argc, argv, options, opt_input_file);
3668 
3669  if (!input_filename) {
3670  show_usage();
3671  av_log(NULL, AV_LOG_FATAL, "An input file must be specified\n");
3673  "Use -h to get full help or, even better, run 'man %s'\n", program_name);
3674  exit(1);
3675  }
3676 
3677  if (display_disable) {
3678  video_disable = 1;
3679  }
3680  flags = SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER;
3681  if (audio_disable)
3682  flags &= ~SDL_INIT_AUDIO;
3683  else {
3684  /* Try to work around an occasional ALSA buffer underflow issue when the
3685  * period size is NPOT due to ALSA resampling by forcing the buffer size. */
3686  if (!SDL_getenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE"))
3687  SDL_setenv("SDL_AUDIO_ALSA_SET_BUFFER_SIZE","1", 1);
3688  }
3689  if (display_disable)
3690  flags &= ~SDL_INIT_VIDEO;
3691  if (SDL_Init (flags)) {
3692  av_log(NULL, AV_LOG_FATAL, "Could not initialize SDL - %s\n", SDL_GetError());
3693  av_log(NULL, AV_LOG_FATAL, "(Did you set the DISPLAY variable?)\n");
3694  exit(1);
3695  }
3696 
3697  SDL_EventState(SDL_SYSWMEVENT, SDL_IGNORE);
3698  SDL_EventState(SDL_USEREVENT, SDL_IGNORE);
3699 
3700  av_init_packet(&flush_pkt);
3701  flush_pkt.data = (uint8_t *)&flush_pkt;
3702 
3703  if (!display_disable) {
3704  int flags = SDL_WINDOW_HIDDEN;
3705  if (borderless)
3706  flags |= SDL_WINDOW_BORDERLESS;
3707  else
3708  flags |= SDL_WINDOW_RESIZABLE;
3709  window = SDL_CreateWindow(program_name, SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, default_width, default_height, flags);
3710  SDL_SetHint(SDL_HINT_RENDER_SCALE_QUALITY, "linear");
3711  if (window) {
3712  renderer = SDL_CreateRenderer(window, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
3713  if (!renderer) {
3714  av_log(NULL, AV_LOG_WARNING, "Failed to initialize a hardware accelerated renderer: %s\n", SDL_GetError());
3715  renderer = SDL_CreateRenderer(window, -1, 0);
3716  }
3717  if (renderer) {
3718  if (!SDL_GetRendererInfo(renderer, &renderer_info))
3719  av_log(NULL, AV_LOG_VERBOSE, "Initialized %s renderer.\n", renderer_info.name);
3720  }
3721  }
3722  if (!window || !renderer || !renderer_info.num_texture_formats) {
3723  av_log(NULL, AV_LOG_FATAL, "Failed to create window or renderer: %s", SDL_GetError());
3724  do_exit(NULL);
3725  }
3726  }
3727 
3728  is = stream_open(input_filename, file_iformat);
3729  if (!is) {
3730  av_log(NULL, AV_LOG_FATAL, "Failed to initialize VideoState!\n");
3731  do_exit(NULL);
3732  }
3733 
3734  event_loop(is);
3735 
3736  /* never returns */
3737 
3738  return 0;
3739 }
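
The window and renderer setup above (lines 3703-3726) follows a common SDL2 pattern: create a hidden window, ask for a hardware-accelerated renderer with vertical sync, and fall back to whatever renderer SDL can provide if that request fails. The standalone sketch below reproduces just that pattern outside of ffplay; the "demo" title, the 640x480 size and the plain printf/fprintf logging are illustrative stand-ins for ffplay's window parameters and av_log calls.

/* Minimal sketch of the accelerated-renderer-with-fallback pattern used above. */
#include <stdio.h>
#include <SDL.h>

int main(void)
{
    SDL_Window   *win = NULL;
    SDL_Renderer *ren = NULL;
    SDL_RendererInfo info;

    if (SDL_Init(SDL_INIT_VIDEO)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        return 1;
    }

    /* Hidden, resizable window; it would be shown once the first frame is ready. */
    win = SDL_CreateWindow("demo", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
                           640, 480, SDL_WINDOW_HIDDEN | SDL_WINDOW_RESIZABLE);
    if (win) {
        /* Preferred: hardware-accelerated renderer with vertical sync. */
        ren = SDL_CreateRenderer(win, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC);
        if (!ren) {
            fprintf(stderr, "No accelerated renderer: %s\n", SDL_GetError());
            /* Fallback: let SDL pick any available renderer (possibly software). */
            ren = SDL_CreateRenderer(win, -1, 0);
        }
        if (ren && !SDL_GetRendererInfo(ren, &info))
            printf("Initialized %s renderer.\n", info.name);
    }
    if (!win || !ren) {
        fprintf(stderr, "Failed to create window or renderer: %s\n", SDL_GetError());
        SDL_Quit();
        return 1;
    }

    /* ... event/render loop would go here ... */

    SDL_DestroyRenderer(ren);
    SDL_DestroyWindow(win);
    SDL_Quit();
    return 0;
}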
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1569
#define EXTERNAL_CLOCK_SPEED_STEP
Definition: ffplay.c:94
AVFilterContext ** filters
Definition: avfilter.h:842
static void packet_queue_abort(PacketQueue *q)
Definition: ffplay.c:518
static int opt_show_mode(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3504
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:120
static void frame_queue_push(FrameQueue *f)
Definition: ffplay.c:772
packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
Definition: pixfmt.h:77
static SDL_AudioDeviceID audio_dev
Definition: ffplay.c:365
static void video_image_display(VideoState *is)
Definition: ffplay.c:956
#define NULL
Definition: coverity.c:32
Clock audclk
Definition: ffplay.c:219
const char const char void * val
Definition: avisynth_c.h:771
#define AVFMT_NOBINSEARCH
Format does not allow to fall back on binary search via read_timestamp.
Definition: avformat.h:475
static void packet_queue_flush(PacketQueue *q)
Definition: ffplay.c:493
const char * s
Definition: avisynth_c.h:768
int width
Definition: ffplay.c:292
#define OPT_EXPERT
Definition: cmdutils.h:163
static double get_clock(Clock *c)
Definition: ffplay.c:1355
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:336
enum AVSampleFormat fmt
Definition: ffplay.c:138
static int opt_height(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3455
static const struct TextureFormatEntry sdl_texture_format_map[]
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffplay.c:64
static void copy(const float *p1, float *p2, const int length)
SDL_cond * cond
Definition: ffplay.c:178
static const char * format[]
Definition: af_aiir.c:311
int64_t pts_correction_num_faulty_dts
Number of incorrect PTS values so far.
Definition: avcodec.h:3078
static int audio_open(void *opaque, int64_t wanted_channel_layout, int wanted_nb_channels, int wanted_sample_rate, struct AudioParams *audio_hw_params)
Definition: ffplay.c:2468
int size
FrameQueue pictq
Definition: ffplay.c:223
Decoder auddec
Definition: ffplay.c:227
#define EXTERNAL_CLOCK_SPEED_MIN
Definition: ffplay.c:92
AVStream * subtitle_st
Definition: ffplay.c:277
This structure describes decoded (raw) audio or video data.
Definition: frame.h:218
#define VIDEO_PICTURE_QUEUE_SIZE
Definition: ffplay.c:129
static SDL_Renderer * renderer
Definition: ffplay.c:363
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3826
#define SWS_BICUBIC
Definition: swscale.h:60
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1618
double rdftspeed
Definition: ffplay.c:343
double frame_timer
Definition: ffplay.c:280
static AVInputFormat * file_iformat
Definition: ffplay.c:310
#define OPT_VIDEO
Definition: cmdutils.h:165
static void opt_input_file(void *optctx, const char *filename)
Definition: ffplay.c:3513
double get_rotation(AVStream *st)
Definition: cmdutils.c:2151
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:490
const char * fmt
Definition: avisynth_c.h:769
misc image utilities
static int opt_format(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3461
AVFilterGraph * avfilter_graph_alloc(void)
Allocate a filter graph.
Definition: avfiltergraph.c:83
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
AVRational next_pts_tb
Definition: ffplay.c:199
static int get_master_sync_type(VideoState *is)
Definition: ffplay.c:1403
Main libavfilter public API header.
packed RGB 8:8:8, 24bpp, RGBRGB...
Definition: pixfmt.h:64
int rindex
Definition: ffplay.c:171
#define AV_SYNC_FRAMEDUP_THRESHOLD
Definition: ffplay.c:84
#define AV_DICT_DONT_OVERWRITE
Don&#39;t overwrite existing entries.
Definition: dict.h:79
static int default_height
Definition: ffplay.c:314
Memory buffer source API.
double frame_last_filter_delay
Definition: ffplay.c:282
FrameQueue sampq
Definition: ffplay.c:225
enum VideoState::ShowMode show_mode
int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx)
Check validity and configure all the links and formats in the graph.
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:203
AVRational sample_aspect_ratio
Video only.
Definition: avcodec.h:3960
int seek_flags
Definition: ffplay.c:212
struct AVFilterInOut * next
next input/input in the list, NULL if this is the last
Definition: avfilter.h:1014
#define av_opt_set_int_list(obj, name, val, term, flags)
Set a binary option to an integer list.
Definition: opt.h:707
int serial
Definition: ffplay.c:124
AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame)
Guess the sample aspect ratio of a frame, based on both the stream and the frame aspect ratio...
Definition: utils.c:4985
channels
Definition: aptx.c:30
#define OPT_AUDIO
Definition: cmdutils.h:166
static void stream_cycle_channel(VideoState *is, int codec_type)
Definition: ffplay.c:3101
int num
Numerator.
Definition: rational.h:59
static int opt_frame_pix_fmt(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3471
int size
Definition: avcodec.h:1431
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(INT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } if(HAVE_X86ASM &&HAVE_MMX) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
const char * b
Definition: vf_curves.c:113
static void stream_toggle_pause(VideoState *is)
Definition: ffplay.c:1467
#define DEBUG
Definition: vf_framerate.c:29
MyAVPacketList * first_pkt
Definition: ffplay.c:119
static void stream_seek(VideoState *is, int64_t pos, int64_t rel, int seek_by_bytes)
Definition: ffplay.c:1453
static int seek_by_bytes
Definition: ffplay.c:321
double audio_diff_cum
Definition: ffplay.c:237
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
#define REFRESH_RATE
Definition: ffplay.c:100
AVInputFormat * iformat
Definition: ffplay.c:205
enum AVMediaType codec_type
Definition: rtp.c:37
int64_t bit_rate
Total stream bitrate in bit/s, 0 if not available.
Definition: avformat.h:1464
AVCodecContext * avctx
Definition: ffplay.c:191
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1180
int paused
Definition: ffplay.c:208
static int opt_codec(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3526
static AVStream * video_stream
static unsigned sws_flags
Definition: ffplay.c:110
int abort_request
Definition: ffplay.c:123
AVS_VideoFrame int int int int new_height
Definition: avisynth_c.h:818
unsigned num_rects
Definition: avcodec.h:3864
static void set_clock_at(Clock *c, double pts, int serial, double time)
Definition: ffplay.c:1367
static void toggle_pause(VideoState *is)
Definition: ffplay.c:1480
int out_size
Definition: movenc.c:55
double audio_diff_threshold
Definition: ffplay.c:239
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:580
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
discard all
Definition: avcodec.h:794
int64_t channel_layout
Definition: ffplay.c:137
const AVClass * sws_get_class(void)
Get the AVClass for swsContext.
Definition: options.c:95
static int audio_disable
Definition: ffplay.c:317
AVStream * audio_st
Definition: ffplay.c:241
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:998
#define AV_PIX_FMT_RGB444
Definition: pixfmt.h:357
static const char * audio_codec_name
Definition: ffplay.c:340
#define fn(a)
int serial
Definition: ffplay.c:157
AVCodec.
Definition: avcodec.h:3408
static void seek_chapter(VideoState *is, int incr)
Definition: ffplay.c:3215
double pts_drift
Definition: ffplay.c:145
#define CMDUTILS_COMMON_OPTIONS
Definition: cmdutils.h:215
AVDictionary * filter_codec_opts(AVDictionary *opts, enum AVCodecID codec_id, AVFormatContext *s, AVStream *st, AVCodec *codec)
Filter out options for given codec.
Definition: cmdutils.c:2055
This struct describes the properties of an encoded stream.
Definition: avcodec.h:3876
int width
Definition: ffplay.c:161
char * scale_sws_opts
sws options to use for the auto-inserted scale filters
Definition: avfilter.h:845
AVStream * video_st
Definition: ffplay.c:284
Clock extclk
Definition: ffplay.c:221
static VideoState * stream_open(const char *filename, AVInputFormat *iformat)
Definition: ffplay.c:3046
void * opaque
Definition: avio.h:60
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
struct SwsContext * img_convert_ctx
Definition: ffplay.c:287
AVSubtitleRect ** rects
Definition: avcodec.h:3865
Format I/O context.
Definition: avformat.h:1342
static void toggle_audio_display(VideoState *is)
Definition: ffplay.c:3186
AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame)
Guess the frame rate, based on both the container and codec information.
Definition: utils.c:5008
Definition: ffplay.c:154
memory buffer sink API for audio and video
const AVClass * avcodec_get_class(void)
Get the AVClass for AVCodecContext.
Definition: options.c:294
static int16_t block[64]
Definition: dct.c:115
int av_sync_type
Definition: ffplay.c:233
unsigned int nb_stream_indexes
Definition: avformat.h:1265
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
int rindex_shown
Definition: ffplay.c:176
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:3828
double pts
Definition: ffplay.c:158
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
Public dictionary API.
double audio_diff_avg_coef
Definition: ffplay.c:238
AVRational start_pts_tb
Definition: ffplay.c:197
static int read_thread(void *arg)
Definition: ffplay.c:2724
int keep_last
Definition: ffplay.c:175
int avfilter_link(AVFilterContext *src, unsigned srcpad, AVFilterContext *dst, unsigned dstpad)
Link two filters together.
Definition: avfilter.c:135
int rdft_bits
Definition: ffplay.c:268
int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance)
Activate resampling compensation ("soft" compensation).
Definition: swresample.c:889
int size
Definition: ffplay.c:121
static Frame * frame_queue_peek(FrameQueue *f)
Definition: ffplay.c:725
static int64_t start_time
Definition: ffplay.c:327
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2181
void log_callback_help(void *ptr, int level, const char *fmt, va_list vl)
Trivial log callback.
Definition: cmdutils.c:99
uint8_t
static int nb_streams
Definition: ffprobe.c:276
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
int opt_default(void *optctx, const char *opt, const char *arg)
Fallback for options that are not explicitly handled, these will be parsed through AVOptions...
Definition: cmdutils.c:545
static int default_width
Definition: ffplay.c:313
int last_video_stream
Definition: ffplay.c:304
int width
Video only.
Definition: avcodec.h:3950
int last_subtitle_stream
Definition: ffplay.c:304
8 bits with AV_PIX_FMT_RGB32 palette
Definition: pixfmt.h:73
AVOptions.
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:661
#define HAS_ARG
Definition: cmdutils.h:161
int audio_hw_buf_size
Definition: ffplay.c:243
static void stream_component_close(VideoState *is, int stream_index)
Definition: ffplay.c:1184
#define AV_LOG_TRACE
Extremely verbose debugging, useful for libav* development.
Definition: log.h:202
static int decode_interrupt_cb(void *ctx)
Definition: ffplay.c:2694
int64_t duration
Definition: ffplay.c:122
struct SwrContext * swr_ctx
Definition: ffplay.c:257
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1448
int finished
Definition: ffplay.c:193
static void event_loop(VideoState *cur_stream)
Definition: ffplay.c:3243
static int cmp_audio_fmts(enum AVSampleFormat fmt1, int64_t channel_count1, enum AVSampleFormat fmt2, int64_t channel_count2)
Definition: ffplay.c:403
int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, const char *spec)
Check if the stream st contained in s is matched by the stream specifier spec.
Definition: utils.c:5029
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:311
static void packet_queue_destroy(PacketQueue *q)
Definition: ffplay.c:511
#define AVFMT_FLAG_GENPTS
Generate missing pts even if it requires parsing future frames.
Definition: avformat.h:1474
static int framedrop
Definition: ffplay.c:337
SDL_Texture * vis_texture
Definition: ffplay.c:272
void init_opts(void)
Initialize the cmdutils option system, in particular allocate the *_opts contexts.
Definition: cmdutils.c:85
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1410
AVPacket pkt
Definition: ffplay.c:113
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: utils.c:2078
int bytes_per_sec
Definition: ffplay.c:140
static AVFrame * frame
AVFormatContext * avformat_alloc_context(void)
Allocate an AVFormatContext.
Definition: options.c:144
int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt, const char *name, const char *args, void *opaque, AVFilterGraph *graph_ctx)
Create and add a filter instance into an existing graph.
const char data[16]
Definition: mxf.c:90
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
static int64_t audio_callback_time
Definition: ffplay.c:356
#define height
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
static int64_t get_valid_channel_layout(int64_t channel_layout, int channels)
Definition: ffplay.c:414
int flags
Flags modifying the (de)muxer behaviour.
Definition: avformat.h:1473
static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial)
Definition: ffplay.c:538
static void sigterm_handler(int sig)
Definition: ffplay.c:1299
uint8_t * data
Definition: avcodec.h:1430
void parse_options(void *optctx, int argc, char **argv, const OptionDef *options, void(*parse_arg_function)(void *, const char *))
Definition: cmdutils.c:383
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
int freq
Definition: ffplay.c:135
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:647
static int flags
Definition: log.c:55
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:4922
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
Frame queue[FRAME_QUEUE_SIZE]
Definition: ffplay.c:170
Definition: mxfdec.c:285
#define AVERROR_EOF
End of file.
Definition: error.h:55
#define sp
Definition: regdef.h:63
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
Definition: ffplay.c:143
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:2749
const AVClass * avformat_get_class(void)
Get the AVClass for AVFormatContext.
Definition: options.c:168
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the &#39;-loglevel&#39; option in the command line args and apply it.
Definition: cmdutils.c:506
external API header
#define AV_NOSYNC_THRESHOLD
Definition: ffplay.c:86
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:3829
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:557
void show_help_options(const OptionDef *options, const char *msg, int req_flags, int rej_flags, int alt_flags)
Print help for all options matching specified flags.
Definition: cmdutils.c:177
static int64_t duration
Definition: ffplay.c:328
AVRational sar
Definition: ffplay.c:164
unsigned int * stream_index
Definition: avformat.h:1264
#define av_log(a,...)
int(* callback)(void *)
Definition: avio.h:59
#define AV_OPT_FLAG_ENCODING_PARAM
a generic parameter which can be set by the user for muxing or encoding
Definition: opt.h:276
PacketQueue videoq
Definition: ffplay.c:285
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:601
AVDictionary ** setup_find_stream_info_opts(AVFormatContext *s, AVDictionary *codec_opts)
Setup AVCodecContext options for avformat_find_stream_info().
Definition: cmdutils.c:2112
AVDictionary * format_opts
Definition: cmdutils.c:73
static int realloc_texture(SDL_Texture **texture, Uint32 new_format, int new_width, int new_height, SDL_BlendMode blendmode, int init_texture)
Definition: ffplay.c:833
static int borderless
Definition: ffplay.c:323
static void * av_x_if_null(const void *p, const void *x)
Return x default pointer in case p is NULL.
Definition: avutil.h:308
#define SAMPLE_ARRAY_SIZE
Definition: ffplay.c:104
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
static void toggle_mute(VideoState *is)
Definition: ffplay.c:1486
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:561
Main libavdevice API header.
int av_find_best_stream(AVFormatContext *ic, enum AVMediaType type, int wanted_stream_nb, int related_stream, AVCodec **decoder_ret, int flags)
Find the "best" stream in the file.
Definition: utils.c:4169
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:3054
libswresample public header
enum AVCodecID id
Definition: avcodec.h:3422
AVProgram * av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s)
Find the programs which belong to a given stream.
Definition: utils.c:4152
int audio_diff_avg_count
Definition: ffplay.c:240
int ytop
Definition: ffplay.c:292
int width
Definition: frame.h:276
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:69
AVDictionary * metadata
Metadata that applies to the whole file.
Definition: avformat.h:1580
#define AV_PIX_FMT_BGR32_1
Definition: pixfmt.h:345
int seek_req
Definition: ffplay.c:211
#define AV_PIX_FMT_NE(be, le)
Definition: pixfmt.h:339
#define FRAME_QUEUE_SIZE
Definition: ffplay.c:132
static void video_refresh(void *opaque, double *remaining_time)
Definition: ffplay.c:1555
int read_pause_return
Definition: ffplay.c:215
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:471
#define AV_OPT_FLAG_FILTERING_PARAM
a generic parameter which can be set by the user for filtering
Definition: opt.h:291
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3827
#define AVERROR(e)
Definition: error.h:43
static AVStream * audio_stream
static void frame_queue_next(FrameQueue *f)
Definition: ffplay.c:782
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
The libswresample context.
#define MIN_FRAMES
Definition: ffplay.c:67
static int decoder_start(Decoder *d, int(*fn)(void *), void *arg)
Definition: ffplay.c:2089
RDFTContext * rdft
Definition: ffplay.c:267
static int frame_queue_nb_remaining(FrameQueue *f)
Definition: ffplay.c:798
packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
Definition: pixfmt.h:91
#define EXTERNAL_CLOCK_MAX_FRAMES
Definition: ffplay.c:69
char * url
input or output URL.
Definition: avformat.h:1438
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:709
static int packet_queue_put_nullpacket(PacketQueue *q, int stream_index)
Definition: ffplay.c:465
static int autorotate
Definition: ffplay.c:351
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
#define CURSOR_HIDE_DELAY
Definition: ffplay.c:106
int av_read_play(AVFormatContext *s)
Start playing a network-based stream (e.g.
Definition: utils.c:4236
static double compute_target_delay(double delay, VideoState *is)
Definition: ffplay.c:1506
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
enum AVMediaType codec_type
General type of the encoded data.
Definition: avcodec.h:3880
const char * arg
Definition: jacosubdec.c:66
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:558
static int packet_queue_put_private(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:422
AVChapter ** chapters
Definition: avformat.h:1570
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:345
uint16_t width
Definition: gdv.c:47
simple assert() macros that are a bit more flexible than ISO C assert().
static void stream_close(VideoState *is)
Definition: ffplay.c:1241
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
static int find_stream_info
Definition: ffplay.c:352
#define AV_PIX_FMT_0BGR32
Definition: pixfmt.h:347
int video_stream
Definition: ffplay.c:283
static int video_open(VideoState *is)
Definition: ffplay.c:1312
const char * av_get_sample_fmt_name(enum AVSampleFormat sample_fmt)
Return the name of sample_fmt, or NULL if sample_fmt is not recognized.
Definition: samplefmt.c:49
int * queue_serial
Definition: ffplay.c:150
static void init_clock(Clock *c, int *queue_serial)
Definition: ffplay.c:1387
int xpos
Definition: ffplay.c:270
int channels
Definition: ffplay.c:136
static void get_sdl_pix_fmt_and_blendmode(int format, Uint32 *sdl_pix_fmt, SDL_BlendMode *sdl_blendmode)
Definition: ffplay.c:887
static enum ShowMode show_mode
Definition: ffplay.c:339
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1260
#define FFMAX(a, b)
Definition: common.h:94
static void packet_queue_start(PacketQueue *q)
Definition: ffplay.c:529
static const OptionDef options[]
Definition: ffplay.c:3550
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:119
float FFTSample
Definition: avfft.h:35
static int dummy
Definition: ffplay.c:3548
#define fail()
Definition: checkasm.h:116
int8_t exp
Definition: eval.c:72
enum AVPixelFormat format
Definition: ffplay.c:368
double audio_clock
Definition: ffplay.c:235
int force_refresh
Definition: ffplay.c:207
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2224
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
AVDictionary * sws_dict
Definition: cmdutils.c:71
static int opt_sync(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3477
struct SwsContext * sws_getCachedContext(struct SwsContext *context, int srcW, int srcH, enum AVPixelFormat srcFormat, int dstW, int dstH, enum AVPixelFormat dstFormat, int flags, SwsFilter *srcFilter, SwsFilter *dstFilter, const double *param)
Check if context can be reused, otherwise reallocate a new one.
Definition: utils.c:2344
static void update_sample_display(VideoState *is, short *samples, int samples_size)
Definition: ffplay.c:2244
void av_rdft_calc(RDFTContext *s, FFTSample *data)
uint32_t end_display_time
Definition: avcodec.h:3863
static void decoder_destroy(Decoder *d)
Definition: ffplay.c:674
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3866
static int genpts
Definition: ffplay.c:330
static AVPacket flush_pkt
Definition: ffplay.c:358
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:396
int flip_v
Definition: ffplay.c:166
double frame_last_returned_time
Definition: ffplay.c:281
const AVFilter * avfilter_get_by_name(const char *name)
Get a filter definition matching the given name.
Definition: allfilters.c:428
char * av_asprintf(const char *fmt,...)
Definition: avstring.c:113
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
Definition: mem.c:488
static const char * subtitle_codec_name
Definition: ffplay.c:341
static int subtitle_disable
Definition: ffplay.c:319
void av_format_inject_global_side_data(AVFormatContext *s)
This function will cause global side data to be injected in the next packet of each stream as well as...
Definition: utils.c:154
int max_size
Definition: ffplay.c:174
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1398
int step
Definition: ffplay.c:293
SDL_Thread * decoder_tid
Definition: ffplay.c:200
AVDictionary * opts
Definition: movenc.c:50
static SDL_Window * window
Definition: ffplay.c:362
const char program_name[]
program name, defined by the program for show_version().
Definition: ffplay.c:63
int av_read_pause(AVFormatContext *s)
Pause a network-based stream (e.g.
Definition: utils.c:4245
SDL_mutex * mutex
Definition: ffplay.c:125
static av_const double hypot(double x, double y)
Definition: libm.h:366
int audio_write_buf_size
Definition: ffplay.c:249
AVInputFormat * av_find_input_format(const char *short_name)
Find AVInputFormat based on the short name of the input format.
Definition: format.c:118
#define SAMPLE_QUEUE_SIZE
Definition: ffplay.c:131
int attribute_align_arg swr_convert(struct SwrContext *s, uint8_t *out_arg[SWR_CH_MAX], int out_count, const uint8_t *in_arg [SWR_CH_MAX], int in_count)
Definition: swresample.c:706
int channels
number of audio channels, only used for audio.
Definition: frame.h:523
#define SUBPICTURE_QUEUE_SIZE
Definition: ffplay.c:130
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:301
struct MyAVPacketList * next
Definition: ffplay.c:114
#define AV_CH_LAYOUT_STEREO_DOWNMIX
#define NAN
Definition: mathematics.h:64
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
#define FFMIN(a, b)
Definition: common.h:96
SDL_mutex * mutex
Definition: ffplay.c:177
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:400
int windex
Definition: ffplay.c:172
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:156
#define AV_OPT_SEARCH_CHILDREN
Search in possible children of the given object first.
Definition: opt.h:555
static int cursor_hidden
Definition: ffplay.c:345
static void decoder_init(Decoder *d, AVCodecContext *avctx, PacketQueue *queue, SDL_cond *empty_queue_cond)
Definition: ffplay.c:576
AVSubtitle sub
Definition: ffplay.c:156
int width
picture width / height.
Definition: avcodec.h:1690
uint8_t w
Definition: llviddspenc.c:38
int main(int argc, char **argv)
Definition: ffplay.c:3644
int height
Definition: ffplay.c:162
#define SDL_VOLUME_STEP
Definition: ffplay.c:77
static void show_usage(void)
Definition: ffplay.c:3601
int nb_packets
Definition: ffplay.c:120
static int opt_width(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3449
static int get_video_frame(VideoState *is, AVFrame *frame)
Definition: ffplay.c:1744
int frame_drops_late
Definition: ffplay.c:259
struct AudioParams audio_src
Definition: ffplay.c:252
static void refresh_loop_wait_event(VideoState *is, SDL_Event *event)
Definition: ffplay.c:3198
static void set_clock_speed(Clock *c, double speed)
Definition: ffplay.c:1381
#define SDL_AUDIO_MAX_CALLBACKS_PER_SEC
Definition: ffplay.c:74
AVFormatContext * ctx
Definition: movenc.c:48
struct SwrContext * swr_alloc_set_opts(struct SwrContext *s, int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, int log_offset, void *log_ctx)
Allocate SwrContext if needed and set/reset common parameters.
Definition: swresample.c:59
static int fast
Definition: ffplay.c:329
void sws_freeContext(struct SwsContext *swsContext)
Free the swscaler context swsContext.
Definition: utils.c:2284
int last_i_start
Definition: ffplay.c:266
uint16_t format
Definition: avcodec.h:3861
#define OPT_INT64
Definition: cmdutils.h:170
MyAVPacketList * last_pkt
Definition: ffplay.c:119
Definition: avfft.h:72
void av_rdft_end(RDFTContext *s)
static void step_to_next_frame(VideoState *is)
Definition: ffplay.c:1498
int n
Definition: avisynth_c.h:684
packed RGB 8:8:8, 24bpp, BGRBGR...
Definition: pixfmt.h:65
static int audio_decode_frame(VideoState *is)
Decode one audio frame and return its uncompressed size.
Definition: ffplay.c:2312
RDFTContext * av_rdft_init(int nbits, enum RDFTransformType trans)
Set up a real FFT.
static int is_full_screen
Definition: ffplay.c:355
#define AV_SYNC_THRESHOLD_MAX
Definition: ffplay.c:82
static void fill_rectangle(int x, int y, int w, int h)
Definition: ffplay.c:822
static int stream_has_enough_packets(AVStream *st, int stream_id, PacketQueue *queue)
Definition: ffplay.c:2700
static int startup_volume
Definition: ffplay.c:324
#define SDL_AUDIO_MIN_BUFFER_SIZE
Definition: ffplay.c:72
static const AVFilterPad inputs[]
Definition: af_acontrast.c:193
if(ret< 0)
Definition: vf_mcdeint.c:279
static int packet_queue_init(PacketQueue *q)
Definition: ffplay.c:476
static int decoder_reorder_pts
Definition: ffplay.c:332
#define AUDIO_DIFF_AVG_NB
Definition: ffplay.c:97
static void set_clock(Clock *c, double pts, int serial)
Definition: ffplay.c:1375
int16_t sample_array[SAMPLE_ARRAY_SIZE]
Definition: ffplay.c:264
AVFilterContext * filter_ctx
filter context associated to this input/output
Definition: avfilter.h:1008
int paused
Definition: ffplay.c:149
static const char * input_filename
Definition: ffplay.c:311
static const AVFilterPad outputs[]
Definition: af_acontrast.c:203
#define FF_ARRAY_ELEMS(a)
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:842
#define av_log2
Definition: intmath.h:83
static Frame * frame_queue_peek_last(FrameQueue *f)
Definition: ffplay.c:735
void show_help_default(const char *opt, const char *arg)
Per-fftool specific help handler.
Definition: ffplay.c:3608
int64_t pos
Definition: ffplay.c:160
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
int last_audio_stream
Definition: ffplay.c:304
Stream structure.
Definition: avformat.h:873
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: decode.c:1946
const AVClass * avfilter_get_class(void)
Definition: avfilter.c:1620
A linked-list of the inputs/outputs of the filter chain.
Definition: avfilter.h:1003
#define SAMPLE_CORRECTION_PERCENT_MAX
Definition: ffplay.c:89
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:291
#define AV_PIX_FMT_BGR555
Definition: pixfmt.h:361
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:642
#define AV_PIX_FMT_BGR32
Definition: pixfmt.h:344
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:4934
char * filename
Definition: ffplay.c:291
static int screen_height
Definition: ffplay.c:316
static int opt_duration(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3498
sample_rate
static AVInputFormat * iformat
Definition: ffprobe.c:253
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
int64_t next_pts
Definition: ffplay.c:198
static int autoexit
Definition: ffplay.c:333
AVFrame * frame
Definition: ffplay.c:155
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:251
int serial
Definition: ffplay.c:148
int uploaded
Definition: ffplay.c:165
enum AVMediaType codec_type
Definition: avcodec.h:1526
static void calculate_display_rect(SDL_Rect *rect, int scr_xleft, int scr_ytop, int scr_width, int scr_height, int pic_width, int pic_height, AVRational pic_sar)
Definition: ffplay.c:856
AVSampleFormat
Audio sample formats.
Definition: samplefmt.h:58
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:171
enum AVCodecID codec_id
Definition: avcodec.h:1528
static void do_exit(VideoState *is)
Definition: ffplay.c:1278
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int sample_rate
samples per second
Definition: avcodec.h:2173
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:249
AVIOContext * pb
I/O context.
Definition: avformat.h:1384
#define ss
#define AV_CODEC_FLAG2_FAST
Allow non spec compliant speedup tricks.
Definition: avcodec.h:901
static int loop
Definition: ffplay.c:336
int last_paused
Definition: ffplay.c:209
static int exit_on_keydown
Definition: ffplay.c:334
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
FFT functions.
main external API structure.
Definition: avcodec.h:1518
AVCodec * avcodec_find_decoder(enum AVCodecID id)
Find a registered decoder with a matching codec ID.
Definition: allcodecs.c:862
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:592
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:342
Decoder subdec
Definition: ffplay.c:229
av_cold void swr_free(SwrContext **ss)
Free the given SwrContext and set the pointer to NULL.
Definition: swresample.c:137
double max_frame_duration
Definition: ffplay.c:286
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1042
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:306
Clock vidclk
Definition: ffplay.c:220
int x
Definition: f_ebur128.c:91
packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
Definition: pixfmt.h:63
#define fp
Definition: regdef.h:44
int attribute_align_arg sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[], const int srcStride[], int srcSliceY, int srcSliceH, uint8_t *const dst[], const int dstStride[])
swscale wrapper, so we don&#39;t need to export the SwsContext.
Definition: swscale.c:753
#define AVFMT_NOGENSEARCH
Format does not allow to fall back on generic search.
Definition: avformat.h:476
static double vp_duration(VideoState *is, Frame *vp, Frame *nextvp)
Definition: ffplay.c:1536
static const char * window_title
Definition: ffplay.c:312
double pts
Definition: ffplay.c:144
static Frame * frame_queue_peek_next(FrameQueue *f)
Definition: ffplay.c:730
static int audio_thread(void *arg)
Definition: ffplay.c:2002
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
static int av_sync_type
Definition: ffplay.c:326
int pkt_serial
Definition: ffplay.c:192
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! *ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> in
static void frame_queue_signal(FrameQueue *f)
Definition: ffplay.c:718
static const uint16_t channel_layouts[7]
Definition: dca_lbr.c:113
static SDL_RendererInfo renderer_info
Definition: ffplay.c:364
int sample_rate
Sample rate of the audio data.
Definition: frame.h:391
int configure_filtergraph(FilterGraph *fg)
Definition: f_ebur128.c:91
int pad_idx
index of the filt_ctx pad to use for linking
Definition: avfilter.h:1011
PacketQueue audioq
Definition: ffplay.c:242
int packet_pending
Definition: ffplay.c:194
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
int64_t seek_pos
Definition: ffplay.c:213
Rational number (pair of numerator and denominator).
Definition: rational.h:58
#define AV_SYNC_THRESHOLD_MIN
Definition: ffplay.c:80
#define isnan(x)
Definition: libm.h:340
struct SwsContext * sub_convert_ctx
Definition: ffplay.c:288
#define AV_OPT_FLAG_DECODING_PARAM
a generic parameter which can be set by the user for demuxing or decoding
Definition: opt.h:277
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:165
#define OPT_STRING
Definition: cmdutils.h:164
static void video_audio_display(VideoState *s)
Definition: ffplay.c:1042
SDL_cond * cond
Definition: ffplay.c:126
#define EXTERNAL_CLOCK_SPEED_MAX
Definition: ffplay.c:93
#define AVSEEK_FLAG_BYTE
seeking based on position in bytes
Definition: avformat.h:2487
static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub)
Definition: ffplay.c:585
cl_device_type type
AVMediaType
Definition: avutil.h:199
discard useless packets like 0 size packets in avi
Definition: avcodec.h:789
size_t av_strlcatf(char *dst, size_t size, const char *fmt,...)
Definition: avstring.c:101
static int is_realtime(AVFormatContext *s)
Definition: ffplay.c:2707
static void check_external_clock_speed(VideoState *is)
Definition: ffplay.c:1438
int queue_attachments_req
Definition: ffplay.c:210
unsigned nb_filters
Definition: avfilter.h:843
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:538
#define snprintf
Definition: snprintf.h:34
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:483
static void frame_queue_unref_item(Frame *vp)
Definition: ffplay.c:679
int error
contains the error code or 0 if no error happened
Definition: avio.h:245
misc parsing utilities
SDL_cond * empty_queue_cond
Definition: ffplay.c:195
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1767
int audio_stream
Definition: ffplay.c:231
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:266
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: utils.c:2528
double parse_number_or_die(const char *context, const char *numstr, int type, double min, double max)
Parse a string and return its corresponding value as a double.
Definition: cmdutils.c:144
static int stream_component_open(VideoState *is, int stream_index)
Definition: ffplay.c:2542
#define AV_PIX_FMT_BGR565
Definition: pixfmt.h:360
char * name
unique name for this input/output in the list
Definition: avfilter.h:1005
static int64_t cursor_last_shown
Definition: ffplay.c:344
static int frame_queue_init(FrameQueue *f, PacketQueue *pktq, int max_size, int keep_last)
Definition: ffplay.c:685
static int opt_frame_size(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3443
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:551
static int upload_texture(SDL_Texture **tex, AVFrame *frame, struct SwsContext **img_convert_ctx)
Definition: ffplay.c:905
static int64_t pts
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:76
AVCodec * avcodec_find_decoder_by_name(const char *name)
Find a registered decoder with the specified name.
Definition: allcodecs.c:890
static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, double duration, int64_t pos, int serial)
Definition: ffplay.c:1713
SDL_Texture * sub_texture
Definition: ffplay.c:273
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1447
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:232
int frame_drops_early
Definition: ffplay.c:258
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
static int synchronize_audio(VideoState *is, int nb_samples)
Definition: ffplay.c:2264
#define EXTERNAL_CLOCK_MIN_FRAMES
Definition: ffplay.c:68
SDL_Texture * vid_texture
Definition: ffplay.c:274
int sample_array_index
Definition: ffplay.c:265
SDL_cond * continue_read_thread
Definition: ffplay.c:306
int64_t start
Definition: avformat.h:1302
static void frame_queue_destory(FrameQueue *f)
Definition: ffplay.c:706
int sample_rate
Audio only.
Definition: avcodec.h:3994
uint8_t max_lowres
maximum value for lowres supported by the decoder
Definition: avcodec.h:3433
static int64_t frame_queue_last_pos(FrameQueue *f)
Definition: ffplay.c:804
#define OPT_BOOL
Definition: cmdutils.h:162
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:327
double speed
Definition: ffplay.c:147
static int exit_on_mousedown
Definition: ffplay.c:335
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:67
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1081
int
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:62
static int video_thread(void *arg)
Definition: ffplay.c:2100
#define OPT_INT
Definition: cmdutils.h:167
AVFilterInOut * avfilter_inout_alloc(void)
Allocate a single AVFilterInOut entry.
Definition: graphparser.c:198
static void set_default_window_size(int width, int height, AVRational sar)
Definition: ffplay.c:1304
AVDictionary * codec_opts
Definition: cmdutils.c:73
struct AudioParams audio_tgt
Definition: ffplay.c:256
sample_rates
uint8_t * audio_buf
Definition: ffplay.c:244
enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt)
Get the packed alternative form of the given sample format.
Definition: samplefmt.c:75
int muted
Definition: ffplay.c:251
static int display_disable
Definition: ffplay.c:322
static int video_disable
Definition: ffplay.c:318
int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options)
Read packets of a media file to get stream information.
Definition: utils.c:3550
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:912
signed 16 bits
Definition: samplefmt.h:61
int audio_buf_index
Definition: ffplay.c:248
uint8_t * audio_buf1
Definition: ffplay.c:245
static int opt_seek(void *optctx, const char *opt, const char *arg)
Definition: ffplay.c:3492
static double c[64]
int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags)
Convenience wrapper for av_dict_set that converts the value to a string and stores it...
Definition: dict.c:147
static int screen_width
Definition: ffplay.c:315
PacketQueue * pktq
Definition: ffplay.c:179
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:926
uint32_t start_display_time
Definition: avcodec.h:3862
int(* read_seek)(struct AVFormatContext *, int stream_index, int64_t timestamp, int flags)
Seek to a given timestamp relative to the frames in stream component stream_index.
Definition: avformat.h:739
static void update_volume(VideoState *is, int sign, double step)
Definition: ffplay.c:1491
FFTSample * rdft_data
Definition: ffplay.c:269
static void update_video_pts(VideoState *is, double pts, int64_t pos, int serial)
Definition: ffplay.c:1548
int audio_clock_serial
Definition: ffplay.c:236
packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
Definition: pixfmt.h:82
#define AV_PIX_FMT_RGB555
Definition: pixfmt.h:356
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1301
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:33
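av_init_packet() only resets the optional fields of an AVPacket; it does not touch data and size, so a stack-allocated packet is typically prepared as in this minimal sketch.
#include "libavcodec/avcodec.h"

static void prepare_empty_packet(AVPacket *pkt)
{
    av_init_packet(pkt);   /* pts, dts, flags, side data set to default values */
    pkt->data = NULL;      /* data and size must be cleared explicitly */
    pkt->size = 0;
}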
char * key
Definition: dict.h:86
int den
Denominator.
Definition: rational.h:60
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:90
PacketQueue subtitleq
Definition: ffplay.c:278
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1354
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:4413
static int lowres
Definition: ffplay.c:331
int eof
Definition: ffplay.c:289
#define GROW_ARRAY(array, nb_elems)
Definition: cmdutils.h:622
static av_always_inline int diff(const uint32_t a, const uint32_t b)
#define av_free(p)
static int infinite_buffer
Definition: ffplay.c:338
#define AVFMT_NO_BYTE_SEEK
Format does not allow seeking by bytes.
Definition: avformat.h:477
double duration
Definition: ffplay.c:159
int pixels
Definition: avisynth_c.h:429
#define AVERROR_OPTION_NOT_FOUND
Option not found.
Definition: error.h:61
char * value
Definition: dict.h:87
int eof_reached
true if eof reached
Definition: avio.h:239
int len
#define AV_PIX_FMT_RGB32_1
Definition: pixfmt.h:343
int channels
number of audio channels
Definition: avcodec.h:2174
unsigned int audio_buf1_size
Definition: ffplay.c:247
int av_buffersink_get_channels(const AVFilterContext *ctx)
SDL_Thread * read_tid
Definition: ffplay.c:204
AVPacket pkt
Definition: ffplay.c:189
int frame_size
Definition: ffplay.c:139
void av_log_set_flags(int arg)
Definition: log.c:390
int64_t start_pts
Definition: ffplay.c:196
int abort_request
Definition: ffplay.c:206
static void decoder_abort(Decoder *d, FrameQueue *fq)
Definition: ffplay.c:813
static int packet_queue_put(PacketQueue *q, AVPacket *pkt)
Definition: ffplay.c:451
double last_updated
Definition: ffplay.c:146
Decoder viddec
Definition: ffplay.c:228
#define lrint
Definition: tablegen.h:53
AVDictionary * swr_opts
Definition: cmdutils.c:72
int avformat_open_input(AVFormatContext **ps, const char *url, AVInputFormat *fmt, AVDictionary **options)
Open an input stream and read the header.
Definition: utils.c:537
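Taken together, avformat_open_input(), avformat_find_stream_info() and avformat_close_input() form the usual open/probe/close sequence. A minimal sketch follows; the filename is arbitrary and error handling is reduced to print_error() from cmdutils.h.
#include "libavformat/avformat.h"
#include "cmdutils.h"

static int probe_file(const char *filename)
{
    AVFormatContext *ic = NULL;
    int err;

    err = avformat_open_input(&ic, filename, NULL, NULL);  /* read the header */
    if (err < 0) {
        print_error(filename, err);
        return err;
    }
    err = avformat_find_stream_info(ic, NULL);             /* probe the streams */
    if (err < 0)
        print_error(filename, err);
    else
        av_dump_format(ic, 0, filename, 0);                /* show what was found */
    avformat_close_input(&ic);                             /* frees ic and sets it to NULL */
    return err;
}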
int height
Definition: ffplay.c:292
void show_help_children(const AVClass *class, int flags)
Show help for all options with given flags in class and all its children.
Definition: cmdutils.c:206
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:1605
int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters, AVFilterInOut **inputs, AVFilterInOut **outputs, void *log_ctx)
Add a graph described by a string to a graph.
Definition: graphparser.c:538
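avfilter_inout_alloc() and avfilter_graph_parse_ptr() are used together when a textual filter description is inserted between an existing source and sink. The following is a minimal sketch, assuming src_ctx and sink_ctx are already-created buffer source and buffersink instances in graph, and that the filter string is only an example.
#include "libavfilter/avfilter.h"
#include "libavutil/error.h"
#include "libavutil/mem.h"

static int insert_filters(AVFilterGraph *graph,
                          AVFilterContext *src_ctx, AVFilterContext *sink_ctx,
                          const char *filters)             /* e.g. "scale=640:-1" */
{
    AVFilterInOut *outputs = avfilter_inout_alloc();       /* open output of the source */
    AVFilterInOut *inputs  = avfilter_inout_alloc();       /* open input of the sink */
    int ret;

    if (!outputs || !inputs) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = src_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    inputs->name        = av_strdup("out");
    inputs->filter_ctx  = sink_ctx;
    inputs->pad_idx     = 0;
    inputs->next        = NULL;

    ret = avfilter_graph_parse_ptr(graph, filters, &inputs, &outputs, NULL);
    if (ret >= 0)
        ret = avfilter_graph_config(graph, NULL);          /* check and link the whole graph */
end:
    avfilter_inout_free(&outputs);
    avfilter_inout_free(&inputs);
    return ret;
}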
#define AV_PIX_FMT_RGB565
Definition: pixfmt.h:355
int channels
Audio only.
Definition: avcodec.h:3990
AVFilterContext
An instance of a filter.
Definition: avfilter.h:338
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1429
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
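For reference, av_get_default_channel_layout() simply maps a channel count to a canonical layout bitmask, as in this minimal sketch.
#include "libavutil/channel_layout.h"

int64_t mono   = av_get_default_channel_layout(1); /* AV_CH_LAYOUT_MONO */
int64_t stereo = av_get_default_channel_layout(2); /* AV_CH_LAYOUT_STEREO */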
static enum AVSampleFormat sample_fmts[]
Definition: adpcmenc.c:701
int64_t duration
Duration of the stream, in AV_TIME_BASE fractional seconds.
Definition: avformat.h:1457
int height
Definition: frame.h:276
FILE * out
Definition: movenc.c:54
static const char * video_codec_name
Definition: ffplay.c:342
#define MAX_QUEUE_SIZE
Definition: ffplay.c:66
#define av_freep(p)
int64_t pts_correction_num_faulty_pts
Current statistics for PTS correction.
Definition: avcodec.h:3077
PacketQueue * queue
Definition: ffplay.c:190
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:647
static Frame * frame_queue_peek_writable(FrameQueue *f)
Definition: ffplay.c:740
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:70
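AV_DICT_IGNORE_SUFFIX is most often used to iterate over every entry of a dictionary by passing an empty key, e.g. to dump metadata; a minimal sketch:
#include <stdio.h>
#include "libavutil/dict.h"

static void dump_metadata(const AVDictionary *metadata)
{
    AVDictionaryEntry *e = NULL;
    /* with key "" and AV_DICT_IGNORE_SUFFIX every entry matches */
    while ((e = av_dict_get(metadata, "", e, AV_DICT_IGNORE_SUFFIX)))
        printf("%s=%s\n", e->key, e->value);
}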
static int subtitle_thread(void *arg)
Definition: ffplay.c:2209
FrameQueue subpq
Definition: ffplay.c:224
int format
Definition: ffplay.c:163
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1020
static double get_master_clock(VideoState *is)
Definition: ffplay.c:1420
#define av_malloc_array(a, b)
int size
Definition: ffplay.c:173
int avio_feof(AVIOContext *s)
feof() equivalent for AVIOContext.
Definition: aviobuf.c:358
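avio_feof() is the AVIOContext counterpart of feof(); it only becomes true once a read attempt has hit the end of the stream. A minimal sketch, assuming pb is an already-opened AVIOContext:
#include "libavformat/avio.h"

/* skip whatever is left in the stream */
static void drain_remaining(AVIOContext *pb)
{
    while (!avio_feof(pb))   /* true once a read has reached end of file */
        avio_r8(pb);         /* read and discard one byte */
}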
#define FF_QUIT_EVENT
Definition: ffplay.c:360
int xleft
Definition: ffplay.c:292
#define FFSWAP(type, a, b)
Definition: common.h:99
int nb_channels
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2279
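av_get_pix_fmt_name() is handy in log messages; a minimal sketch that prints the dimensions and pixel format of a frame assumed to hold valid video data:
#include <stdio.h>
#include "libavutil/frame.h"
#include "libavutil/pixdesc.h"

static void log_frame_format(const AVFrame *frame)
{
    const char *name = av_get_pix_fmt_name(frame->format);
    printf("%dx%d %s\n", frame->width, frame->height, name ? name : "unknown");
}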
int stream_index
Definition: avcodec.h:1432
#define OPT_INPUT
Definition: cmdutils.h:181
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:902
int subtitle_stream
Definition: ffplay.c:276
unsigned int audio_buf_size
Definition: ffplay.c:246
int64_t seek_rel
Definition: ffplay.c:214
int realtime
Definition: ffplay.c:217
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:265
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:928
static void video_display(VideoState *is)
Definition: ffplay.c:1341
static const char * wanted_stream_spec[AVMEDIA_TYPE_NB]
Definition: ffplay.c:320
static int show_status
Definition: ffplay.c:325
static int compute_mod(int a, int b)
Definition: ffplay.c:1037
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
AVPacket
This structure stores compressed data.
Definition: avcodec.h:1407
int av_opt_set(void *obj, const char *name, const char *val, int search_flags)
Definition: opt.c:449
static void sdl_audio_callback(void *opaque, Uint8 *stream, int len)
Definition: ffplay.c:2425
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:284
static void sync_clock_to_slave(Clock *c, Clock *slave)
Definition: ffplay.c:1395
static void toggle_full_screen(VideoState *is)
Definition: ffplay.c:3180
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1423
double last_vis_time
Definition: ffplay.c:271
AVPacket attached_pic
For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet will contain the attached pictu...
Definition: avformat.h:955
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
#define av_unused
Definition: attributes.h:125
#define tb
Definition: regdef.h:68
av_cold int swr_init(struct SwrContext *s)
Initialize context after user parameters have been set.
Definition: swresample.c:152
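swr_init() is called only after all user parameters have been set on the SwrContext, typically through the AVOptions setters. A minimal sketch of a 5.1 float-planar to stereo S16 resampler; the rates and layouts are just examples.
#include "libswresample/swresample.h"
#include "libavutil/opt.h"
#include "libavutil/channel_layout.h"

static SwrContext *make_resampler(void)
{
    SwrContext *swr = swr_alloc();
    if (!swr)
        return NULL;
    av_opt_set_int(swr, "in_channel_layout",  AV_CH_LAYOUT_5POINT1, 0);
    av_opt_set_int(swr, "in_sample_rate",     48000,                0);
    av_opt_set_sample_fmt(swr, "in_sample_fmt",  AV_SAMPLE_FMT_FLTP, 0);
    av_opt_set_int(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO,  0);
    av_opt_set_int(swr, "out_sample_rate",    44100,                0);
    av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16,  0);
    if (swr_init(swr) < 0)       /* validates and finalizes the configuration */
        swr_free(&swr);          /* on failure swr is freed and set to NULL */
    return swr;
}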
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:144
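av_buffersrc_add_frame() pushes one frame into the source end of a configured graph; results are then pulled from the sink with av_buffersink_get_frame(). A minimal sketch, assuming filt_in and filt_out are the buffer source and buffersink contexts of an already-configured graph:
#include "libavfilter/buffersrc.h"
#include "libavfilter/buffersink.h"
#include "libavutil/frame.h"

static int filter_one_frame(AVFilterContext *filt_in, AVFilterContext *filt_out,
                            AVFrame *in, AVFrame *out)
{
    int ret = av_buffersrc_add_frame(filt_in, in);  /* ownership of in's references is taken */
    if (ret < 0)
        return ret;
    /* one input frame may yield zero or more output frames */
    return av_buffersink_get_frame(filt_out, out);  /* AVERROR(EAGAIN) if none is ready yet */
}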
#define AV_PIX_FMT_0RGB32
Definition: pixfmt.h:346
AVFormatContext * ic
Definition: ffplay.c:216
libavutil/eval.h
simple arithmetic expression evaluator
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
int audio_volume
Definition: ffplay.c:250
static Frame * frame_queue_peek_readable(FrameQueue *f)
Definition: ffplay.c:756