FFmpeg  4.3.8
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
112 static FILE *vstats_file;
113 
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
123 typedef struct BenchmarkTimeStamps {
128 
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
134 static int run_as_daemon = 0;
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
139 
140 static int want_sdp = 1;
141 
144 
146 
151 
156 
159 
160 #if HAVE_TERMIOS_H
161 
162 /* init terminal so that we can grab keys */
163 static struct termios oldtty;
164 static int restore_tty;
165 #endif
166 
167 #if HAVE_THREADS
168 static void free_input_threads(void);
169 #endif
170 
171 /* sub2video hack:
172  Convert subtitles to video with alpha to insert them in filter graphs.
173  This is a temporary solution until libavfilter gets real subtitles support.
174  */
175 
/* [listing gap: signature elided — presumably
 * static int sub2video_get_blank_frame(InputStream *ist) — TODO confirm] */
{
    int ret;
    AVFrame *frame = ist->sub2video.frame;

    /* Re-prepare the cached sub2video frame: drop any previous buffer and
       size it to the decoder dimensions, falling back to the configured
       sub2video width/height when the decoder reports none. */
    av_frame_unref(frame);
    ist->sub2video.frame->width  = ist->dec_ctx->width  ? ist->dec_ctx->width  : ist->sub2video.w;
    ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
    /* [listing gap: one line elided here (likely the frame->format assignment) — TODO confirm] */
    if ((ret = av_frame_get_buffer(frame, 0)) < 0)
        return ret;
    /* Clear the first plane so the canvas starts fully blank/transparent. */
    memset(frame->data[0], 0, frame->height * frame->linesize[0]);
    return 0; /* success */
}
190 
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192  AVSubtitleRect *r)
193 {
194  uint32_t *pal, *dst2;
195  uint8_t *src, *src2;
196  int x, y;
197 
198  if (r->type != SUBTITLE_BITMAP) {
199  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200  return;
201  }
202  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204  r->x, r->y, r->w, r->h, w, h
205  );
206  return;
207  }
208 
209  dst += r->y * dst_linesize + r->x * 4;
210  src = r->data[0];
211  pal = (uint32_t *)r->data[1];
212  for (y = 0; y < r->h; y++) {
213  dst2 = (uint32_t *)dst;
214  src2 = src;
215  for (x = 0; x < r->w; x++)
216  *(dst2++) = pal[*(src2++)];
217  dst += dst_linesize;
218  src += r->linesize[0];
219  }
220 }
221 
/* [listing gap: signature elided — presumably
 * static void sub2video_push_ref(InputStream *ist, int64_t pts) — TODO confirm] */
{
    AVFrame *frame = ist->sub2video.frame;
    int i;
    int ret;

    /* The blank canvas must already exist (see sub2video_get_blank_frame). */
    av_assert1(frame->data[0]);
    /* Stamp the frame and remember the last pts pushed for this stream. */
    ist->sub2video.last_pts = frame->pts = pts;
    /* Feed the same frame into every buffer source this stream drives. */
    for (i = 0; i < ist->nb_filters; i++) {
        ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
        /* [listing gap: AV_BUFFERSRC_FLAG_* arguments and ')' elided] */
        if (ret != AVERROR_EOF && ret < 0)
            av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
                   av_err2str(ret));
    }
}
239 
/* Render the given subtitle (or a blank "heartbeat" canvas when sub==NULL)
 * into the stream's sub2video frame and push it into the filter graph.
 * Also records the display window [pts, end_pts] in stream time base. */
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
{
    AVFrame *frame = ist->sub2video.frame;
    int8_t *dst; /* NOTE(review): upstream uses uint8_t here and
                    sub2video_copy_rect() takes uint8_t* — this int8_t looks
                    like a transcription artifact; confirm against upstream. */
    int dst_linesize;
    int num_rects, i;
    int64_t pts, end_pts;

    if (!frame)
        return; /* sub2video not active for this stream */
    if (sub) {
        /* Convert subtitle display window from AV_TIME_BASE to stream tb. */
        pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
                           AV_TIME_BASE_Q, ist->st->time_base);
        end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
                               AV_TIME_BASE_Q, ist->st->time_base);
        num_rects = sub->num_rects;
    } else {
        /* If we are initializing the system, utilize current heartbeat
           PTS as the start time, and show until the following subpicture
           is received. Otherwise, utilize the previous subpicture's end time
           as the fall-back value. */
        pts = ist->sub2video.initialize ?
              heartbeat_pts : ist->sub2video.end_pts;
        end_pts = INT64_MAX;
        num_rects = 0;
    }
    if (sub2video_get_blank_frame(ist) < 0) {
        /* [listing gap: av_log(..., AV_LOG_ERROR, line elided] */
        "Impossible to get a blank canvas.\n");
        return;
    }
    dst = frame->data [0];
    dst_linesize = frame->linesize[0];
    /* Paint every subtitle rectangle onto the blank canvas. */
    for (i = 0; i < num_rects; i++)
        sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
    sub2video_push_ref(ist, pts);
    ist->sub2video.end_pts = end_pts;
    ist->sub2video.initialize = 0;
}
279 
/* [listing gap: signature elided — presumably
 * static void sub2video_heartbeat(InputStream *ist, int64_t pts) — TODO confirm] */
{
    InputFile *infile = input_files[ist->file_index];
    int i, j, nb_reqs;
    int64_t pts2;

    /* When a frame is read from a file, examine all sub2video streams in
       the same file and send the sub2video frame again. Otherwise, decoded
       video frames could be accumulating in the filter graph while a filter
       (possibly overlay) is desperately waiting for a subtitle frame. */
    for (i = 0; i < infile->nb_streams; i++) {
        InputStream *ist2 = input_streams[infile->ist_index + i];
        if (!ist2->sub2video.frame)
            continue; /* not a sub2video stream */
        /* subtitles seem to be usually muxed ahead of other streams;
           if not, subtracting a larger time here is necessary */
        pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
        /* do not send the heartbeat frame if the subtitle is already ahead */
        if (pts2 <= ist2->sub2video.last_pts)
            continue;
        if (pts2 >= ist2->sub2video.end_pts || ist2->sub2video.initialize)
            /* if we have hit the end of the current displayed subpicture,
               or if we need to initialize the system, update the
               overlayed subpicture and its start/end times */
            sub2video_update(ist2, pts2 + 1, NULL);
        /* Only re-push a reference if some buffer source actually starved. */
        for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
            nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
        if (nb_reqs)
            sub2video_push_ref(ist2, pts2);
    }
}
311 
312 static void sub2video_flush(InputStream *ist)
313 {
314  int i;
315  int ret;
316 
317  if (ist->sub2video.end_pts < INT64_MAX)
318  sub2video_update(ist, INT64_MAX, NULL);
319  for (i = 0; i < ist->nb_filters; i++) {
320  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
321  if (ret != AVERROR_EOF && ret < 0)
322  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
323  }
324 }
325 
326 /* end of sub2video hack */
327 
/* Async-signal-safe terminal restore: put back the saved termios state
 * if term_init() modified it. Safe to call from a signal handler. */
static void term_exit_sigsafe(void)
{
#if HAVE_TERMIOS_H
    if (restore_tty) {
        tcsetattr(0, TCSANOW, &oldtty);
    }
#endif
}
335 
/* Public terminal-restore entry point used on normal exit paths. */
void term_exit(void)
{
    /* AV_LOG_QUIET with an empty string flushes/settles the logger. */
    av_log(NULL, AV_LOG_QUIET, "%s", "");
    /* [listing gap: one line elided here (likely the term_exit_sigsafe() call) — TODO confirm] */
}
341 
342 static volatile int received_sigterm = 0;
343 static volatile int received_nb_signals = 0;
345 static volatile int ffmpeg_exited = 0;
346 static int main_return_code = 0;
347 
/* Signal handler for SIGINT/SIGTERM/etc.: records the signal so the main
 * loop can shut down cleanly; hard-exits after repeated signals. */
static void
/* [listing gap: parameter list elided — presumably sigterm_handler(int sig)] */
{
    int ret;
    received_sigterm = sig;
    /* [listing gap: lines elided here (likely received_nb_signals++ and
       related bookkeeping) — TODO confirm] */
    if(received_nb_signals > 3) {
        /* Only async-signal-safe calls allowed here, hence raw write(2). */
        ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
                    strlen("Received > 3 system signals, hard exiting\n"));
        if (ret < 0) { /* Do nothing */ };
        exit(123);
    }
}
362 
#if HAVE_SETCONSOLECTRLHANDLER
/* Windows console control handler: translate console events into the
 * POSIX-style signal handling used by the rest of the program. */
static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
{
    av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);

    if (fdwCtrlType == CTRL_C_EVENT || fdwCtrlType == CTRL_BREAK_EVENT) {
        /* Ctrl-C / Ctrl-Break behave like SIGINT. */
        sigterm_handler(SIGINT);
        return TRUE;
    }

    if (fdwCtrlType == CTRL_CLOSE_EVENT  ||
        fdwCtrlType == CTRL_LOGOFF_EVENT ||
        fdwCtrlType == CTRL_SHUTDOWN_EVENT) {
        sigterm_handler(SIGTERM);
        /* Basically, with these 3 events, when we return from this method the
           process is hard terminated, so stall as long as we need to
           to try and let the main thread(s) clean up and gracefully terminate
           (we have at most 5 seconds, but should be done far before that). */
        while (!ffmpeg_exited)
            Sleep(0);
        return TRUE;
    }

    av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
    return FALSE;
}
#endif
394 
/* Configure terminal/signal handling for interactive operation: put the
 * tty into raw-ish mode (so single keypresses are readable) and install
 * handlers for the usual termination signals. */
void term_init(void)
{
#if HAVE_TERMIOS_H
    /* [listing gap: guard line elided here (likely an interactivity/daemon
       check opening this block) — TODO confirm] */
    struct termios tty;
    if (tcgetattr (0, &tty) == 0) {
        oldtty = tty;         /* save for term_exit_sigsafe() */
        restore_tty = 1;

        /* Disable input translation and flow control... */
        tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
                         |INLCR|IGNCR|ICRNL|IXON);
        tty.c_oflag |= OPOST;
        /* ...turn off echo/canonical mode... */
        tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
        tty.c_cflag &= ~(CSIZE|PARENB);
        tty.c_cflag |= CS8;
        /* ...and make read() return after a single byte, no timeout. */
        tty.c_cc[VMIN] = 1;
        tty.c_cc[VTIME] = 0;

        tcsetattr (0, TCSANOW, &tty);
    }
    signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
    }
#endif

    signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
    signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
#ifdef SIGXCPU
    signal(SIGXCPU, sigterm_handler);
#endif
#ifdef SIGPIPE
    signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
#endif
#if HAVE_SETCONSOLECTRLHANDLER
    SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
#endif
}
431 
/* Poll standard input for a single key press without blocking.
 * Returns the key byte on success, 0 when a pipe read yields nothing,
 * and a negative value when no input is available or the pipe closed. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    struct timeval timeout;
    fd_set read_set;
    int avail = 1;

    /* Zero-timeout select() == pure poll of stdin. */
    FD_ZERO(&read_set);
    FD_SET(0, &read_set);
    timeout.tv_sec  = 0;
    timeout.tv_usec = 0;
    avail = select(1, &read_set, NULL, NULL, &timeout);
    if (avail > 0) {
        avail = read(0, &ch, 1);
        return avail == 1 ? ch : avail;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;

    /* Lazily discover whether stdin is a console or a pipe. */
    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe      = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            /* input pipe may have been closed by the program that ran ffmpeg */
            return -1;
        }
        if (nchars == 0)
            return -1;
        if (read(0, &ch, 1) == 1)
            return ch;
        return 0;
    }
# endif
    if (kbhit())
        return getch();
#endif
    return -1;
}
484 
/* AVIOInterruptCallback: tells libavformat I/O to abort. */
static int decode_interrupt_cb(void *ctx)
{
    /* [listing gap: return expression elided (presumably based on
       received_nb_signals / transcode_init_done) — TODO confirm] */
}
489 
491 
/* Global teardown registered for program exit: frees filtergraphs, closes
 * all output/input files and streams, releases queued frames/packets, and
 * reports final status. NOTE(review): the listing this was taken from
 * elided several lines; gaps are marked below. */
static void ffmpeg_cleanup(int ret)
{
    int i, j;

    if (do_benchmark) {
        int maxrss = getmaxrss() / 1024;
        av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
    }

    /* Free every filtergraph: drain input frame queues, sub2video subtitle
       queues, and all per-input/per-output filter state. */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        /* [listing gap: one line elided (likely avfilter_graph_free)] */
        for (j = 0; j < fg->nb_inputs; j++) {
            InputFilter *ifilter = fg->inputs[j];
            struct InputStream *ist = ifilter->ist;

            while (av_fifo_size(ifilter->frame_queue)) {
                AVFrame *frame;
                av_fifo_generic_read(ifilter->frame_queue, &frame,
                                     sizeof(frame), NULL);
                av_frame_free(&frame);
            }
            av_fifo_freep(&ifilter->frame_queue);
            if (ist->sub2video.sub_queue) {
                while (av_fifo_size(ist->sub2video.sub_queue)) {
                    AVSubtitle sub;
                    /* [listing gap: av_fifo_generic_read(...) call start elided] */
                    &sub, sizeof(sub), NULL);
                    avsubtitle_free(&sub);
                }
                /* [listing gap: one line elided (likely av_fifo_freep)] */
            }
            av_buffer_unref(&ifilter->hw_frames_ctx);
            av_freep(&ifilter->name);
            av_freep(&fg->inputs[j]);
        }
        av_freep(&fg->inputs);
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];

            avfilter_inout_free(&ofilter->out_tmp);
            av_freep(&ofilter->name);
            av_freep(&ofilter->formats);
            av_freep(&ofilter->channel_layouts);
            av_freep(&ofilter->sample_rates);
            av_freep(&fg->outputs[j]);
        }
        av_freep(&fg->outputs);
        av_freep(&fg->graph_desc);

        av_freep(&filtergraphs[i]);
    }
    av_freep(&filtergraphs);

    /* [listing gap: one line elided (likely av_freep(&subtitle_out))] */

    /* close files */
    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        /* [listing gap: AVFormatContext *s declaration elided] */
        if (!of)
            continue;
        s = of->ctx;
        if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
            avio_closep(&s->pb);
        /* [listing gap: one line elided (likely avformat_free_context)] */
        av_dict_free(&of->opts);

        av_freep(&output_files[i]);
    }
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!ost)
            continue;

        av_bsf_free(&ost->bsf_ctx);

        /* [listing gap: one line elided] */
        av_frame_free(&ost->last_frame);
        av_dict_free(&ost->encoder_opts);

        av_freep(&ost->forced_keyframes);
        /* [listing gap: one line elided] */
        av_freep(&ost->avfilter);
        av_freep(&ost->logfile_prefix);

        /* [listing gap: one line elided (likely audio_channels_map free)] */
        ost->audio_channels_mapped = 0;

        av_dict_free(&ost->sws_dict);
        av_dict_free(&ost->swr_opts);

        /* [listing gap: lines elided (likely avcodec_free_context /
           avcodec_parameters_free)] */

        if (ost->muxing_queue) {
            while (av_fifo_size(ost->muxing_queue)) {
                AVPacket pkt;
                av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
                av_packet_unref(&pkt);
            }
            /* [listing gap: one line elided (likely av_fifo_freep)] */
        }

        av_freep(&output_streams[i]);
    }
#if HAVE_THREADS
    free_input_threads();
#endif
    for (i = 0; i < nb_input_files; i++) {
        avformat_close_input(&input_files[i]->ctx);
        av_freep(&input_files[i]);
    }
    for (i = 0; i < nb_input_streams; i++) {
        InputStream *ist = input_streams[i];

        /* [listing gap: lines elided (likely frame/subtitle frees)] */
        av_dict_free(&ist->decoder_opts);
        /* [listing gap: lines elided (likely sub2video frees)] */
        av_freep(&ist->filters);
        av_freep(&ist->hwaccel_device);
        av_freep(&ist->dts_buffer);

        /* [listing gap: one line elided (likely avcodec_free_context)] */

        av_freep(&input_streams[i]);
    }

    if (vstats_file) {
        if (fclose(vstats_file))
            /* [listing gap: av_log(..., AV_LOG_ERROR, line elided] */
            "Error closing vstats file, loss of information possible: %s\n",
            av_err2str(AVERROR(errno)));
    }
    /* [listing gap: one line elided (likely av_freep(&vstats_filename))] */

    av_freep(&input_streams);
    av_freep(&input_files);
    av_freep(&output_streams);
    av_freep(&output_files);

    uninit_opts();

    /* [listing gap: one line elided (likely avformat_network_deinit)] */

    if (received_sigterm) {
        av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
               (int) received_sigterm);
    } else if (ret && atomic_load(&transcode_init_done)) {
        av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
    }
    term_exit();
    ffmpeg_exited = 1; /* lets CtrlHandler()'s stall loop finish on Windows */
}
649 
651 {
652  AVDictionaryEntry *t = NULL;
653 
654  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
656  }
657 }
658 
660 {
662  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
663  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
664  exit_program(1);
665  }
666 }
667 
/* Abort the program when an experimental codec was requested without the
 * required opt-in; the diagnostic is printed by the caller before this. */
static void abort_codec_experimental(AVCodec *c, int encoder)
{
    exit_program(1);
}
672 
/* With -benchmark_all: print user/sys/real time deltas since the previous
 * call, labeled by the printf-style fmt, then reset the reference time. */
static void update_benchmark(const char *fmt, ...)
{
    if (do_benchmark_all) {
        /* [listing gap: line elided (likely BenchmarkTimeStamps t = get_benchmark_time_stamps())] */
        va_list va;
        char buf[1024];

        if (fmt) {
            va_start(va, fmt);
            vsnprintf(buf, sizeof(buf), fmt, va);
            va_end(va);
            /* [listing gap: av_log(..., AV_LOG_INFO, line elided] */
            "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
            t.user_usec - current_time.user_usec,
            t.sys_usec - current_time.sys_usec,
            t.real_usec - current_time.real_usec, buf);
        }
        current_time = t; /* new reference point for the next delta */
    }
}
693 
695 {
696  int i;
697  for (i = 0; i < nb_output_streams; i++) {
698  OutputStream *ost2 = output_streams[i];
699  ost2->finished |= ost == ost2 ? this_stream : others;
700  }
701 }
702 
/* Deliver one encoded packet to the muxer for stream 'ost'. If the muxer
 * header is not written yet the packet is buffered in ost->muxing_queue.
 * 'unqueue' is non-zero when replaying a previously buffered packet so it
 * is not counted against -frames twice. Consumes the packet.
 * NOTE(review): several lines were elided in this listing; gaps marked. */
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
{
    AVFormatContext *s = of->ctx;
    AVStream *st = ost->st;
    int ret;

    /*
     * Audio encoders may split the packets -- #frames in != #packets out.
     * But there is no reordering, so we can limit the number of output packets
     * by simply dropping them here.
     * Counting encoded video frames needs to be done separately because of
     * reordering, see do_video_out().
     * Do not count the packet when unqueued because it has been counted when queued.
     */
    if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
        if (ost->frame_number >= ost->max_frames) {
            av_packet_unref(pkt);
            return;
        }
        ost->frame_number++;
    }

    if (!of->header_written) {
        AVPacket tmp_pkt = {0};
        /* the muxer is not initialized yet, buffer the packet */
        if (!av_fifo_space(ost->muxing_queue)) {
            /* grow the queue geometrically, capped by -max_muxing_queue_size */
            int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
                                 ost->max_muxing_queue_size);
            if (new_size <= av_fifo_size(ost->muxing_queue)) {
                /* [listing gap: av_log(..., AV_LOG_ERROR, line elided] */
                "Too many packets buffered for output stream %d:%d.\n",
                ost->file_index, ost->st->index);
                exit_program(1);
            }
            ret = av_fifo_realloc2(ost->muxing_queue, new_size);
            if (ret < 0)
                exit_program(1);
        }
        ret = av_packet_make_refcounted(pkt);
        if (ret < 0)
            exit_program(1);
        av_packet_move_ref(&tmp_pkt, pkt);
        av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
        return;
    }

    /* [listing gap: condition lines elided before this assignment
       (upstream: VSYNC_DROP video / negative audio_sync_method) — TODO confirm] */
    pkt->pts = pkt->dts = AV_NOPTS_VALUE;

    if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
        int i;
        /* [listing gap: av_packet_get_side_data(AV_PKT_DATA_QUALITY_STATS, ...) start elided] */
        NULL);
        /* quality / picture type / per-plane error come from the side data */
        ost->quality = sd ? AV_RL32(sd) : -1;
        ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;

        for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
            if (sd && i < sd[5])
                ost->error[i] = AV_RL64(sd + 8 + 8*i);
            else
                ost->error[i] = -1;
        }

        if (ost->frame_rate.num && ost->is_cfr) {
            if (pkt->duration > 0)
                av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
            pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
                                         ost->mux_timebase);
        }
    }

    av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);

    if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
        if (pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts > pkt->pts) {
            av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
                   pkt->dts, pkt->pts,
                   ost->file_index, ost->st->index);
            /* replace both by the median of {pts, dts, last_mux_dts+1} */
            pkt->pts =
            pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
                     - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
                     - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
        }
        /* [listing gap: opening condition line elided (codec-type check)] */
        pkt->dts != AV_NOPTS_VALUE &&
        !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
        ost->last_mux_dts != AV_NOPTS_VALUE) {
        /* [listing gap: computation of 'max' elided (last_mux_dts + strictness)] */
        if (pkt->dts < max) {
            int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
            if (exit_on_error)
                loglevel = AV_LOG_ERROR;
            av_log(s, loglevel, "Non-monotonous DTS in output stream "
                   "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
                   ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
            if (exit_on_error) {
                av_log(NULL, AV_LOG_FATAL, "aborting.\n");
                exit_program(1);
            }
            av_log(s, loglevel, "changing to %"PRId64". This may result "
                   "in incorrect timestamps in the output file.\n",
                   max);
            if (pkt->pts >= pkt->dts)
                pkt->pts = FFMAX(pkt->pts, max);
            pkt->dts = max;
        }
        }
    }
    ost->last_mux_dts = pkt->dts;

    ost->data_size += pkt->size;
    ost->packets_written++;

    pkt->stream_index = ost->index;

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
               "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
               /* [listing gap: media-type string argument elided] */
               av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
               av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
               pkt->size
              );
    }

    ret = av_interleaved_write_frame(s, pkt);
    if (ret < 0) {
        print_error("av_interleaved_write_frame()", ret);
        main_return_code = 1;
        /* [listing gap: line elided (likely close_all_output_streams call)] */
    }
    av_packet_unref(pkt);
}
839 
841 {
842  OutputFile *of = output_files[ost->file_index];
843 
844  ost->finished |= ENCODER_FINISHED;
845  if (of->shortest) {
847  of->recording_time = FFMIN(of->recording_time, end);
848  }
849 }
850 
851 /*
852  * Send a single packet to the output, applying any bitstream filters
853  * associated with the output stream. This may result in any number
854  * of packets actually being written, depending on what bitstream
855  * filters are applied. The supplied packet is consumed and will be
856  * blank (as if newly-allocated) when this function returns.
857  *
858  * If eof is set, instead indicate EOF to all bitstream filters and
859  * therefore flush any delayed packets to the output. A blank packet
860  * must be supplied in this case.
861  */
/*
 * Send a single packet to the output, applying any bitstream filters
 * associated with the output stream. This may result in any number
 * of packets actually being written, depending on what bitstream
 * filters are applied. The supplied packet is consumed and will be
 * blank (as if newly-allocated) when this function returns.
 *
 * If eof is set, instead indicate EOF to all bitstream filters and
 * therefore flush any delayed packets to the output. A blank packet
 * must be supplied in this case.
 */
/* [listing gap: first signature line elided — presumably
   static void output_packet(OutputFile *of, AVPacket *pkt,] */
                          OutputStream *ost, int eof)
{
    int ret = 0;

    /* apply the output bitstream filters */
    if (ost->bsf_ctx) {
        ret = av_bsf_send_packet(ost->bsf_ctx, eof ? NULL : pkt);
        if (ret < 0)
            goto finish;
        /* drain everything the bsf produced for this input */
        while ((ret = av_bsf_receive_packet(ost->bsf_ctx, pkt)) >= 0)
            write_packet(of, pkt, ost, 0);
        if (ret == AVERROR(EAGAIN))
            ret = 0; /* bsf needs more input — not an error */
    } else if (!eof)
        write_packet(of, pkt, ost, 0);

finish:
    if (ret < 0 && ret != AVERROR_EOF) {
        av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
               "packet for stream #%d:%d.\n", ost->file_index, ost->index);
        if(exit_on_error)
            exit_program(1);
    }
}
887 
889 {
890  OutputFile *of = output_files[ost->file_index];
891 
892  if (of->recording_time != INT64_MAX &&
894  AV_TIME_BASE_Q) >= 0) {
895  close_output_stream(ost);
896  return 0;
897  }
898  return 1;
899 }
900 
/* [listing gap: first signature line elided — presumably
   static void do_audio_out(OutputFile *of, OutputStream *ost,] */
                         AVFrame *frame)
{
    AVCodecContext *enc = ost->enc_ctx;
    AVPacket pkt;
    int ret;

    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    /* honor -t: stop encoding once recording time is reached */
    if (!check_recording_time(ost))
        return;

    /* Without a valid pts (or with forced sync), continue from the running
       sample counter; then advance it past this frame. */
    if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
        frame->pts = ost->sync_opts;
    ost->sync_opts = frame->pts + frame->nb_samples;
    ost->samples_encoded += frame->nb_samples;
    ost->frames_encoded++;

    av_assert0(pkt.size || !pkt.data);
    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
               "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
               av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
               enc->time_base.num, enc->time_base.den);
    }

    ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        goto error;

    /* drain all packets the encoder can produce for this frame */
    while (1) {
        ret = avcodec_receive_packet(enc, &pkt);
        if (ret == AVERROR(EAGAIN))
            break; /* encoder wants more input */
        if (ret < 0)
            goto error;

        update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);

        av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);

        if (debug_ts) {
            av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
                   "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
                   av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
                   av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
        }

        output_packet(of, &pkt, ost, 0);
    }

    return;
error:
    av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
    exit_program(1);
}
960 
/* Encode one AVSubtitle and mux the resulting packet(s). DVB subtitles
 * are encoded twice: once to draw the subpicture and once to clear it. */
static void do_subtitle_out(OutputFile *of,
                            OutputStream *ost,
                            AVSubtitle *sub)
{
    int subtitle_out_max_size = 1024 * 1024; /* fixed 1 MiB encode buffer */
    int subtitle_out_size, nb, i;
    AVCodecContext *enc;
    AVPacket pkt;
    int64_t pts;

    if (sub->pts == AV_NOPTS_VALUE) {
        av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
        if (exit_on_error)
            exit_program(1);
        return;
    }

    enc = ost->enc_ctx;

    /* lazily allocate the shared encode buffer */
    if (!subtitle_out) {
        subtitle_out = av_malloc(subtitle_out_max_size);
        if (!subtitle_out) {
            av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
            exit_program(1);
        }
    }

    /* Note: DVB subtitle need one packet to draw them and one other
       packet to clear them */
    /* XXX: signal it in the codec context ? */
    /* [listing gap: codec-id condition elided (presumably DVB subtitle check)] */
    nb = 2;
    else
    nb = 1;

    /* shift timestamp to honor -ss and make check_recording_time() work with -t */
    pts = sub->pts;
    if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
        pts -= output_files[ost->file_index]->start_time;
    for (i = 0; i < nb; i++) {
        unsigned save_num_rects = sub->num_rects;

        ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
        if (!check_recording_time(ost))
            return;

        sub->pts = pts;
        // start_display_time is required to be 0
        sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
        sub->end_display_time -= sub->start_display_time;
        sub->start_display_time = 0;
        /* second pass = the "clear" packet: encode with zero rectangles */
        if (i == 1)
            sub->num_rects = 0;

        ost->frames_encoded++;

        subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
                                                    subtitle_out_max_size, sub);
        if (i == 1)
            sub->num_rects = save_num_rects; /* restore for the caller */
        if (subtitle_out_size < 0) {
            av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
            exit_program(1);
        }

        av_init_packet(&pkt);
        pkt.data = subtitle_out;
        pkt.size = subtitle_out_size;
        pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
        pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
            /* XXX: the pts correction is handled here. Maybe handling
               it in the codec would be better */
            if (i == 0)
                pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
            else
                pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
        }
        pkt.dts = pkt.pts;
        output_packet(of, &pkt, ost, 0);
    }
}
1043 
1044 static void do_video_out(OutputFile *of,
1045  OutputStream *ost,
1046  AVFrame *next_picture,
1047  double sync_ipts)
1048 {
1049  int ret, format_video_sync;
1050  AVPacket pkt;
1051  AVCodecContext *enc = ost->enc_ctx;
1052  AVCodecParameters *mux_par = ost->st->codecpar;
1053  AVRational frame_rate;
1054  int nb_frames, nb0_frames, i;
1055  double delta, delta0;
1056  double duration = 0;
1057  int frame_size = 0;
1058  InputStream *ist = NULL;
1060 
1061  if (ost->source_index >= 0)
1062  ist = input_streams[ost->source_index];
1063 
1064  frame_rate = av_buffersink_get_frame_rate(filter);
1065  if (frame_rate.num > 0 && frame_rate.den > 0)
1066  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1067 
1068  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1069  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1070 
1071  if (!ost->filters_script &&
1072  !ost->filters &&
1073  (nb_filtergraphs == 0 || !filtergraphs[0]->graph_desc) &&
1074  next_picture &&
1075  ist &&
1076  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1077  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1078  }
1079 
1080  if (!next_picture) {
1081  //end, flushing
1082  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1083  ost->last_nb0_frames[1],
1084  ost->last_nb0_frames[2]);
1085  } else {
1086  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1087  delta = delta0 + duration;
1088 
1089  /* by default, we output a single frame */
1090  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1091  nb_frames = 1;
1092 
1093  format_video_sync = video_sync_method;
1094  if (format_video_sync == VSYNC_AUTO) {
1095  if(!strcmp(of->ctx->oformat->name, "avi")) {
1096  format_video_sync = VSYNC_VFR;
1097  } else
1098  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1099  if ( ist
1100  && format_video_sync == VSYNC_CFR
1101  && input_files[ist->file_index]->ctx->nb_streams == 1
1102  && input_files[ist->file_index]->input_ts_offset == 0) {
1103  format_video_sync = VSYNC_VSCFR;
1104  }
1105  if (format_video_sync == VSYNC_CFR && copy_ts) {
1106  format_video_sync = VSYNC_VSCFR;
1107  }
1108  }
1109  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1110 
1111  if (delta0 < 0 &&
1112  delta > 0 &&
1113  format_video_sync != VSYNC_PASSTHROUGH &&
1114  format_video_sync != VSYNC_DROP) {
1115  if (delta0 < -0.6) {
1116  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1117  } else
1118  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1119  sync_ipts = ost->sync_opts;
1120  duration += delta0;
1121  delta0 = 0;
1122  }
1123 
1124  switch (format_video_sync) {
1125  case VSYNC_VSCFR:
1126  if (ost->frame_number == 0 && delta0 >= 0.5) {
1127  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1128  delta = duration;
1129  delta0 = 0;
1130  ost->sync_opts = llrint(sync_ipts);
1131  }
1132  case VSYNC_CFR:
1133  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1134  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1135  nb_frames = 0;
1136  } else if (delta < -1.1)
1137  nb_frames = 0;
1138  else if (delta > 1.1) {
1139  nb_frames = lrintf(delta);
1140  if (delta0 > 1.1)
1141  nb0_frames = llrintf(delta0 - 0.6);
1142  }
1143  break;
1144  case VSYNC_VFR:
1145  if (delta <= -0.6)
1146  nb_frames = 0;
1147  else if (delta > 0.6)
1148  ost->sync_opts = llrint(sync_ipts);
1149  break;
1150  case VSYNC_DROP:
1151  case VSYNC_PASSTHROUGH:
1152  ost->sync_opts = llrint(sync_ipts);
1153  break;
1154  default:
1155  av_assert0(0);
1156  }
1157  }
1158 
1159  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1160  nb0_frames = FFMIN(nb0_frames, nb_frames);
1161 
1162  memmove(ost->last_nb0_frames + 1,
1163  ost->last_nb0_frames,
1164  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1165  ost->last_nb0_frames[0] = nb0_frames;
1166 
1167  if (nb0_frames == 0 && ost->last_dropped) {
1168  nb_frames_drop++;
1170  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1171  ost->frame_number, ost->st->index, ost->last_frame->pts);
1172  }
1173  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1174  if (nb_frames > dts_error_threshold * 30) {
1175  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1176  nb_frames_drop++;
1177  return;
1178  }
1179  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1180  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1181  if (nb_frames_dup > dup_warning) {
1182  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1183  dup_warning *= 10;
1184  }
1185  }
1186  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1187 
1188  /* duplicates frame if needed */
1189  for (i = 0; i < nb_frames; i++) {
1190  AVFrame *in_picture;
1191  int forced_keyframe = 0;
1192  double pts_time;
1193  av_init_packet(&pkt);
1194  pkt.data = NULL;
1195  pkt.size = 0;
1196 
1197  if (i < nb0_frames && ost->last_frame) {
1198  in_picture = ost->last_frame;
1199  } else
1200  in_picture = next_picture;
1201 
1202  if (!in_picture)
1203  return;
1204 
1205  in_picture->pts = ost->sync_opts;
1206 
1207  if (!check_recording_time(ost))
1208  return;
1209 
1211  ost->top_field_first >= 0)
1212  in_picture->top_field_first = !!ost->top_field_first;
1213 
1214  if (in_picture->interlaced_frame) {
1215  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1216  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1217  else
1218  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1219  } else
1220  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1221 
1222  in_picture->quality = enc->global_quality;
1223  in_picture->pict_type = 0;
1224 
1225  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1226  in_picture->pts != AV_NOPTS_VALUE)
1227  ost->forced_kf_ref_pts = in_picture->pts;
1228 
1229  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1230  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1231  if (ost->forced_kf_index < ost->forced_kf_count &&
1232  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1233  ost->forced_kf_index++;
1234  forced_keyframe = 1;
1235  } else if (ost->forced_keyframes_pexpr) {
1236  double res;
1237  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1240  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1246  res);
1247  if (res) {
1248  forced_keyframe = 1;
1254  }
1255 
1257  } else if ( ost->forced_keyframes
1258  && !strncmp(ost->forced_keyframes, "source", 6)
1259  && in_picture->key_frame==1
1260  && !i) {
1261  forced_keyframe = 1;
1262  }
1263 
1264  if (forced_keyframe) {
1265  in_picture->pict_type = AV_PICTURE_TYPE_I;
1266  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1267  }
1268 
1270  if (debug_ts) {
1271  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1272  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1273  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1274  enc->time_base.num, enc->time_base.den);
1275  }
1276 
1277  ost->frames_encoded++;
1278 
1279  ret = avcodec_send_frame(enc, in_picture);
1280  if (ret < 0)
1281  goto error;
1282  // Make sure Closed Captions will not be duplicated
1284 
1285  while (1) {
1286  ret = avcodec_receive_packet(enc, &pkt);
1287  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1288  if (ret == AVERROR(EAGAIN))
1289  break;
1290  if (ret < 0)
1291  goto error;
1292 
1293  if (debug_ts) {
1294  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1295  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1296  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1297  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1298  }
1299 
1300  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1301  pkt.pts = ost->sync_opts;
1302 
1303  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1304 
1305  if (debug_ts) {
1306  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1307  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1308  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1309  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1310  }
1311 
1312  frame_size = pkt.size;
1313  output_packet(of, &pkt, ost, 0);
1314 
1315  /* if two pass, output log */
1316  if (ost->logfile && enc->stats_out) {
1317  fprintf(ost->logfile, "%s", enc->stats_out);
1318  }
1319  }
1320  ost->sync_opts++;
1321  /*
1322  * For video, number of frames in == number of packets out.
1323  * But there may be reordering, so we can't throw away frames on encoder
1324  * flush, we need to limit them here, before they go into encoder.
1325  */
1326  ost->frame_number++;
1327 
1328  if (vstats_filename && frame_size)
1329  do_video_stats(ost, frame_size);
1330  }
1331 
1332  if (!ost->last_frame)
1333  ost->last_frame = av_frame_alloc();
1334  av_frame_unref(ost->last_frame);
1335  if (next_picture && ost->last_frame)
1336  av_frame_ref(ost->last_frame, next_picture);
1337  else
1338  av_frame_free(&ost->last_frame);
1339 
1340  return;
1341 error:
1342  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1343  exit_program(1);
1344 }
1345 
1346 static double psnr(double d)
1347 {
1348  return -10.0 * log10(d);
1349 }
1350 
1352 {
1353  AVCodecContext *enc;
1354  int frame_number;
1355  double ti1, bitrate, avg_bitrate;
1356 
1357  /* this is executed just the first time do_video_stats is called */
1358  if (!vstats_file) {
1359  vstats_file = fopen(vstats_filename, "w");
1360  if (!vstats_file) {
1361  perror("fopen");
1362  exit_program(1);
1363  }
1364  }
1365 
1366  enc = ost->enc_ctx;
1367  if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1368  frame_number = ost->st->nb_frames;
1369  if (vstats_version <= 1) {
1370  fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
1371  ost->quality / (float)FF_QP2LAMBDA);
1372  } else {
1373  fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
1374  ost->quality / (float)FF_QP2LAMBDA);
1375  }
1376 
1377  if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
1378  fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));
1379 
1380  fprintf(vstats_file,"f_size= %6d ", frame_size);
1381  /* compute pts value */
1382  ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
1383  if (ti1 < 0.01)
1384  ti1 = 0.01;
1385 
1386  bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
1387  avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
1388  fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
1389  (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
1390  fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
1391  }
1392 }
1393 
1394 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1395 
1397 {
1398  OutputFile *of = output_files[ost->file_index];
1399  int i;
1400 
1402 
1403  if (of->shortest) {
1404  for (i = 0; i < of->ctx->nb_streams; i++)
1405  output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
1406  }
1407 }
1408 
1409 /**
1410  * Get and encode new output from any of the filtergraphs, without causing
1411  * activity.
1412  *
1413  * @return 0 for success, <0 for severe errors
1414  */
1415 static int reap_filters(int flush)
1416 {
1417  AVFrame *filtered_frame = NULL;
1418  int i;
1419 
1420  /* Reap all buffers present in the buffer sinks */
1421  for (i = 0; i < nb_output_streams; i++) {
1422  OutputStream *ost = output_streams[i];
1423  OutputFile *of = output_files[ost->file_index];
1425  AVCodecContext *enc = ost->enc_ctx;
1426  int ret = 0;
1427 
1428  if (!ost->filter || !ost->filter->graph->graph)
1429  continue;
1430  filter = ost->filter->filter;
1431 
1432  if (!ost->initialized) {
1433  char error[1024] = "";
1434  ret = init_output_stream(ost, error, sizeof(error));
1435  if (ret < 0) {
1436  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1437  ost->file_index, ost->index, error);
1438  exit_program(1);
1439  }
1440  }
1441 
1442  if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
1443  return AVERROR(ENOMEM);
1444  }
1445  filtered_frame = ost->filtered_frame;
1446 
1447  while (1) {
1448  double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
1449  ret = av_buffersink_get_frame_flags(filter, filtered_frame,
1451  if (ret < 0) {
1452  if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
1454  "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
1455  } else if (flush && ret == AVERROR_EOF) {
1457  do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
1458  }
1459  break;
1460  }
1461  if (ost->finished) {
1462  av_frame_unref(filtered_frame);
1463  continue;
1464  }
1465  if (filtered_frame->pts != AV_NOPTS_VALUE) {
1466  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1467  AVRational filter_tb = av_buffersink_get_time_base(filter);
1468  AVRational tb = enc->time_base;
1469  int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);
1470 
1471  tb.den <<= extra_bits;
1472  float_pts =
1473  av_rescale_q(filtered_frame->pts, filter_tb, tb) -
1474  av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
1475  float_pts /= 1 << extra_bits;
1476  // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
1477  float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);
1478 
1479  filtered_frame->pts =
1480  av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
1481  av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
1482  }
1483 
1484  switch (av_buffersink_get_type(filter)) {
1485  case AVMEDIA_TYPE_VIDEO:
1486  if (!ost->frame_aspect_ratio.num)
1487  enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;
1488 
1489  if (debug_ts) {
1490  av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
1491  av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
1492  float_pts,
1493  enc->time_base.num, enc->time_base.den);
1494  }
1495 
1496  do_video_out(of, ost, filtered_frame, float_pts);
1497  break;
1498  case AVMEDIA_TYPE_AUDIO:
1499  if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
1500  enc->channels != filtered_frame->channels) {
1502  "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
1503  break;
1504  }
1505  do_audio_out(of, ost, filtered_frame);
1506  break;
1507  default:
1508  // TODO support subtitle filters
1509  av_assert0(0);
1510  }
1511 
1512  av_frame_unref(filtered_frame);
1513  }
1514  }
1515 
1516  return 0;
1517 }
1518 
1519 static void print_final_stats(int64_t total_size)
1520 {
1521  uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
1522  uint64_t subtitle_size = 0;
1523  uint64_t data_size = 0;
1524  float percent = -1.0;
1525  int i, j;
1526  int pass1_used = 1;
1527 
1528  for (i = 0; i < nb_output_streams; i++) {
1529  OutputStream *ost = output_streams[i];
1530  switch (ost->enc_ctx->codec_type) {
1531  case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
1532  case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
1533  case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
1534  default: other_size += ost->data_size; break;
1535  }
1536  extra_size += ost->enc_ctx->extradata_size;
1537  data_size += ost->data_size;
1540  pass1_used = 0;
1541  }
1542 
1543  if (data_size && total_size>0 && total_size >= data_size)
1544  percent = 100.0 * (total_size - data_size) / data_size;
1545 
1546  av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
1547  video_size / 1024.0,
1548  audio_size / 1024.0,
1549  subtitle_size / 1024.0,
1550  other_size / 1024.0,
1551  extra_size / 1024.0);
1552  if (percent >= 0.0)
1553  av_log(NULL, AV_LOG_INFO, "%f%%", percent);
1554  else
1555  av_log(NULL, AV_LOG_INFO, "unknown");
1556  av_log(NULL, AV_LOG_INFO, "\n");
1557 
1558  /* print verbose per-stream stats */
1559  for (i = 0; i < nb_input_files; i++) {
1560  InputFile *f = input_files[i];
1561  uint64_t total_packets = 0, total_size = 0;
1562 
1563  av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
1564  i, f->ctx->url);
1565 
1566  for (j = 0; j < f->nb_streams; j++) {
1567  InputStream *ist = input_streams[f->ist_index + j];
1568  enum AVMediaType type = ist->dec_ctx->codec_type;
1569 
1570  total_size += ist->data_size;
1571  total_packets += ist->nb_packets;
1572 
1573  av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
1574  i, j, media_type_string(type));
1575  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
1576  ist->nb_packets, ist->data_size);
1577 
1578  if (ist->decoding_needed) {
1579  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
1580  ist->frames_decoded);
1581  if (type == AVMEDIA_TYPE_AUDIO)
1582  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
1583  av_log(NULL, AV_LOG_VERBOSE, "; ");
1584  }
1585 
1586  av_log(NULL, AV_LOG_VERBOSE, "\n");
1587  }
1588 
1589  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
1590  total_packets, total_size);
1591  }
1592 
1593  for (i = 0; i < nb_output_files; i++) {
1594  OutputFile *of = output_files[i];
1595  uint64_t total_packets = 0, total_size = 0;
1596 
1597  av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
1598  i, of->ctx->url);
1599 
1600  for (j = 0; j < of->ctx->nb_streams; j++) {
1601  OutputStream *ost = output_streams[of->ost_index + j];
1602  enum AVMediaType type = ost->enc_ctx->codec_type;
1603 
1604  total_size += ost->data_size;
1605  total_packets += ost->packets_written;
1606 
1607  av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
1608  i, j, media_type_string(type));
1609  if (ost->encoding_needed) {
1610  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
1611  ost->frames_encoded);
1612  if (type == AVMEDIA_TYPE_AUDIO)
1613  av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
1614  av_log(NULL, AV_LOG_VERBOSE, "; ");
1615  }
1616 
1617  av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
1618  ost->packets_written, ost->data_size);
1619 
1620  av_log(NULL, AV_LOG_VERBOSE, "\n");
1621  }
1622 
1623  av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
1624  total_packets, total_size);
1625  }
1626  if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
1627  av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
1628  if (pass1_used) {
1629  av_log(NULL, AV_LOG_WARNING, "\n");
1630  } else {
1631  av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
1632  }
1633  }
1634 }
1635 
1636 static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
1637 {
1638  AVBPrint buf, buf_script;
1639  OutputStream *ost;
1640  AVFormatContext *oc;
1641  int64_t total_size;
1642  AVCodecContext *enc;
1643  int frame_number, vid, i;
1644  double bitrate;
1645  double speed;
1646  int64_t pts = INT64_MIN + 1;
1647  static int64_t last_time = -1;
1648  static int qp_histogram[52];
1649  int hours, mins, secs, us;
1650  const char *hours_sign;
1651  int ret;
1652  float t;
1653 
1654  if (!print_stats && !is_last_report && !progress_avio)
1655  return;
1656 
1657  if (!is_last_report) {
1658  if (last_time == -1) {
1659  last_time = cur_time;
1660  return;
1661  }
1662  if ((cur_time - last_time) < 500000)
1663  return;
1664  last_time = cur_time;
1665  }
1666 
1667  t = (cur_time-timer_start) / 1000000.0;
1668 
1669 
1670  oc = output_files[0]->ctx;
1671 
1672  total_size = avio_size(oc->pb);
1673  if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
1674  total_size = avio_tell(oc->pb);
1675 
1676  vid = 0;
1678  av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
1679  for (i = 0; i < nb_output_streams; i++) {
1680  float q = -1;
1681  ost = output_streams[i];
1682  enc = ost->enc_ctx;
1683  if (!ost->stream_copy)
1684  q = ost->quality / (float) FF_QP2LAMBDA;
1685 
1686  if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1687  av_bprintf(&buf, "q=%2.1f ", q);
1688  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1689  ost->file_index, ost->index, q);
1690  }
1691  if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
1692  float fps;
1693 
1694  frame_number = ost->frame_number;
1695  fps = t > 1 ? frame_number / t : 0;
1696  av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
1697  frame_number, fps < 9.95, fps, q);
1698  av_bprintf(&buf_script, "frame=%d\n", frame_number);
1699  av_bprintf(&buf_script, "fps=%.2f\n", fps);
1700  av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
1701  ost->file_index, ost->index, q);
1702  if (is_last_report)
1703  av_bprintf(&buf, "L");
1704  if (qp_hist) {
1705  int j;
1706  int qp = lrintf(q);
1707  if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
1708  qp_histogram[qp]++;
1709  for (j = 0; j < 32; j++)
1710  av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
1711  }
1712 
1713  if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
1714  int j;
1715  double error, error_sum = 0;
1716  double scale, scale_sum = 0;
1717  double p;
1718  char type[3] = { 'Y','U','V' };
1719  av_bprintf(&buf, "PSNR=");
1720  for (j = 0; j < 3; j++) {
1721  if (is_last_report) {
1722  error = enc->error[j];
1723  scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
1724  } else {
1725  error = ost->error[j];
1726  scale = enc->width * enc->height * 255.0 * 255.0;
1727  }
1728  if (j)
1729  scale /= 4;
1730  error_sum += error;
1731  scale_sum += scale;
1732  p = psnr(error / scale);
1733  av_bprintf(&buf, "%c:%2.2f ", type[j], p);
1734  av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
1735  ost->file_index, ost->index, type[j] | 32, p);
1736  }
1737  p = psnr(error_sum / scale_sum);
1738  av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
1739  av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
1740  ost->file_index, ost->index, p);
1741  }
1742  vid = 1;
1743  }
1744  /* compute min output value */
1746  pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
1747  ost->st->time_base, AV_TIME_BASE_Q));
1748  if (is_last_report)
1749  nb_frames_drop += ost->last_dropped;
1750  }
1751 
1752  secs = FFABS(pts) / AV_TIME_BASE;
1753  us = FFABS(pts) % AV_TIME_BASE;
1754  mins = secs / 60;
1755  secs %= 60;
1756  hours = mins / 60;
1757  mins %= 60;
1758  hours_sign = (pts < 0) ? "-" : "";
1759 
1760  bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
1761  speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;
1762 
1763  if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
1764  else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
1765  if (pts == AV_NOPTS_VALUE) {
1766  av_bprintf(&buf, "N/A ");
1767  } else {
1768  av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
1769  hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
1770  }
1771 
1772  if (bitrate < 0) {
1773  av_bprintf(&buf, "bitrate=N/A");
1774  av_bprintf(&buf_script, "bitrate=N/A\n");
1775  }else{
1776  av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
1777  av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
1778  }
1779 
1780  if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
1781  else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
1782  if (pts == AV_NOPTS_VALUE) {
1783  av_bprintf(&buf_script, "out_time_us=N/A\n");
1784  av_bprintf(&buf_script, "out_time_ms=N/A\n");
1785  av_bprintf(&buf_script, "out_time=N/A\n");
1786  } else {
1787  av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
1788  av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
1789  av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
1790  hours_sign, hours, mins, secs, us);
1791  }
1792 
1794  av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
1795  av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
1796  av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);
1797 
1798  if (speed < 0) {
1799  av_bprintf(&buf, " speed=N/A");
1800  av_bprintf(&buf_script, "speed=N/A\n");
1801  } else {
1802  av_bprintf(&buf, " speed=%4.3gx", speed);
1803  av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
1804  }
1805 
1806  if (print_stats || is_last_report) {
1807  const char end = is_last_report ? '\n' : '\r';
1808  if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
1809  fprintf(stderr, "%s %c", buf.str, end);
1810  } else
1811  av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);
1812 
1813  fflush(stderr);
1814  }
1815  av_bprint_finalize(&buf, NULL);
1816 
1817  if (progress_avio) {
1818  av_bprintf(&buf_script, "progress=%s\n",
1819  is_last_report ? "end" : "continue");
1820  avio_write(progress_avio, buf_script.str,
1821  FFMIN(buf_script.len, buf_script.size - 1));
1822  avio_flush(progress_avio);
1823  av_bprint_finalize(&buf_script, NULL);
1824  if (is_last_report) {
1825  if ((ret = avio_closep(&progress_avio)) < 0)
1827  "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
1828  }
1829  }
1830 
1831  if (is_last_report)
1832  print_final_stats(total_size);
1833 }
1834 
1836 {
1837  // We never got any input. Set a fake format, which will
1838  // come from libavformat.
1839  ifilter->format = par->format;
1840  ifilter->sample_rate = par->sample_rate;
1841  ifilter->channels = par->channels;
1842  ifilter->channel_layout = par->channel_layout;
1843  ifilter->width = par->width;
1844  ifilter->height = par->height;
1845  ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
1846 }
1847 
1848 static void flush_encoders(void)
1849 {
1850  int i, ret;
1851 
1852  for (i = 0; i < nb_output_streams; i++) {
1853  OutputStream *ost = output_streams[i];
1854  AVCodecContext *enc = ost->enc_ctx;
1855  OutputFile *of = output_files[ost->file_index];
1856 
1857  if (!ost->encoding_needed)
1858  continue;
1859 
1860  // Try to enable encoding with no input frames.
1861  // Maybe we should just let encoding fail instead.
1862  if (!ost->initialized) {
1863  FilterGraph *fg = ost->filter->graph;
1864  char error[1024] = "";
1865 
1867  "Finishing stream %d:%d without any data written to it.\n",
1868  ost->file_index, ost->st->index);
1869 
1870  if (ost->filter && !fg->graph) {
1871  int x;
1872  for (x = 0; x < fg->nb_inputs; x++) {
1873  InputFilter *ifilter = fg->inputs[x];
1874  if (ifilter->format < 0)
1875  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
1876  }
1877 
1879  continue;
1880 
1881  ret = configure_filtergraph(fg);
1882  if (ret < 0) {
1883  av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
1884  exit_program(1);
1885  }
1886 
1887  finish_output_stream(ost);
1888  }
1889 
1890  ret = init_output_stream(ost, error, sizeof(error));
1891  if (ret < 0) {
1892  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
1893  ost->file_index, ost->index, error);
1894  exit_program(1);
1895  }
1896  }
1897 
1899  continue;
1900 
1901  for (;;) {
1902  const char *desc = NULL;
1903  AVPacket pkt;
1904  int pkt_size;
1905 
1906  switch (enc->codec_type) {
1907  case AVMEDIA_TYPE_AUDIO:
1908  desc = "audio";
1909  break;
1910  case AVMEDIA_TYPE_VIDEO:
1911  desc = "video";
1912  break;
1913  default:
1914  av_assert0(0);
1915  }
1916 
1917  av_init_packet(&pkt);
1918  pkt.data = NULL;
1919  pkt.size = 0;
1920 
1922 
1923  while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
1924  ret = avcodec_send_frame(enc, NULL);
1925  if (ret < 0) {
1926  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1927  desc,
1928  av_err2str(ret));
1929  exit_program(1);
1930  }
1931  }
1932 
1933  update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
1934  if (ret < 0 && ret != AVERROR_EOF) {
1935  av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
1936  desc,
1937  av_err2str(ret));
1938  exit_program(1);
1939  }
1940  if (ost->logfile && enc->stats_out) {
1941  fprintf(ost->logfile, "%s", enc->stats_out);
1942  }
1943  if (ret == AVERROR_EOF) {
1944  output_packet(of, &pkt, ost, 1);
1945  break;
1946  }
1947  if (ost->finished & MUXER_FINISHED) {
1948  av_packet_unref(&pkt);
1949  continue;
1950  }
1951  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1952  pkt_size = pkt.size;
1953  output_packet(of, &pkt, ost, 0);
1955  do_video_stats(ost, pkt_size);
1956  }
1957  }
1958  }
1959 }
1960 
1961 /*
1962  * Check whether a packet from ist should be written into ost at this time
1963  */
1965 {
1966  OutputFile *of = output_files[ost->file_index];
1967  int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;
1968 
1969  if (ost->source_index != ist_index)
1970  return 0;
1971 
1972  if (ost->finished)
1973  return 0;
1974 
1975  if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
1976  return 0;
1977 
1978  return 1;
1979 }
1980 
1981 static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
1982 {
1983  OutputFile *of = output_files[ost->file_index];
1984  InputFile *f = input_files [ist->file_index];
1985  int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
1986  int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
1987  AVPacket opkt;
1988 
1989  // EOF: flush output bitstream filters.
1990  if (!pkt) {
1991  av_init_packet(&opkt);
1992  opkt.data = NULL;
1993  opkt.size = 0;
1994  output_packet(of, &opkt, ost, 1);
1995  return;
1996  }
1997 
1998  if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
2000  return;
2001 
2002  if (!ost->frame_number && !ost->copy_prior_start) {
2003  int64_t comp_start = start_time;
2004  if (copy_ts && f->start_time != AV_NOPTS_VALUE)
2005  comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
2006  if (pkt->pts == AV_NOPTS_VALUE ?
2007  ist->pts < comp_start :
2008  pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
2009  return;
2010  }
2011 
2012  if (of->recording_time != INT64_MAX &&
2013  ist->pts >= of->recording_time + start_time) {
2014  close_output_stream(ost);
2015  return;
2016  }
2017 
2018  if (f->recording_time != INT64_MAX) {
2019  start_time = f->ctx->start_time;
2020  if (f->start_time != AV_NOPTS_VALUE && copy_ts)
2021  start_time += f->start_time;
2022  if (ist->pts >= f->recording_time + start_time) {
2023  close_output_stream(ost);
2024  return;
2025  }
2026  }
2027 
2028  /* force the input stream PTS */
2029  if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
2030  ost->sync_opts++;
2031 
2032  if (av_packet_ref(&opkt, pkt) < 0)
2033  exit_program(1);
2034 
2035  if (pkt->pts != AV_NOPTS_VALUE)
2036  opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
2037 
2038  if (pkt->dts == AV_NOPTS_VALUE) {
2039  opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
2040  } else if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO) {
2042  if(!duration)
2043  duration = ist->dec_ctx->frame_size;
2044  opkt.dts = av_rescale_delta(ist->st->time_base, pkt->dts,
2045  (AVRational){1, ist->dec_ctx->sample_rate}, duration,
2047  /* dts will be set immediately afterwards to what pts is now */
2048  opkt.pts = opkt.dts - ost_tb_start_time;
2049  } else
2050  opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
2051  opkt.dts -= ost_tb_start_time;
2052 
2053  opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);
2054 
2055  output_packet(of, &opkt, ost, 0);
2056 }
2057 
2059 {
2060  AVCodecContext *dec = ist->dec_ctx;
2061 
2062  if (!dec->channel_layout) {
2063  char layout_name[256];
2064 
2065  if (dec->channels > ist->guess_layout_max)
2066  return 0;
2068  if (!dec->channel_layout)
2069  return 0;
2070  av_get_channel_layout_string(layout_name, sizeof(layout_name),
2071  dec->channels, dec->channel_layout);
2072  av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
2073  "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
2074  }
2075  return 1;
2076 }
2077 
2078 static void check_decode_result(InputStream *ist, int *got_output, int ret)
2079 {
2080  if (*got_output || ret<0)
2081  decode_error_stat[ret<0] ++;
2082 
2083  if (ret < 0 && exit_on_error)
2084  exit_program(1);
2085 
2086  if (*got_output && ist) {
2089  "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
2090  if (exit_on_error)
2091  exit_program(1);
2092  }
2093  }
2094 }
2095 
2096 // Filters can be configured only if the formats of all inputs are known.
2098 {
2099  int i;
2100  for (i = 0; i < fg->nb_inputs; i++) {
2101  if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
2102  fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
2103  return 0;
2104  }
2105  return 1;
2106 }
2107 
/* Feed one decoded frame into an input filter (buffersrc) of a filtergraph.
 * If the frame's parameters (format, dimensions, audio layout, hw frames
 * context) differ from what the graph was configured with, the graph is
 * reconfigured; frames that arrive before every graph input has a known
 * format are queued in ifilter->frame_queue instead of being submitted.
 * NOTE(review): the signature line (2108) is absent from this extract;
 * from the call site it is ifilter_send_frame(InputFilter*, AVFrame*). */
2109 {
 2110  FilterGraph *fg = ifilter->graph;
 2111  int need_reinit, ret, i;
 2112 
 2113  /* determine if the parameters for this input changed */
 2114  need_reinit = ifilter->format != frame->format;
 2115 
 2116  switch (ifilter->ist->st->codecpar->codec_type) {
 2117  case AVMEDIA_TYPE_AUDIO:
 2118  need_reinit |= ifilter->sample_rate != frame->sample_rate ||
 2119  ifilter->channels != frame->channels ||
 2120  ifilter->channel_layout != frame->channel_layout;
 2121  break;
 2122  case AVMEDIA_TYPE_VIDEO:
 2123  need_reinit |= ifilter->width != frame->width ||
 2124  ifilter->height != frame->height;
 2125  break;
 2126  }
 2127 
 /* -reinit_filter 0 disables reconfiguration once a graph exists */
 2128  if (!ifilter->ist->reinit_filters && fg->graph)
 2129  need_reinit = 0;
 2130 
 /* a change of hw frames context always forces a reinit */
 2131  if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
 2132  (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
 2133  need_reinit = 1;
 2134 
 2135  if (need_reinit) {
 2136  ret = ifilter_parameters_from_frame(ifilter, frame);
 2137  if (ret < 0)
 2138  return ret;
 2139  }
 2140 
 2141  /* (re)init the graph if possible, otherwise buffer the frame and return */
 2142  if (need_reinit || !fg->graph) {
 2143  for (i = 0; i < fg->nb_inputs; i++) {
 2144  if (!ifilter_has_all_input_formats(fg)) {
 /* clone the frame (ownership stays with the caller's frame
  * until unref) and queue it until the graph can be built */
 2145  AVFrame *tmp = av_frame_clone(frame);
 2146  if (!tmp)
 2147  return AVERROR(ENOMEM);
 2148  av_frame_unref(frame);
 2149 
 /* grow the FIFO when full; tmp is freed on realloc failure */
 2150  if (!av_fifo_space(ifilter->frame_queue)) {
 2151  ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
 2152  if (ret < 0) {
 2153  av_frame_free(&tmp);
 2154  return ret;
 2155  }
 2156  }
 2157  av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
 2158  return 0;
 2159  }
 2160  }
 2161 
 /* drain already-filtered frames before tearing the graph down */
 2162  ret = reap_filters(1);
 2163  if (ret < 0 && ret != AVERROR_EOF) {
 2164  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
 2165  return ret;
 2166  }
 2167 
 2168  ret = configure_filtergraph(fg);
 2169  if (ret < 0) {
 2170  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
 2171  return ret;
 2172  }
 2173  }
 2174 
 /* NOTE(review): line 2175 — the av_buffersrc_add_frame_flags() call that
  * actually submits the frame and sets ret — is missing from this extract. */
 2176  if (ret < 0) {
 2177  if (ret != AVERROR_EOF)
 2178  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
 2179  return ret;
 2180  }
 2181 
 2182  return 0;
 2183 }
2184 
/* Signal EOF on one input filter. If the buffersrc exists, it is closed at
 * the given pts (pushing any buffered data); if the filtergraph was never
 * configured, parameters are taken from the stream's codecpar as a last
 * resort, and an error is raised when the format of an audio/video input
 * still cannot be determined.
 * NOTE(review): the signature line (2185) is absent from this extract;
 * from the call site it is ifilter_send_eof(InputFilter *ifilter, int64_t pts). */
2186 {
 2187  int ret;
 2188 
 2189  ifilter->eof = 1;
 2190 
 2191  if (ifilter->filter) {
 2192  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
 2193  if (ret < 0)
 2194  return ret;
 2195  } else {
 2196  // the filtergraph was never configured
 2197  if (ifilter->format < 0)
 2198  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
 2199  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
 2200  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
 2201  return AVERROR_INVALIDDATA;
 2202  }
 2203  }
 2204 
 2205  return 0;
 2206 }
2207 
2208 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2209 // There is the following difference: if you got a frame, you must call
2210 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2211 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2212 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2213 {
2214  int ret;
2215 
2216  *got_frame = 0;
2217 
2218  if (pkt) {
2219  ret = avcodec_send_packet(avctx, pkt);
2220  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2221  // decoded frames with avcodec_receive_frame() until done.
2222  if (ret < 0 && ret != AVERROR_EOF)
2223  return ret;
2224  }
2225 
2226  ret = avcodec_receive_frame(avctx, frame);
2227  if (ret < 0 && ret != AVERROR(EAGAIN))
2228  return ret;
2229  if (ret >= 0)
2230  *got_frame = 1;
2231 
2232  return 0;
2233 }
2234 
/* Distribute one decoded frame to every filter fed by this input stream.
 * All but the last filter receive a reference (via ist->filter_frame);
 * the last one consumes the decoded frame itself, saving a ref.
 * Returns 0 on success or the first negative error (AVERROR_EOF from a
 * filter is ignored).
 * NOTE(review): the signature line (2235) is absent from this extract;
 * from the call sites it is send_frame_to_filters(InputStream*, AVFrame*). */
2236 {
 2237  int i, ret;
 2238  AVFrame *f;
 2239 
 2240  av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
 2241  for (i = 0; i < ist->nb_filters; i++) {
 2242  if (i < ist->nb_filters - 1) {
 2243  f = ist->filter_frame;
 2244  ret = av_frame_ref(f, decoded_frame);
 2245  if (ret < 0)
 2246  break;
 2247  } else
 2248  f = decoded_frame;
 2249  ret = ifilter_send_frame(ist->filters[i], f);
 2250  if (ret == AVERROR_EOF)
 2251  ret = 0; /* ignore */
 2252  if (ret < 0) {
 /* NOTE(review): line 2253 (the av_log(AV_LOG_ERROR, ...) opening
  * of this message) is missing from this extract. */
 2254  "Failed to inject frame into filter network: %s\n", av_err2str(ret));
 2255  break;
 2256  }
 2257  }
 2258  return ret;
 2259 }
2260 
/* Decode one audio packet for an input stream and push the resulting frame
 * into the filter network. Also advances next_pts/next_dts by the decoded
 * duration and rescales the frame pts into the 1/sample_rate time base.
 * NOTE(review): the first signature line (2261) is absent from this
 * extract; by analogy with decode_video it is
 * decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, ...). */
2262  int *decode_failed)
 2263 {
 /* NOTE(review): line 2264 (AVFrame *decoded_frame; declaration) is
  * missing from this extract. */
 2265  AVCodecContext *avctx = ist->dec_ctx;
 2266  int ret, err = 0;
 2267  AVRational decoded_frame_tb;
 2268 
 /* lazily allocate the reusable frames owned by the InputStream */
 2269  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
 2270  return AVERROR(ENOMEM);
 2271  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
 2272  return AVERROR(ENOMEM);
 2273  decoded_frame = ist->decoded_frame;
 2274 
 /* NOTE(review): line 2275 (update_benchmark(NULL); before the decode
  * call) is missing from this extract. */
 2276  ret = decode(avctx, decoded_frame, got_output, pkt);
 2277  update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
 2278  if (ret < 0)
 2279  *decode_failed = 1;
 2280 
 2281  if (ret >= 0 && avctx->sample_rate <= 0) {
 2282  av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
 2283  ret = AVERROR_INVALIDDATA;
 2284  }
 2285 
 2286  if (ret != AVERROR_EOF)
 2287  check_decode_result(ist, got_output, ret);
 2288 
 2289  if (!*got_output || ret < 0)
 2290  return ret;
 2291 
 2292  ist->samples_decoded += decoded_frame->nb_samples;
 2293  ist->frames_decoded++;
 2294 
 2295  /* increment next_dts to use for the case where the input stream does not
 2296  have timestamps or there are multiple frames in the packet */
 2297  ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
 2298  avctx->sample_rate;
 2299  ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
 2300  avctx->sample_rate;
 2301 
 /* choose a pts and its time base: frame pts, then packet pts, then
  * the stream's running dts in AV_TIME_BASE units */
 2302  if (decoded_frame->pts != AV_NOPTS_VALUE) {
 2303  decoded_frame_tb = ist->st->time_base;
 2304  } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
 2305  decoded_frame->pts = pkt->pts;
 2306  decoded_frame_tb = ist->st->time_base;
 2307  }else {
 2308  decoded_frame->pts = ist->dts;
 2309  decoded_frame_tb = AV_TIME_BASE_Q;
 2310  }
 /* rescale to a 1/sample_rate time base, compensating rounding drift
  * via filter_in_rescale_delta_last */
 2311  if (decoded_frame->pts != AV_NOPTS_VALUE)
 2312  decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
 2313  (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
 2314  (AVRational){1, avctx->sample_rate});
 2315  ist->nb_samples = decoded_frame->nb_samples;
 2316  err = send_frame_to_filters(ist, decoded_frame);
 2317 
 /* NOTE(review): line 2318 (av_frame_unref(ist->filter_frame);) is
  * missing from this extract. */
 2319  av_frame_unref(decoded_frame);
 2320  return err < 0 ? err : ret;
 2321 }
2322 
/* Decode one video packet for an input stream, fix up timestamps
 * (best-effort pts, buffered EOF dts, cfr pacing), optionally download
 * hwaccel frames, and push the frame into the filter network.
 * *duration_pts receives the frame duration in stream time base units. */
2323 static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
 2324  int *decode_failed)
 2325 {
 /* NOTE(review): line 2326 (AVFrame *decoded_frame; declaration) is
  * missing from this extract. */
 2327  int i, ret = 0, err = 0;
 2328  int64_t best_effort_timestamp;
 /* NOTE(review): line 2329 (int64_t dts = AV_NOPTS_VALUE;) is missing
  * from this extract. */
 2330  AVPacket avpkt;
 2331 
 2332  // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
 2333  // reason. This seems like a semi-critical bug. Don't trigger EOF, and
 2334  // skip the packet.
 2335  if (!eof && pkt && pkt->size == 0)
 2336  return 0;
 2337 
 2338  if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
 2339  return AVERROR(ENOMEM);
 2340  if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
 2341  return AVERROR(ENOMEM);
 2342  decoded_frame = ist->decoded_frame;
 2343  if (ist->dts != AV_NOPTS_VALUE)
 2344  dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
 2345  if (pkt) {
 /* shallow copy so the dts override does not touch the caller's packet */
 2346  avpkt = *pkt;
 2347  avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
 2348  }
 2349 
 2350  // The old code used to set dts on the drain packet, which does not work
 2351  // with the new API anymore.
 2352  if (eof) {
 2353  void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
 2354  if (!new)
 2355  return AVERROR(ENOMEM);
 2356  ist->dts_buffer = new;
 2357  ist->dts_buffer[ist->nb_dts_buffer++] = dts;
 2358  }
 2359 
 /* NOTE(review): line 2360 (update_benchmark(NULL);) is missing from
  * this extract. */
 2361  ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
 2362  update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
 2363  if (ret < 0)
 2364  *decode_failed = 1;
 2365 
 2366  // The following line may be required in some cases where there is no parser
 2367  // or the parser does not has_b_frames correctly
 2368  if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
 2369  if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
 2370  ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
 2371  } else
 /* NOTE(review): line 2372 (the av_log(AV_LOG_WARNING, ...) opening of
  * this message) is missing from this extract. */
 2373  "video_delay is larger in decoder than demuxer %d > %d.\n"
 2374  "If you want to help, upload a sample "
 2375  "of this file to https://streams.videolan.org/upload/ "
 2376  "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
 2377  ist->dec_ctx->has_b_frames,
 2378  ist->st->codecpar->video_delay);
 2379  }
 2380 
 2381  if (ret != AVERROR_EOF)
 2382  check_decode_result(ist, got_output, ret);
 2383 
 /* debug aid only: report (but tolerate) a decoder/context mismatch */
 2384  if (*got_output && ret >= 0) {
 2385  if (ist->dec_ctx->width != decoded_frame->width ||
 2386  ist->dec_ctx->height != decoded_frame->height ||
 2387  ist->dec_ctx->pix_fmt != decoded_frame->format) {
 2388  av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
 2389  decoded_frame->width,
 2390  decoded_frame->height,
 2391  decoded_frame->format,
 2392  ist->dec_ctx->width,
 2393  ist->dec_ctx->height,
 2394  ist->dec_ctx->pix_fmt);
 2395  }
 2396  }
 2397 
 2398  if (!*got_output || ret < 0)
 2399  return ret;
 2400 
 2401  if(ist->top_field_first>=0)
 2402  decoded_frame->top_field_first = ist->top_field_first;
 2403 
 2404  ist->frames_decoded++;
 2405 
 /* download the frame from hw memory when a retrieval callback is set */
 2406  if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
 2407  err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
 2408  if (err < 0)
 2409  goto fail;
 2410  }
 2411  ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;
 2412 
 2413  best_effort_timestamp= decoded_frame->best_effort_timestamp;
 2414  *duration_pts = decoded_frame->pkt_duration;
 2415 
 /* constant frame rate forced with -r: generate pts sequentially */
 2416  if (ist->framerate.num)
 2417  best_effort_timestamp = ist->cfr_next_pts++;
 2418 
 /* on EOF with no usable pts, fall back to the buffered packet dts
  * queue (FIFO shift) */
 2419  if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
 2420  best_effort_timestamp = ist->dts_buffer[0];
 2421 
 2422  for (i = 0; i < ist->nb_dts_buffer - 1; i++)
 2423  ist->dts_buffer[i] = ist->dts_buffer[i + 1];
 2424  ist->nb_dts_buffer--;
 2425  }
 2426 
 2427  if(best_effort_timestamp != AV_NOPTS_VALUE) {
 2428  int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);
 2429 
 2430  if (ts != AV_NOPTS_VALUE)
 2431  ist->next_pts = ist->pts = ts;
 2432  }
 2433 
 2434  if (debug_ts) {
 2435  av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
 2436  "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
 2437  ist->st->index, av_ts2str(decoded_frame->pts),
 2438  av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
 2439  best_effort_timestamp,
 2440  av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
 2441  decoded_frame->key_frame, decoded_frame->pict_type,
 2442  ist->st->time_base.num, ist->st->time_base.den);
 2443  }
 2444 
 2445  if (ist->st->sample_aspect_ratio.num)
 2446  decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;
 2447 
 2448  err = send_frame_to_filters(ist, decoded_frame);
 2449 
 2450 fail:
 /* NOTE(review): line 2451 (av_frame_unref(ist->filter_frame);) is
  * missing from this extract. */
 2452  av_frame_unref(decoded_frame);
 2453  return err < 0 ? err : ret;
 2454 }
2455 
/* Decode one subtitle packet, optionally fix overlapping display durations
 * (-fix_sub_duration), feed sub2video when in use, and encode the subtitle
 * to every output stream that copies/encodes from this input.
 * NOTE(review): the first signature line (2456) is absent from this
 * extract; by analogy with the other decoders it is
 * transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, ...). */
2457  int *decode_failed)
 2458 {
 /* NOTE(review): line 2459 (AVSubtitle subtitle; declaration) is
  * missing from this extract. */
 2460  int free_sub = 1;
 2461  int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
 2462  &subtitle, got_output, pkt);
 2463 
 2464  check_decode_result(NULL, got_output, ret);
 2465 
 2466  if (ret < 0 || !*got_output) {
 2467  *decode_failed = 1;
 /* an empty packet means EOF: flush buffered sub2video state */
 2468  if (!pkt->size)
 2469  sub2video_flush(ist);
 2470  return ret;
 2471  }
 2472 
 /* clamp the previous subtitle's end_display_time so it does not overlap
  * the current one; output is delayed by one subtitle (FFSWAP below) */
 2473  if (ist->fix_sub_duration) {
 2474  int end = 1;
 2475  if (ist->prev_sub.got_output) {
 2476  end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
 2477  1000, AV_TIME_BASE);
 2478  if (end < ist->prev_sub.subtitle.end_display_time) {
 2479  av_log(ist->dec_ctx, AV_LOG_DEBUG,
 2480  "Subtitle duration reduced from %"PRId32" to %d%s\n",
 /* NOTE(review): lines 2481/2483 (the previous end_display_time
  * argument and its reassignment) are missing from this extract. */
 2482  end <= 0 ? ", dropping it" : "");
 2484  }
 2485  }
 2486  FFSWAP(int, *got_output, ist->prev_sub.got_output);
 2487  FFSWAP(int, ret, ist->prev_sub.ret);
 2488  FFSWAP(AVSubtitle, subtitle, ist->prev_sub.subtitle);
 2489  if (end <= 0)
 2490  goto out;
 2491  }
 2492 
 2493  if (!*got_output)
 2494  return ret;
 2495 
 /* route the subtitle into sub2video: render now if the graph is up,
  * otherwise queue it (ownership moves to the queue => free_sub = 0) */
 2496  if (ist->sub2video.frame) {
 2497  sub2video_update(ist, INT64_MIN, &subtitle);
 2498  } else if (ist->nb_filters) {
 2499  if (!ist->sub2video.sub_queue)
 2500  ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
 2501  if (!ist->sub2video.sub_queue)
 2502  exit_program(1);
 2503  if (!av_fifo_space(ist->sub2video.sub_queue)) {
 /* NOTE(review): line 2504 (the av_fifo_realloc2() grow call) is
  * missing from this extract. */
 2505  if (ret < 0)
 2506  exit_program(1);
 2507  }
 2508  av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
 2509  free_sub = 0;
 2510  }
 2511 
 2512  if (!subtitle.num_rects)
 2513  goto out;
 2514 
 2515  ist->frames_decoded++;
 2516 
 2517  for (i = 0; i < nb_output_streams; i++) {
 2518  OutputStream *ost = output_streams[i];
 2519 
 2520  if (!check_output_constraints(ist, ost) || !ost->encoding_needed
 2521  || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
 2522  continue;
 2523 
 2524  do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
 2525  }
 2526 
 2527 out:
 2528  if (free_sub)
 2529  avsubtitle_free(&subtitle);
 2530  return ret;
 2531 }
2532 
/* Send EOF to every filter fed by this input stream; stops at the first
 * failing filter and propagates its error.
 * NOTE(review): the signature line (2533) and the pts computation
 * (lines 2537-2538) are absent from this extract; from the call site it
 * is send_filter_eof(InputStream *ist). */
2534 {
 2535  int i, ret;
 2536  /* TODO keep pts also in stream time base to avoid converting back */
 2539 
 2540  for (i = 0; i < ist->nb_filters; i++) {
 2541  ret = ifilter_send_eof(ist->filters[i], pts);
 2542  if (ret < 0)
 2543  return ret;
 2544  }
 2545  return 0;
 2546 }
2547 
2548 /* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Process one demuxed packet for an input stream (or flush the decoders
 * when pkt == NULL): run the per-media-type decoder loop, maintain the
 * dts/pts bookkeeping for both decoded and stream-copied paths, send EOF
 * into the filters when the decoder is fully drained, and stream-copy the
 * packet to matching outputs. Returns 0 once EOF was reached, non-zero
 * otherwise ("!eof_reached"). */
2549 static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
 2550 {
 2551  int ret = 0, i;
 2552  int repeating = 0;
 2553  int eof_reached = 0;
 2554 
 2555  AVPacket avpkt;
 /* first packet for this stream: seed dts/pts (compensating for decoder
  * delay via has_b_frames when an average frame rate is known) */
 2556  if (!ist->saw_first_ts) {
 2557  ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
 2558  ist->pts = 0;
 2559  if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
 2560  ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
 2561  ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
 2562  }
 2563  ist->saw_first_ts = 1;
 2564  }
 2565 
 2566  if (ist->next_dts == AV_NOPTS_VALUE)
 2567  ist->next_dts = ist->dts;
 2568  if (ist->next_pts == AV_NOPTS_VALUE)
 2569  ist->next_pts = ist->pts;
 2570 
 /* pkt == NULL: synthesize an empty drain packet for the decoders */
 2571  if (!pkt) {
 2572  /* EOF handling */
 2573  av_init_packet(&avpkt);
 2574  avpkt.data = NULL;
 2575  avpkt.size = 0;
 2576  } else {
 2577  avpkt = *pkt;
 2578  }
 2579 
 2580  if (pkt && pkt->dts != AV_NOPTS_VALUE) {
 2581  ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
 2582  if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
 2583  ist->next_pts = ist->pts = ist->dts;
 2584  }
 2585 
 2586  // while we have more to decode or while the decoder did output something on EOF
 2587  while (ist->decoding_needed) {
 2588  int64_t duration_dts = 0;
 2589  int64_t duration_pts = 0;
 2590  int got_output = 0;
 2591  int decode_failed = 0;
 2592 
 2593  ist->pts = ist->next_pts;
 2594  ist->dts = ist->next_dts;
 2595 
 /* repeating == 1 means "drain more output for the same packet":
  * the packet pointer is replaced with NULL for the decoder */
 2596  switch (ist->dec_ctx->codec_type) {
 2597  case AVMEDIA_TYPE_AUDIO:
 2598  ret = decode_audio (ist, repeating ? NULL : &avpkt, &got_output,
 2599  &decode_failed);
 2600  break;
 2601  case AVMEDIA_TYPE_VIDEO:
 2602  ret = decode_video (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
 2603  &decode_failed);
 /* estimate the dts increment: packet duration if present, else
  * derived from the decoder frame rate */
 2604  if (!repeating || !pkt || got_output) {
 2605  if (pkt && pkt->duration) {
 2606  duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
 2607  } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
 /* NOTE(review): lines 2608 (int ticks = ...) and 2611 (the
  * framerate.num * ticks denominator) are missing from this
  * extract. */
 2609  duration_dts = ((int64_t)AV_TIME_BASE *
 2610  ist->dec_ctx->framerate.den * ticks) /
 2612  }
 2613 
 2614  if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
 2615  ist->next_dts += duration_dts;
 2616  }else
 2617  ist->next_dts = AV_NOPTS_VALUE;
 2618  }
 2619 
 2620  if (got_output) {
 2621  if (duration_pts > 0) {
 2622  ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
 2623  } else {
 2624  ist->next_pts += duration_dts;
 2625  }
 2626  }
 2627  break;
 2628  case AVMEDIA_TYPE_SUBTITLE:
 2629  if (repeating)
 2630  break;
 2631  ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
 2632  if (!pkt && ret >= 0)
 2633  ret = AVERROR_EOF;
 2634  break;
 2635  default:
 2636  return -1;
 2637  }
 2638 
 2639  if (ret == AVERROR_EOF) {
 2640  eof_reached = 1;
 2641  break;
 2642  }
 2643 
 2644  if (ret < 0) {
 2645  if (decode_failed) {
 2646  av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
 2647  ist->file_index, ist->st->index, av_err2str(ret));
 2648  } else {
 2649  av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
 2650  "data for stream #%d:%d\n", ist->file_index, ist->st->index);
 2651  }
 2652  if (!decode_failed || exit_on_error)
 2653  exit_program(1);
 2654  break;
 2655  }
 2656 
 2657  if (got_output)
 2658  ist->got_output = 1;
 2659 
 2660  if (!got_output)
 2661  break;
 2662 
 2663  // During draining, we might get multiple output frames in this loop.
 2664  // ffmpeg.c does not drain the filter chain on configuration changes,
 2665  // which means if we send multiple frames at once to the filters, and
 2666  // one of those frames changes configuration, the buffered frames will
 2667  // be lost. This can upset certain FATE tests.
 2668  // Decode only 1 frame per call on EOF to appease these FATE tests.
 2669  // The ideal solution would be to rewrite decoding to use the new
 2670  // decoding API in a better way.
 2671  if (!pkt)
 2672  break;
 2673 
 2674  repeating = 1;
 2675  }
 2676 
 2677  /* after flushing, send an EOF on all the filter inputs attached to the stream */
 2678  /* except when looping we need to flush but not to send an EOF */
 2679  if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
 2680  int ret = send_filter_eof(ist);
 2681  if (ret < 0) {
 2682  av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
 2683  exit_program(1);
 2684  }
 2685  }
 2686 
 2687  /* handle stream copy */
 2688  if (!ist->decoding_needed && pkt) {
 2689  ist->dts = ist->next_dts;
 2690  switch (ist->dec_ctx->codec_type) {
 2691  case AVMEDIA_TYPE_AUDIO:
 2692  av_assert1(pkt->duration >= 0);
 2693  if (ist->dec_ctx->sample_rate) {
 2694  ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
 2695  ist->dec_ctx->sample_rate;
 2696  } else {
 2697  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
 2698  }
 2699  break;
 2700  case AVMEDIA_TYPE_VIDEO:
 2701  if (ist->framerate.num) {
 2702  // TODO: Remove work-around for c99-to-c89 issue 7
 2703  AVRational time_base_q = AV_TIME_BASE_Q;
 2704  int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
 2705  ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
 2706  } else if (pkt->duration) {
 2707  ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
 2708  } else if(ist->dec_ctx->framerate.num != 0) {
 2709  int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
 2710  ist->next_dts += ((int64_t)AV_TIME_BASE *
 2711  ist->dec_ctx->framerate.den * ticks) /
 /* NOTE(review): line 2712 (the framerate.num * ticks_per_frame
  * denominator) is missing from this extract. */
 2713  }
 2714  break;
 2715  }
 2716  ist->pts = ist->dts;
 2717  ist->next_pts = ist->next_dts;
 2718  }
 2719  for (i = 0; i < nb_output_streams; i++) {
 2720  OutputStream *ost = output_streams[i];
 2721 
 2722  if (!check_output_constraints(ist, ost) || ost->encoding_needed)
 2723  continue;
 2724 
 2725  do_streamcopy(ist, ost, pkt);
 2726  }
 2727 
 2728  return !eof_reached;
 2729 }
2730 
/* Generate an SDP description covering all RTP output muxers and either
 * print it to stdout or write it to -sdp_file. Does nothing until every
 * output file's header has been written. */
2731 static void print_sdp(void)
 2732 {
 2733  char sdp[16384];
 2734  int i;
 2735  int j;
 2736  AVIOContext *sdp_pb;
 2737  AVFormatContext **avc;
 2738 
 /* wait until all muxers are initialized */
 2739  for (i = 0; i < nb_output_files; i++) {
 2740  if (!output_files[i]->header_written)
 2741  return;
 2742  }
 2743 
 /* collect the contexts of all "rtp" outputs; j counts them */
 2744  avc = av_malloc_array(nb_output_files, sizeof(*avc));
 2745  if (!avc)
 2746  exit_program(1);
 2747  for (i = 0, j = 0; i < nb_output_files; i++) {
 2748  if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
 2749  avc[j] = output_files[i]->ctx;
 2750  j++;
 2751  }
 2752  }
 2753 
 2754  if (!j)
 2755  goto fail;
 2756 
 2757  av_sdp_create(avc, j, sdp, sizeof(sdp));
 2758 
 2759  if (!sdp_filename) {
 2760  printf("SDP:\n%s\n", sdp);
 2761  fflush(stdout);
 2762  } else {
 2763  if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
 2764  av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
 2765  } else {
 2766  avio_print(sdp_pb, sdp);
 2767  avio_closep(&sdp_pb);
 /* NOTE(review): line 2768 (av_freep(&sdp_filename);) is missing
  * from this extract. */
 2769  }
 2770  }
 2771 
 2772 fail:
 2773  av_freep(&avc);
 2774 }
2775 
/* AVCodecContext.get_format callback: walk the decoder's offered pixel
 * format list and pick the first one for which the requested/auto hwaccel
 * can be initialized; falls back to the first non-hwaccel format.
 * NOTE(review): the signature line (2776) is absent from this extract;
 * per the assignment at init_input_stream it is
 * get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts). */
2777 {
 2778  InputStream *ist = s->opaque;
 2779  const enum AVPixelFormat *p;
 2780  int ret;
 2781 
 2782  for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
 /* NOTE(review): line 2783 (the av_pix_fmt_desc_get(*p) lookup into
  * 'desc') is missing from this extract. */
 2784  const AVCodecHWConfig *config = NULL;
 2785  int i;
 2786 
 /* the first software format ends the search */
 2787  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
 2788  break;
 2789 
 /* generic/auto hwaccel: look up the decoder's hw config matching
  * this pixel format */
 2790  if (ist->hwaccel_id == HWACCEL_GENERIC ||
 2791  ist->hwaccel_id == HWACCEL_AUTO) {
 2792  for (i = 0;; i++) {
 2793  config = avcodec_get_hw_config(s->codec, i);
 2794  if (!config)
 2795  break;
 2796  if (!(config->methods &
 /* NOTE(review): line 2797 (the HW_CONFIG_METHOD flag tested
  * here) is missing from this extract. */
 2798  continue;
 2799  if (config->pix_fmt == *p)
 2800  break;
 2801  }
 2802  }
 2803  if (config) {
 2804  if (config->device_type != ist->hwaccel_device_type) {
 2805  // Different hwaccel offered, ignore.
 2806  continue;
 2807  }
 2808 
 2809  ret = hwaccel_decode_init(s);
 2810  if (ret < 0) {
 /* an explicitly requested hwaccel that fails is fatal;
  * HWACCEL_AUTO just tries the next format */
 2811  if (ist->hwaccel_id == HWACCEL_GENERIC) {
 /* NOTE(review): lines 2812/2815 (the av_log(AV_LOG_FATAL, ...)
  * opening and the hwaccel-name argument) are missing from this
  * extract. */
 2813  "%s hwaccel requested for input stream #%d:%d, "
 2814  "but cannot be initialized.\n",
 2816  ist->file_index, ist->st->index);
 2817  return AV_PIX_FMT_NONE;
 2818  }
 2819  continue;
 2820  }
 2821  } else {
 /* legacy path: search the static hwaccels[] table by pixel format */
 2822  const HWAccel *hwaccel = NULL;
 2823  int i;
 2824  for (i = 0; hwaccels[i].name; i++) {
 2825  if (hwaccels[i].pix_fmt == *p) {
 2826  hwaccel = &hwaccels[i];
 2827  break;
 2828  }
 2829  }
 2830  if (!hwaccel) {
 2831  // No hwaccel supporting this pixfmt.
 2832  continue;
 2833  }
 2834  if (hwaccel->id != ist->hwaccel_id) {
 2835  // Does not match requested hwaccel.
 2836  continue;
 2837  }
 2838 
 2839  ret = hwaccel->init(s);
 2840  if (ret < 0) {
 /* NOTE(review): line 2841 (the av_log(AV_LOG_FATAL, ...) opening)
  * is missing from this extract. */
 2842  "%s hwaccel requested for input stream #%d:%d, "
 2843  "but cannot be initialized.\n", hwaccel->name,
 2844  ist->file_index, ist->st->index);
 2845  return AV_PIX_FMT_NONE;
 2846  }
 2847  }
 2848 
 2849  if (ist->hw_frames_ctx) {
 /* NOTE(review): line 2850 (s->hw_frames_ctx = av_buffer_ref(...);)
  * is missing from this extract. */
 2851  if (!s->hw_frames_ctx)
 2852  return AV_PIX_FMT_NONE;
 2853  }
 2854 
 2855  ist->hwaccel_pix_fmt = *p;
 2856  break;
 2857  }
 2858 
 2859  return *p;
 2860 }
2861 
/* AVCodecContext.get_buffer2 callback: delegate buffer allocation to the
 * active hwaccel when the frame uses its pixel format, otherwise fall back
 * to the libavcodec default allocator.
 * NOTE(review): the signature line (2862) is absent from this extract;
 * per the assignment at init_input_stream it is
 * get_buffer(AVCodecContext *s, AVFrame *frame, int flags). */
2863 {
 2864  InputStream *ist = s->opaque;
 2865 
 2866  if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
 2867  return ist->hwaccel_get_buffer(s, frame, flags);
 2868 
 2869  return avcodec_default_get_buffer2(s, frame, flags);
 2870 }
2871 
/* Initialize one input stream: when decoding is needed, install the
 * get_format/get_buffer callbacks, set decoder options (threads, subtitle
 * handling, pkt_timebase), perform hw device setup and open the decoder.
 * On failure an explanatory message is written into 'error'. */
2872 static int init_input_stream(int ist_index, char *error, int error_len)
 2873 {
 2874  int ret;
 2875  InputStream *ist = input_streams[ist_index];
 2876 
 2877  if (ist->decoding_needed) {
 2878  AVCodec *codec = ist->dec;
 2879  if (!codec) {
 2880  snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
 2881  avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
 2882  return AVERROR(EINVAL);
 2883  }
 2884 
 /* hook up the hwaccel-aware callbacks; opaque carries the ist */
 2885  ist->dec_ctx->opaque = ist;
 2886  ist->dec_ctx->get_format = get_format;
 2887  ist->dec_ctx->get_buffer2 = get_buffer;
 2888  ist->dec_ctx->thread_safe_callbacks = 1;
 2889 
 2890  av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
 2891  if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
 2892  (ist->decoding_needed & DECODING_FOR_OST)) {
 2893  av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
 /* NOTE(review): line 2894 (the DECODING_FOR_FILTER condition guarding
  * this warning) is missing from this extract. */
 2895  av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
 2896  }
 2897 
 2898  av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);
 2899 
 2900  /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
 2901  * audio, and video decoders such as cuvid or mediacodec */
 2902  ist->dec_ctx->pkt_timebase = ist->st->time_base;
 2903 
 2904  if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
 2905  av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
 2906  /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
 /* NOTE(review): line 2907 (the AV_DISPOSITION_ATTACHED_PIC condition)
  * is missing from this extract. */
 2908  av_dict_set(&ist->decoder_opts, "threads", "1", 0);
 2909 
 2910  ret = hw_device_setup_for_decode(ist);
 2911  if (ret < 0) {
 2912  snprintf(error, error_len, "Device setup failed for "
 2913  "decoder on input stream #%d:%d : %s",
 2914  ist->file_index, ist->st->index, av_err2str(ret));
 2915  return ret;
 2916  }
 2917 
 2918  if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
 2919  if (ret == AVERROR_EXPERIMENTAL)
 2920  abort_codec_experimental(codec, 0);
 2921 
 2922  snprintf(error, error_len,
 2923  "Error while opening decoder for input stream "
 2924  "#%d:%d : %s",
 2925  ist->file_index, ist->st->index, av_err2str(ret));
 2926  return ret;
 2927  }
 /* NOTE(review): line 2928 (assert_avoptions(ist->decoder_opts);) is
  * missing from this extract. */
 2929  }
 2930 
 2931  ist->next_pts = AV_NOPTS_VALUE;
 2932  ist->next_dts = AV_NOPTS_VALUE;
 2933 
 2934  return 0;
 2935 }
2936 
/* Return the InputStream feeding this output stream, or NULL when the
 * output has no direct source stream (e.g. it is fed by a filtergraph).
 * NOTE(review): the signature line (2937) is absent from this extract;
 * from the call sites it is get_input_stream(OutputStream *ost). */
2938 {
 2939  if (ost->source_index >= 0)
 2940  return input_streams[ost->source_index];
 2941  return NULL;
 2942 }
2943 
/* qsort()-compatible three-way comparison of two int64_t values.
 * Written as (a > b) - (a < b) — the expansion of FFDIFFSIGN — which
 * yields -1/0/1 without the overflow a plain subtraction could cause. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t lhs = *(const int64_t *)a;
    const int64_t rhs = *(const int64_t *)b;

    return (lhs > rhs) - (lhs < rhs);
}
2948 
2949 /* open the muxer when all the streams are initialized */
/* Once every stream of an output file is initialized, write the muxer
 * header, optionally print the SDP, and flush packets that were queued in
 * the per-stream muxing queues while the muxer was not yet open.
 * NOTE(review): the signature line (2950) is absent from this extract;
 * from the body it is check_init_output_file(OutputFile *of, int file_index). */
2951 {
 2952  int ret, i;
 2953 
 /* bail out until all streams of this file are ready */
 2954  for (i = 0; i < of->ctx->nb_streams; i++) {
 2955  OutputStream *ost = output_streams[of->ost_index + i];
 2956  if (!ost->initialized)
 2957  return 0;
 2958  }
 2959 
 2960  of->ctx->interrupt_callback = int_cb;
 2961 
 2962  ret = avformat_write_header(of->ctx, &of->opts);
 2963  if (ret < 0) {
 /* NOTE(review): line 2964 (the av_log(AV_LOG_ERROR, ...) opening) is
  * missing from this extract. */
 2965  "Could not write header for output file #%d "
 2966  "(incorrect codec parameters ?): %s\n",
 2967  file_index, av_err2str(ret));
 2968  return ret;
 2969  }
 2970  //assert_avoptions(of->opts);
 2971  of->header_written = 1;
 2972 
 2973  av_dump_format(of->ctx, file_index, of->ctx->url, 1);
 2974 
 2975  if (sdp_filename || want_sdp)
 2976  print_sdp();
 2977 
 2978  /* flush the muxing queues */
 2979  for (i = 0; i < of->ctx->nb_streams; i++) {
 2980  OutputStream *ost = output_streams[of->ost_index + i];
 2981 
 2982  /* try to improve muxing time_base (only possible if nothing has been written yet) */
 2983  if (!av_fifo_size(ost->muxing_queue))
 2984  ost->mux_timebase = ost->st->time_base;
 2985 
 2986  while (av_fifo_size(ost->muxing_queue)) {
 2987  AVPacket pkt;
 2988  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
 2989  write_packet(of, &pkt, ost, 1);
 2990  }
 2991  }
 2992 
 2993  return 0;
 2994 }
2995 
/* Initialize the output stream's bitstream filter chain (if any): copy the
 * stream codec parameters into the BSF input, init the BSF, and propagate
 * the (possibly modified) parameters and time base back to the stream.
 * NOTE(review): the signature line (2996) is absent from this extract;
 * from the body it is init_output_bsfs(OutputStream *ost). */
2997 {
 2998  AVBSFContext *ctx = ost->bsf_ctx;
 2999  int ret;
 3000 
 /* no bitstream filter configured for this stream */
 3001  if (!ctx)
 3002  return 0;
 3003 
 3004  ret = avcodec_parameters_copy(ctx->par_in, ost->st->codecpar);
 3005  if (ret < 0)
 3006  return ret;
 3007 
 3008  ctx->time_base_in = ost->st->time_base;
 3009 
 3010  ret = av_bsf_init(ctx);
 3011  if (ret < 0) {
 3012  av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
 3013  ctx->filter->name);
 3014  return ret;
 3015  }
 3016 
 /* the BSF may rewrite extradata/params and the time base */
 3017  ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
 3018  if (ret < 0)
 3019  return ret;
 3020  ost->st->time_base = ctx->time_base_out;
 3021 
 3022  return 0;
 3023 }
3024 
/* Initialize an output stream in stream-copy mode: derive the output codec
 * parameters from the input stream (via a temporary encoder context so
 * -c copy options apply), pick a codec tag valid for the target container,
 * and copy frame rate, duration, disposition, side data and aspect ratio.
 * NOTE(review): the signature line (3025) is absent from this extract;
 * from the body it is init_output_stream_streamcopy(OutputStream *ost). */
3026 {
 3027  OutputFile *of = output_files[ost->file_index];
 3028  InputStream *ist = get_input_stream(ost);
 3029  AVCodecParameters *par_dst = ost->st->codecpar;
 3030  AVCodecParameters *par_src = ost->ref_par;
 3031  AVRational sar;
 3032  int i, ret;
 3033  uint32_t codec_tag = par_dst->codec_tag;
 3034 
 3035  av_assert0(ist && !ost->filter);
 3036 
 3037  ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
 3038  if (ret >= 0)
 3039  ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
 3040  if (ret < 0) {
 /* NOTE(review): line 3041 (the av_log(AV_LOG_FATAL, ...) opening) is
  * missing from this extract. */
 3042  "Error setting up codec context options.\n");
 3043  return ret;
 3044  }
 3045 
 3046  ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
 3047  if (ret < 0) {
 /* NOTE(review): line 3048 (the av_log(AV_LOG_FATAL, ...) opening) is
  * missing from this extract. */
 3049  "Error getting reference codec parameters.\n");
 3050  return ret;
 3051  }
 3052 
 /* keep the source tag only when the container maps it to the same
  * codec id or has no tag of its own for this codec */
 3053  if (!codec_tag) {
 3054  unsigned int codec_tag_tmp;
 3055  if (!of->ctx->oformat->codec_tag ||
 3056  av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
 3057  !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
 3058  codec_tag = par_src->codec_tag;
 3059  }
 3060 
 3061  ret = avcodec_parameters_copy(par_dst, par_src);
 3062  if (ret < 0)
 3063  return ret;
 3064 
 3065  par_dst->codec_tag = codec_tag;
 3066 
 3067  if (!ost->frame_rate.num)
 3068  ost->frame_rate = ist->framerate;
 3069  ost->st->avg_frame_rate = ost->frame_rate;
 3070 
 /* NOTE(review): line 3071 (ret = avformat_transfer_internal_stream_timing_info(...))
  * is missing from this extract. */
 3072  if (ret < 0)
 3073  return ret;
 3074 
 3075  // copy timebase while removing common factors
 3076  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
 /* NOTE(review): line 3077 (the av_add_q() time-base assignment) is
  * missing from this extract. */
 3078 
 3079  // copy estimated duration as a hint to the muxer
 3080  if (ost->st->duration <= 0 && ist->st->duration > 0)
 3081  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
 3082 
 3083  // copy disposition
 3084  ost->st->disposition = ist->st->disposition;
 3085 
 3086  if (ist->st->nb_side_data) {
 3087  for (i = 0; i < ist->st->nb_side_data; i++) {
 3088  const AVPacketSideData *sd_src = &ist->st->side_data[i];
 3089  uint8_t *dst_data;
 3090 
 3091  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
 3092  if (!dst_data)
 3093  return AVERROR(ENOMEM);
 3094  memcpy(dst_data, sd_src->data, sd_src->size);
 3095  }
 3096  }
 3097 
 /* -metadata:s rotate override: write a fresh display matrix */
 3098  if (ost->rotate_overridden) {
 /* NOTE(review): lines 3099/3102 (the av_stream_new_side_data(...,
  * AV_PKT_DATA_DISPLAYMATRIX, ...) call and the av_display_rotation_set()
  * on 'sd') are missing from this extract. */
 3100  sizeof(int32_t) * 9);
 3101  if (sd)
 3103  }
 3104 
 3105  switch (par_dst->codec_type) {
 3106  case AVMEDIA_TYPE_AUDIO:
 3107  if (audio_volume != 256) {
 3108  av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
 3109  exit_program(1);
 3110  }
 3111  if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
 3112  par_dst->block_align= 0;
 3113  if(par_dst->codec_id == AV_CODEC_ID_AC3)
 3114  par_dst->block_align= 0;
 3115  break;
 3116  case AVMEDIA_TYPE_VIDEO:
 3117  if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
 3118  sar =
 /* NOTE(review): line 3119 (the av_mul_q(ost->frame_aspect_ratio, ...)
  * opening of this expression) is missing from this extract. */
 3120  (AVRational){ par_dst->height, par_dst->width });
 3121  av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
 3122  "with stream copy may produce invalid files\n");
 3123  }
 3124  else if (ist->st->sample_aspect_ratio.num)
 3125  sar = ist->st->sample_aspect_ratio;
 3126  else
 3127  sar = par_src->sample_aspect_ratio;
 3128  ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
 3129  ost->st->avg_frame_rate = ist->st->avg_frame_rate;
 3130  ost->st->r_frame_rate = ist->st->r_frame_rate;
 3131  break;
 3132  }
 3133 
 3134  ost->mux_timebase = ist->st->time_base;
 3135 
 3136  return 0;
 3137 }
3138 
/* Set the "encoder" metadata tag on an output stream to the libavcodec
 * version string plus the encoder name, unless the user already set one or
 * bitexact mode is requested (then only "Lavc <name>" is written).
 * NOTE(review): the signature line (3139) is absent from this extract;
 * from the call site it is set_encoder_id(OutputFile *of, OutputStream *ost). */
3140 {
 3141  AVDictionaryEntry *e;
 3142 
 3143  uint8_t *encoder_string;
 3144  int encoder_string_len;
 3145  int format_flags = 0;
 3146  int codec_flags = ost->enc_ctx->flags;
 3147 
 /* user-provided tag wins */
 3148  if (av_dict_get(ost->st->metadata, "encoder", NULL, 0))
 3149  return;
 3150 
 /* evaluate -fflags / -flags option strings to detect bitexact mode */
 3151  e = av_dict_get(of->opts, "fflags", NULL, 0);
 3152  if (e) {
 3153  const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
 3154  if (!o)
 3155  return;
 3156  av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
 3157  }
 3158  e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
 3159  if (e) {
 3160  const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
 3161  if (!o)
 3162  return;
 3163  av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
 3164  }
 3165 
 3166  encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
 3167  encoder_string = av_mallocz(encoder_string_len);
 3168  if (!encoder_string)
 3169  exit_program(1);
 3170 
 3171  if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
 3172  av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
 3173  else
 3174  av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
 3175  av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
 3176  av_dict_set(&ost->st->metadata, "encoder", encoder_string,
 /* NOTE(review): line 3177 (the AV_DICT_DONT_STRDUP_VAL flag argument)
  * is missing from this extract. */
 3178 }
3179 
/* Parse a -force_key_frames specification (comma-separated times, or
 * "chapters[+offset]" to key-frame at chapter starts) into a sorted array
 * of pts values in the encoder time base, stored on the OutputStream as
 * forced_kf_pts / forced_kf_count. Exits the program on allocation failure. */
3180 static void parse_forced_key_frames(char *kf, OutputStream *ost,
 3181  AVCodecContext *avctx)
 3182 {
 3183  char *p;
 3184  int n = 1, i, size, index = 0;
 3185  int64_t t, *pts;
 3186 
 /* count the comma-separated entries */
 3187  for (p = kf; *p; p++)
 3188  if (*p == ',')
 3189  n++;
 3190  size = n;
 3191  pts = av_malloc_array(size, sizeof(*pts));
 3192  if (!pts) {
 3193  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
 3194  exit_program(1);
 3195  }
 3196 
 /* tokenize in place: each ',' is replaced with a terminator */
 3197  p = kf;
 3198  for (i = 0; i < n; i++) {
 3199  char *next = strchr(p, ',');
 3200 
 3201  if (next)
 3202  *next++ = 0;
 3203 
 /* "chapters[+offset]": one key frame per chapter start */
 3204  if (!memcmp(p, "chapters", 8)) {
 3205 
 3206  AVFormatContext *avf = output_files[ost->file_index]->ctx;
 3207  int j;
 3208 
 /* grow the array to hold one entry per chapter */
 3209  if (avf->nb_chapters > INT_MAX - size ||
 3210  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
 3211  sizeof(*pts)))) {
 /* NOTE(review): line 3212 (the av_log(AV_LOG_FATAL, ...) opening)
  * is missing from this extract. */
 3213  "Could not allocate forced key frames array.\n");
 3214  exit_program(1);
 3215  }
 3216  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
 3217  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
 3218 
 3219  for (j = 0; j < avf->nb_chapters; j++) {
 3220  AVChapter *c = avf->chapters[j];
 3221  av_assert1(index < size);
 3222  pts[index++] = av_rescale_q(c->start, c->time_base,
 3223  avctx->time_base) + t;
 3224  }
 3225 
 3226  } else {
 3227 
 /* plain timestamp entry */
 3228  t = parse_time_or_die("force_key_frames", p, 1);
 3229  av_assert1(index < size);
 3230  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
 3231 
 3232  }
 3233 
 3234  p = next;
 3235  }
 3236 
 3237  av_assert0(index == size);
 3238  qsort(pts, size, sizeof(*pts), compare_int64);
 3239  ost->forced_kf_count = size;
 3240  ost->forced_kf_pts = pts;
 3241 }
3242 
3243 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3244 {
3245  InputStream *ist = get_input_stream(ost);
3246  AVCodecContext *enc_ctx = ost->enc_ctx;
3247  AVFormatContext *oc;
3248 
3249  if (ost->enc_timebase.num > 0) {
3250  enc_ctx->time_base = ost->enc_timebase;
3251  return;
3252  }
3253 
3254  if (ost->enc_timebase.num < 0) {
3255  if (ist) {
3256  enc_ctx->time_base = ist->st->time_base;
3257  return;
3258  }
3259 
3260  oc = output_files[ost->file_index]->ctx;
3261  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3262  }
3263 
3264  enc_ctx->time_base = default_time_base;
3265 }
3266 
/* Configure the encoder context for an output stream before opening it:
 * propagates disposition and chroma location from the input (if any),
 * determines the output frame rate, and fills per-media-type parameters
 * (sample rate/format timing for audio, geometry/pixel format/framerate
 * for video, defaults for subtitles).
 * NOTE(review): the function's signature line is missing from this
 * extract (the body below begins at its opening brace); several other
 * single lines are also elided — see inline notes. Code kept as-is. */
{
    InputStream *ist = get_input_stream(ost);
    AVCodecContext *enc_ctx = ost->enc_ctx;
    /* NOTE(review): a declaration line (likely "AVCodecContext *dec_ctx = NULL;")
     * is missing from this extract here. */
    AVFormatContext *oc = output_files[ost->file_index]->ctx;
    int j, ret;

    set_encoder_id(output_files[ost->file_index], ost);

    // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
    // hand, the legacy API makes demuxers set "rotate" metadata entries,
    // which have to be filtered out to prevent leaking them to output files.
    av_dict_set(&ost->st->metadata, "rotate", NULL, 0);

    if (ist) {
        /* inherit disposition and decoder properties from the mapped input */
        ost->st->disposition          = ist->st->disposition;

        dec_ctx = ist->dec_ctx;

        enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
    } else {
        /* no input stream: check whether this is the only stream of its
         * type in the output file */
        for (j = 0; j < oc->nb_streams; j++) {
            AVStream *st = oc->streams[j];
            if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
                break;
        }
        if (j == oc->nb_streams)
            if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
            /* NOTE(review): the rest of this condition and its body are
             * missing from this extract. */
    }

    if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        /* pick the output frame rate: filter graph, then input framerate,
         * then input r_frame_rate, finally a hard-coded 25 fps default */
        if (!ost->frame_rate.num)
        /* NOTE(review): the assignment line for the case above is missing
         * from this extract. */
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->framerate;
        if (ist && !ost->frame_rate.num)
            ost->frame_rate = ist->st->r_frame_rate;
        if (ist && !ost->frame_rate.num) {
            ost->frame_rate = (AVRational){25, 1};
            /* NOTE(review): the av_log(...) call line is missing here. */
            "No information "
            "about the input framerate is available. Falling "
            "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
            "if you want a different framerate.\n",
            ost->file_index, ost->index);
        }

        /* snap to the nearest rate the encoder supports, unless -force_fps */
        if (ost->enc->supported_framerates && !ost->force_fps) {
            int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
            ost->frame_rate = ost->enc->supported_framerates[idx];
        }
        // reduce frame rate for mpeg4 to be within the spec limits
        if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
            av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
                      ost->frame_rate.num, ost->frame_rate.den, 65535);
        }
    }

    switch (enc_ctx->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        /* NOTE(review): lines setting sample_fmt/sample_rate/channel layout
         * from the filter sink are missing from this extract. */
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);

        init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
        break;

    case AVMEDIA_TYPE_VIDEO:
        /* NOTE(review): the time-base initialization lines and parts of
         * the conditions below are missing from this extract. */

        if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
        if (   av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
            av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
                                       "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
        }

        /* geometry and pixel format come from the filter graph sink */
        enc_ctx->width  = av_buffersink_get_w(ost->filter->filter);
        enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
        enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
            ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
            av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
            /* NOTE(review): the alternative branch line is missing here. */

        enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
        if (dec_ctx)
            enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
                                                 av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);

        enc_ctx->framerate = ost->frame_rate;

        ost->st->avg_frame_rate = ost->frame_rate;

        if (!dec_ctx ||
            enc_ctx->width   != dec_ctx->width  ||
            enc_ctx->height  != dec_ctx->height ||
            enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
            /* NOTE(review): the body of this branch is missing here. */
        }

        /* honor an explicit -top option */
        if (ost->top_field_first == 0) {
            enc_ctx->field_order = AV_FIELD_BB;
        } else if (ost->top_field_first == 1) {
            enc_ctx->field_order = AV_FIELD_TT;
        }

        if (ost->forced_keyframes) {
            if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
                /* NOTE(review): the expression-parse call line is missing
                 * from this extract. */
                if (ret < 0) {
                    "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
                    return ret;
                }

            // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
            // parse it only for static kf timings
            } else if(strncmp(ost->forced_keyframes, "source", 6)) {
                /* NOTE(review): the parse_forced_key_frames(...) call line
                 * is missing from this extract. */
            }
        }
        break;
    case AVMEDIA_TYPE_SUBTITLE:
        enc_ctx->time_base = AV_TIME_BASE_Q;
        if (!enc_ctx->width) {
            /* default subtitle canvas to the input video dimensions */
            enc_ctx->width     = input_streams[ost->source_index]->st->codecpar->width;
            enc_ctx->height    = input_streams[ost->source_index]->st->codecpar->height;
        }
        break;
    case AVMEDIA_TYPE_DATA:
        break;
    default:
        abort();
        break;
    }

    /* the muxing time base starts out equal to the encoder time base */
    ost->mux_timebase = enc_ctx->time_base;

    return 0;
}
3420 
/* Fully initialize one output stream: run the encoder setup path (or the
 * streamcopy path), copy side data and timing hints to the muxer stream,
 * apply a user-specified -disposition, set up bitstream filters, and
 * finally try to write the output file header if all of its streams are
 * now ready. Returns 0 on success or a negative AVERROR; on failure a
 * human-readable message is written into 'error'.
 * NOTE(review): a few single source lines are elided in this extract —
 * see inline notes. Code kept as-is. */
static int init_output_stream(OutputStream *ost, char *error, int error_len)
{
    int ret = 0;

    if (ost->encoding_needed) {
        AVCodec      *codec = ost->enc;
        AVCodecContext *dec = NULL;
        InputStream *ist;

        ret = init_output_stream_encode(ost);
        if (ret < 0)
            return ret;

        if ((ist = get_input_stream(ost)))
            dec = ist->dec_ctx;
        if (dec && dec->subtitle_header) {
            /* ASS code assumes this buffer is null terminated so add extra byte. */
            /* NOTE(review): the allocation line for subtitle_header is
             * missing from this extract here. */
            if (!ost->enc_ctx->subtitle_header)
                return AVERROR(ENOMEM);
            memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
            /* NOTE(review): the subtitle_header_size assignment line is
             * missing from this extract here. */
        }
        /* default to automatic threading unless the user chose otherwise */
        if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
            av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
        /* audio encoders without built-in defaults get a 128k bitrate
         * unless the user set one */
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !codec->defaults &&
            !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
            !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
            av_dict_set(&ost->encoder_opts, "b", "128000", 0);

        ret = hw_device_setup_for_encode(ost);
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "encoder on output stream #%d:%d : %s",
                     ost->file_index, ost->index, av_err2str(ret));
            return ret;
        }

        /* subtitle transcoding is only supported within the same family
         * (text->text or bitmap->bitmap) */
        if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
            int input_props = 0, output_props = 0;
            /* NOTE(review): the avcodec_descriptor_get(...) initializer
             * lines for both descriptors are missing from this extract. */
            AVCodecDescriptor const *input_descriptor =
            AVCodecDescriptor const *output_descriptor =
            if (input_descriptor)
                input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (output_descriptor)
                output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
            if (input_props && output_props && input_props != output_props) {
                snprintf(error, error_len,
                         "Subtitle encoding currently only possible from text to text "
                         "or bitmap to bitmap");
                return AVERROR_INVALIDDATA;
            }
        }

        if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 1);
            snprintf(error, error_len,
                     "Error while opening encoder for output stream #%d:%d - "
                     "maybe incorrect parameters such as bit_rate, rate, width or height",
                     ost->file_index, ost->index);
            return ret;
        }
        if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
            !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
            /* NOTE(review): the av_buffersink_set_frame_size(...) call line
             * is missing from this extract here. */
                                         ost->enc_ctx->frame_size);
        if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
            ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
            av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
                                         " It takes bits/s as argument, not kbits/s\n");

        /* NOTE(review): the call that sets 'ret' here (likely copying codec
         * parameters to the stream) is missing from this extract, as is the
         * av_log(...) line of the error branch below. */
        if (ret < 0) {
            "Error initializing the output stream codec context.\n");
            exit_program(1);
        }
        /*
         * FIXME: ost->st->codec should't be needed here anymore.
         */
        ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
        if (ret < 0)
            return ret;

        /* propagate side data produced by the encoder to the stream */
        if (ost->enc_ctx->nb_coded_side_data) {
            int i;

            for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
                const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
                uint8_t *dst_data;

                dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
                if (!dst_data)
                    return AVERROR(ENOMEM);
                memcpy(dst_data, sd_src->data, sd_src->size);
            }
        }

        /*
         * Add global input side data. For now this is naive, and copies it
         * from the input stream's global side data. All side data should
         * really be funneled over AVFrame and libavfilter, then added back to
         * packet side data, and then potentially using the first packet for
         * global side data.
         */
        if (ist) {
            int i;
            for (i = 0; i < ist->st->nb_side_data; i++) {
                AVPacketSideData *sd = &ist->st->side_data[i];
                if (sd->type != AV_PKT_DATA_CPB_PROPERTIES) {
                    uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
                    if (!dst)
                        return AVERROR(ENOMEM);
                    memcpy(dst, sd->data, sd->size);
                    /* rotation is handled by the filter chain: neutralize
                     * the display matrix when autorotate is on */
                    if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
                        av_display_rotation_set((uint32_t *)dst, 0);
                }
            }
        }

        // copy timebase while removing common factors
        if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
            ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});

        // copy estimated duration as a hint to the muxer
        if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
            ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

        ost->st->codec->codec= ost->enc_ctx->codec;
    } else if (ost->stream_copy) {
        ret = init_output_stream_streamcopy(ost);
        if (ret < 0)
            return ret;
    }

    // parse user provided disposition, and update stream values
    if (ost->disposition) {
        static const AVOption opts[] = {
            { "disposition"         , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
            { "default"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT           },    .unit = "flags" },
            { "dub"                 , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB               },    .unit = "flags" },
            { "original"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL          },    .unit = "flags" },
            { "comment"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT           },    .unit = "flags" },
            { "lyrics"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS            },    .unit = "flags" },
            { "karaoke"             , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE           },    .unit = "flags" },
            { "forced"              , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED            },    .unit = "flags" },
            { "hearing_impaired"    , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED  },    .unit = "flags" },
            { "visual_impaired"     , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED   },    .unit = "flags" },
            { "clean_effects"       , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS     },    .unit = "flags" },
            { "attached_pic"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC      },    .unit = "flags" },
            { "captions"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS          },    .unit = "flags" },
            { "descriptions"        , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS      },    .unit = "flags" },
            { "dependent"           , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT         },    .unit = "flags" },
            { "metadata"            , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA          },    .unit = "flags" },
            { NULL },
        };
        /* a throwaway AVClass so av_opt_eval_flags can resolve the names */
        static const AVClass class = {
            .class_name = "",
            .item_name  = av_default_item_name,
            .option     = opts,
            .version    = LIBAVUTIL_VERSION_INT,
        };
        const AVClass *pclass = &class;

        ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
        if (ret < 0)
            return ret;
    }

    /* initialize bitstream filters for the output stream
     * needs to be done here, because the codec id for streamcopy is not
     * known until now */
    ret = init_output_bsfs(ost);
    if (ret < 0)
        return ret;

    ost->initialized = 1;

    /* write the file header now if every stream of the file is ready */
    ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
    if (ret < 0)
        return ret;

    return ret;
}
3610 
/* Warn (once per stream) when a packet arrives for a stream that appeared
 * after avformat_find_stream_info(); such streams are not mapped and will
 * be ignored. nb_streams_warn tracks the highest stream index already
 * reported so each new stream is warned about only once. */
static void report_new_stream(int input_index, AVPacket *pkt)
{
    InputFile *file = input_files[input_index];
    AVStream *st = file->ctx->streams[pkt->stream_index];

    if (pkt->stream_index < file->nb_streams_warn)
        return;   /* already warned about this stream */
    av_log(file->ctx, AV_LOG_WARNING,
           "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
           /* NOTE(review): the media-type-string argument line is missing
            * from this extract here. */
           input_index, pkt->stream_index,
           pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
    file->nb_streams_warn = pkt->stream_index + 1;
}
3625 
/* One-time setup before the main transcode loop: resolve source streams
 * for complex-filtergraph outputs, start framerate emulation clocks, open
 * all decoders, open encoders for streams not fed by filtergraphs, discard
 * unused programs, write headers for stream-less outputs, and finally dump
 * the stream mapping. Returns 0 on success or a negative AVERROR (after
 * printing the accumulated error message). */
static int transcode_init(void)
{
    int ret = 0, i, j, k;
    AVFormatContext *oc;
    OutputStream *ost;
    InputStream *ist;
    char error[1024] = {0};

    /* for outputs fed by a single-input complex filtergraph, record the
     * input stream index so the mapping can be reported below */
    for (i = 0; i < nb_filtergraphs; i++) {
        FilterGraph *fg = filtergraphs[i];
        for (j = 0; j < fg->nb_outputs; j++) {
            OutputFilter *ofilter = fg->outputs[j];
            if (!ofilter->ost || ofilter->ost->source_index >= 0)
                continue;
            if (fg->nb_inputs != 1)
                continue;
            for (k = nb_input_streams-1; k >= 0 ; k--)
                if (fg->inputs[0]->ist == input_streams[k])
                    break;
            ofilter->ost->source_index = k;
        }
    }

    /* init framerate emulation */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        if (ifile->rate_emu)
            for (j = 0; j < ifile->nb_streams; j++)
                input_streams[j + ifile->ist_index]->start = av_gettime_relative();
    }

    /* init input streams */
    for (i = 0; i < nb_input_streams; i++)
        if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
            /* a decoder failed: close any encoders already opened */
            for (i = 0; i < nb_output_streams; i++) {
                ost = output_streams[i];
                avcodec_close(ost->enc_ctx);
            }
            goto dump_format;
        }

    /* open each encoder */
    for (i = 0; i < nb_output_streams; i++) {
        // skip streams fed from filtergraphs until we have a frame for them
        if (output_streams[i]->filter)
            continue;

        ret = init_output_stream(output_streams[i], error, sizeof(error));
        if (ret < 0)
            goto dump_format;
    }

    /* discard unused programs */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *ifile = input_files[i];
        for (j = 0; j < ifile->ctx->nb_programs; j++) {
            AVProgram *p = ifile->ctx->programs[j];
            int discard  = AVDISCARD_ALL;

            /* keep the program if any of its streams is mapped */
            for (k = 0; k < p->nb_stream_indexes; k++)
                if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
                    discard = AVDISCARD_DEFAULT;
                    break;
                }
            p->discard = discard;
        }
    }

    /* write headers for files with no streams */
    for (i = 0; i < nb_output_files; i++) {
        oc = output_files[i]->ctx;
        if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
            ret = check_init_output_file(output_files[i], i);
            if (ret < 0)
                goto dump_format;
        }
    }

 dump_format:
    /* dump the stream mapping */
    av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
    for (i = 0; i < nb_input_streams; i++) {
        ist = input_streams[i];

        for (j = 0; j < ist->nb_filters; j++) {
            if (!filtergraph_is_simple(ist->filters[j]->graph)) {
                av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d (%s) -> %s",
                       ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
                       ist->filters[j]->name);
                if (nb_filtergraphs > 1)
                    av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
                av_log(NULL, AV_LOG_INFO, "\n");
            }
        }
    }

    for (i = 0; i < nb_output_streams; i++) {
        ost = output_streams[i];

        if (ost->attachment_filename) {
            /* an attached file */
            av_log(NULL, AV_LOG_INFO, "  File %s -> Stream #%d:%d\n",
                   ost->attachment_filename, ost->file_index, ost->index);
            continue;
        }

        if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
            /* output from a complex graph */
            av_log(NULL, AV_LOG_INFO, "  %s", ost->filter->name);
            if (nb_filtergraphs > 1)
                av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);

            av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
                   ost->index, ost->enc ? ost->enc->name : "?");
            continue;
        }

        av_log(NULL, AV_LOG_INFO, "  Stream #%d:%d -> #%d:%d",
               input_streams[ost->source_index]->file_index,
               input_streams[ost->source_index]->st->index,
               ost->file_index,
               ost->index);
        if (ost->sync_ist != input_streams[ost->source_index])
            av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
                   ost->sync_ist->file_index,
                   ost->sync_ist->st->index);
        if (ost->stream_copy)
            av_log(NULL, AV_LOG_INFO, " (copy)");
        else {
            /* report both the codec and the particular coder used, showing
             * "native" when the coder name equals the codec name */
            const AVCodec *in_codec    = input_streams[ost->source_index]->dec;
            const AVCodec *out_codec   = ost->enc;
            const char *decoder_name   = "?";
            const char *in_codec_name  = "?";
            const char *encoder_name   = "?";
            const char *out_codec_name = "?";
            const AVCodecDescriptor *desc;

            if (in_codec) {
                decoder_name  = in_codec->name;
                desc = avcodec_descriptor_get(in_codec->id);
                if (desc)
                    in_codec_name = desc->name;
                if (!strcmp(decoder_name, in_codec_name))
                    decoder_name = "native";
            }

            if (out_codec) {
                encoder_name   = out_codec->name;
                desc = avcodec_descriptor_get(out_codec->id);
                if (desc)
                    out_codec_name = desc->name;
                if (!strcmp(encoder_name, out_codec_name))
                    encoder_name = "native";
            }

            av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
                   in_codec_name, decoder_name,
                   out_codec_name, encoder_name);
        }
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    if (ret) {
        av_log(NULL, AV_LOG_ERROR, "%s\n", error);
        return ret;
    }

    /* NOTE(review): a single line is missing from this extract here
     * (presumably flagging that initialization completed). */

    return 0;
}
3797 
3798 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3799 static int need_output(void)
3800 {
3801  int i;
3802 
3803  for (i = 0; i < nb_output_streams; i++) {
3804  OutputStream *ost = output_streams[i];
3805  OutputFile *of = output_files[ost->file_index];
3806  AVFormatContext *os = output_files[ost->file_index]->ctx;
3807 
3808  if (ost->finished ||
3809  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3810  continue;
3811  if (ost->frame_number >= ost->max_frames) {
3812  int j;
3813  for (j = 0; j < of->ctx->nb_streams; j++)
3814  close_output_stream(output_streams[of->ost_index + j]);
3815  continue;
3816  }
3817 
3818  return 1;
3819  }
3820 
3821  return 0;
3822 }
3823 
/**
 * Select the output stream to process.
 *
 * @return selected output stream, or NULL if none available
 */
/* NOTE(review): the function's signature line is missing from this
 * extract (the body below begins at its opening brace), as is one
 * av_log(...) call line — see inline note. Code kept as-is. */
{
    int i;
    int64_t opts_min = INT64_MAX;
    OutputStream *ost_min = NULL;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        /* position of this stream in output time; streams with no dts yet
         * sort first (INT64_MIN) so they get fed */
        int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
                       av_rescale_q(ost->st->cur_dts, ost->st->time_base,
                                    AV_TIME_BASE_Q);
        if (ost->st->cur_dts == AV_NOPTS_VALUE)
            /* NOTE(review): the av_log(...) call line is missing here. */
            "cur_dts is invalid st:%d (%d) [init:%d i_done:%d finish:%d] (this is harmless if it occurs once at the start per stream)\n",
            ost->st->index, ost->st->id, ost->initialized, ost->inputs_done, ost->finished);

        /* not yet initialized streams take absolute priority */
        if (!ost->initialized && !ost->inputs_done)
            return ost;

        /* otherwise pick the stream that is furthest behind */
        if (!ost->finished && opts < opts_min) {
            opts_min = opts;
            ost_min  = ost->unavailable ? NULL : ost;
        }
    }
    return ost_min;
}
3855 
/* Turn terminal echo on stdin on (on != 0) or off, when termios support
 * was detected at build time; silently does nothing otherwise or when
 * stdin is not a terminal. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios attr;

    if (tcgetattr(0, &attr) != 0)
        return;

    if (on)
        attr.c_lflag |= ECHO;
    else
        attr.c_lflag &= ~ECHO;
    tcsetattr(0, TCSANOW, &attr);
#endif
}
3867 
/* Poll for and handle interactive keyboard commands during transcoding
 * (quit, verbosity, QP histogram, packet dumping, filter commands, debug
 * modes, help). Returns AVERROR_EXIT when the user requested termination,
 * 0 otherwise.
 * NOTE(review): the function's signature line is missing from this
 * extract (the body below begins at its opening brace); two other lines
 * are also elided — see inline notes. Code kept as-is. */
{
    int i, ret, key;
    static int64_t last_time;
    if (received_nb_signals)
        return AVERROR_EXIT;
    /* read_key() returns 0 on EOF */
    /* rate-limit keyboard polling to once per 100ms */
    if(cur_time - last_time >= 100000 && !run_as_daemon){
        key =  read_key();
        last_time = cur_time;
    }else
        key = -1;
    if (key == 'q')
        return AVERROR_EXIT;
    if (key == '+') av_log_set_level(av_log_get_level()+10);
    if (key == '-') av_log_set_level(av_log_get_level()-10);
    if (key == 's') qp_hist     ^= 1;
    if (key == 'h'){
        /* cycle: off -> packet dump -> packet+hex dump -> off */
        if (do_hex_dump){
            do_hex_dump = do_pkt_dump = 0;
        } else if(do_pkt_dump){
            do_hex_dump = 1;
        } else
            do_pkt_dump = 1;
        /* NOTE(review): one line is missing from this extract here. */
    }
    if (key == 'c' || key == 'C'){
        /* read a filter command line from the terminal and dispatch it */
        char buf[4096], target[64], command[256], arg[256] = {0};
        double time;
        int k, n = 0;
        fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
        i = 0;
        set_tty_echo(1);
        while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
            if (k > 0)
                buf[i++] = k;
        buf[i] = 0;
        set_tty_echo(0);
        fprintf(stderr, "\n");
        if (k > 0 &&
            (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
            av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
                   target, time, command, arg);
            for (i = 0; i < nb_filtergraphs; i++) {
                FilterGraph *fg = filtergraphs[i];
                if (fg->graph) {
                    if (time < 0) {
                        /* immediate command; 'c' stops at the first filter
                         * that accepts it, 'C' broadcasts */
                        ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
                                                          key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
                        fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
                    } else if (key == 'c') {
                        fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
                        ret = AVERROR_PATCHWELCOME;
                    } else {
                        /* queue the command to run at the given time */
                        ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
                        if (ret < 0)
                            fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
                    }
                }
            }
        } else {
            /* NOTE(review): the av_log(...) call line is missing here. */
            "Parse error, at least 3 arguments were expected, "
            "only %d given in string '%s'\n", n, buf);
        }
    }
    if (key == 'd' || key == 'D'){
        int debug=0;
        if(key == 'D') {
            /* cycle through debug modes, skipping the ones that crash */
            debug = input_streams[0]->st->codec->debug<<1;
            if(!debug) debug = 1;
            while(debug & (FF_DEBUG_DCT_COEFF
#if FF_API_DEBUG_MV
                    |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
#endif
                    )) //unsupported, would just crash
                debug += debug;
        }else{
            /* 'd': prompt for a numeric debug value */
            char buf[32];
            int k = 0;
            i = 0;
            set_tty_echo(1);
            while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
                if (k > 0)
                    buf[i++] = k;
            buf[i] = 0;
            set_tty_echo(0);
            fprintf(stderr, "\n");
            if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
                fprintf(stderr,"error parsing debug value\n");
        }
        for(i=0;i<nb_input_streams;i++) {
            input_streams[i]->st->codec->debug = debug;
        }
        for(i=0;i<nb_output_streams;i++) {
            OutputStream *ost = output_streams[i];
            ost->enc_ctx->debug = debug;
        }
        if(debug) av_log_set_level(AV_LOG_DEBUG);
        fprintf(stderr,"debug=%d\n", debug);
    }
    if (key == '?'){
        fprintf(stderr, "key    function\n"
                        "?      show this help\n"
                        "+      increase verbosity\n"
                        "-      decrease verbosity\n"
                        "c      Send command to first matching filter supporting it\n"
                        "C      Send/Queue command to all matching filters\n"
                        "D      cycle through available debug modes\n"
                        "h      dump packets/hex press to cycle through the 3 states\n"
                        "q      quit\n"
                        "s      Show QP histogram\n"
        );
    }
    return 0;
}
3984 
3985 #if HAVE_THREADS
/* Per-input-file demuxer thread: reads packets from the input context and
 * pushes them to the main thread over a message queue. On read error/EOF,
 * the error is propagated to the queue's receiving side and the thread
 * exits. When the queue is full in non-blocking mode, falls back to one
 * blocking send (this stalls reading for this input). */
static void *input_thread(void *arg)
{
    InputFile *f = arg;
    unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
    int ret = 0;

    while (1) {
        AVPacket pkt;
        ret = av_read_frame(f->ctx, &pkt);

        if (ret == AVERROR(EAGAIN)) {
            av_usleep(10000);   /* nothing available yet: back off briefly */
            continue;
        }
        if (ret < 0) {
            /* propagate EOF/error to the consumer side and stop */
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
        ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
        if (flags && ret == AVERROR(EAGAIN)) {
            /* queue full: retry once in blocking mode and warn the user */
            flags = 0;
            ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
            /* NOTE(review): the av_log(...) call line is missing here. */
            "Thread message queue blocking; consider raising the "
            "thread_queue_size option (current value: %d)\n",
            f->thread_queue_size);
        }
        if (ret < 0) {
            if (ret != AVERROR_EOF)
                av_log(f->ctx, AV_LOG_ERROR,
                       "Unable to send packet to main thread: %s\n",
                       av_err2str(ret));
            av_packet_unref(&pkt);
            av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
            break;
        }
    }

    return NULL;
}
4026 
/* Shut down and join the demuxer thread of input file i: drain any packets
 * still queued, join the thread, and free the message queue. No-op if the
 * file has no reader thread. */
static void free_input_thread(int i)
{
    InputFile *f = input_files[i];
    AVPacket pkt;

    if (!f || !f->in_thread_queue)
        return;
    /* NOTE(review): a line is missing from this extract here (presumably
     * signalling EOF to the sending side before draining). */
    /* drain and discard whatever the reader thread already queued */
    while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
        av_packet_unref(&pkt);

    pthread_join(f->thread, NULL);
    f->joined = 1;
    av_thread_message_queue_free(&f->in_thread_queue);
}
4042 
4043 static void free_input_threads(void)
4044 {
4045  int i;
4046 
4047  for (i = 0; i < nb_input_files; i++)
4048  free_input_thread(i);
4049 }
4050 
4051 static int init_input_thread(int i)
4052 {
4053  int ret;
4054  InputFile *f = input_files[i];
4055 
4056  if (nb_input_files == 1)
4057  return 0;
4058 
4059  if (f->ctx->pb ? !f->ctx->pb->seekable :
4060  strcmp(f->ctx->iformat->name, "lavfi"))
4061  f->non_blocking = 1;
4062  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4063  f->thread_queue_size, sizeof(AVPacket));
4064  if (ret < 0)
4065  return ret;
4066 
4067  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4068  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4069  av_thread_message_queue_free(&f->in_thread_queue);
4070  return AVERROR(ret);
4071  }
4072 
4073  return 0;
4074 }
4075 
4076 static int init_input_threads(void)
4077 {
4078  int i, ret;
4079 
4080  for (i = 0; i < nb_input_files; i++) {
4081  ret = init_input_thread(i);
4082  if (ret < 0)
4083  return ret;
4084  }
4085  return 0;
4086 }
4087 
/* Receive the next packet for input file f from its demuxer thread's
 * message queue; uses a non-blocking receive when the file was flagged
 * non-blocking. */
static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
{
    return av_thread_message_queue_recv(f->in_thread_queue, pkt,
                                        f->non_blocking ?
                                        /* NOTE(review): the line with the
                                         * flag operands is missing from
                                         * this extract here. */
}
4095 
/* Fetch the next packet for input file f. Under -re (rate emulation),
 * returns EAGAIN while any stream of the file is ahead of wall-clock time.
 * With multiple input files and threading, packets come from the demuxer
 * thread's queue; otherwise they are read directly.
 * NOTE(review): the function's signature line is missing from this
 * extract (the body below begins at its opening brace). Code kept as-is. */
{
    if (f->rate_emu) {
        int i;
        for (i = 0; i < f->nb_streams; i++) {
            InputStream *ist = input_streams[f->ist_index + i];
            /* compare stream position (in microseconds) to elapsed time */
            int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
            int64_t now = av_gettime_relative() - ist->start;
            if (pts > now)
                return AVERROR(EAGAIN);
        }
    }

#if HAVE_THREADS
    if (nb_input_files > 1)
        return get_input_packet_mt(f, pkt);
#endif
    return av_read_frame(f->ctx, pkt);
}
4115 
4116 static int got_eagain(void)
4117 {
4118  int i;
4119  for (i = 0; i < nb_output_streams; i++)
4120  if (output_streams[i]->unavailable)
4121  return 1;
4122  return 0;
4123 }
4124 
4125 static void reset_eagain(void)
4126 {
4127  int i;
4128  for (i = 0; i < nb_input_files; i++)
4129  input_files[i]->eagain = 0;
4130  for (i = 0; i < nb_output_streams; i++)
4131  output_streams[i]->unavailable = 0;
4132 }
4133 
// set duration to max(tmp, duration) in a proper time base and return duration's time_base
/* Compares the candidate 'tmp' (in tmp_time_base) against *duration (in
 * time_base) with av_compare_ts; stores the larger value into *duration
 * and returns the time base that *duration is now expressed in.
 * NOTE(review): the first line of the signature is missing from this
 * extract (only the trailing parameter is visible). Code kept as-is. */
                        AVRational time_base)
{
    int ret;

    if (!*duration) {
        /* nothing recorded yet: take the candidate as-is */
        *duration = tmp;
        return tmp_time_base;
    }

    ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
    if (ret < 0) {
        /* candidate is larger: it becomes the new duration */
        *duration = tmp;
        return tmp_time_base;
    }

    return time_base;
}
4153 
/* Rewind an input file to its start for -stream_loop: seek back to
 * start_time and extend the file's recorded duration by the span of the
 * iteration just finished (max_pts - min_pts plus an estimate of the last
 * frame's length). When any audio stream produced samples, audio timing is
 * trusted over video. Decrements the remaining loop count.
 * NOTE(review): the function's signature line is missing from this
 * extract (the body below begins at its opening brace). Code kept as-is. */
{
    InputStream *ist;
    AVCodecContext *avctx;
    int i, ret, has_audio = 0;
    int64_t duration = 0;

    ret = avformat_seek_file(is, -1, INT64_MIN, is->start_time, is->start_time, 0);
    if (ret < 0)
        return ret;

    for (i = 0; i < ifile->nb_streams; i++) {
        ist   = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        /* duration is the length of the last frame in a stream
         * when audio stream is present we don't care about
         * last video frame length because it's not defined exactly */
        if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
            has_audio = 1;
    }

    for (i = 0; i < ifile->nb_streams; i++) {
        ist   = input_streams[ifile->ist_index + i];
        avctx = ist->dec_ctx;

        if (has_audio) {
            if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
                AVRational sample_rate = {1, avctx->sample_rate};

                /* last-frame length = samples decoded in that frame */
                duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
            } else {
                continue;
            }
        } else {
            /* estimate last-frame length from the frame rate */
            if (ist->framerate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
            } else if (ist->st->avg_frame_rate.num) {
                duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
            } else {
                duration = 1;
            }
        }
        if (!ifile->duration)
            ifile->time_base = ist->st->time_base;
        /* the total duration of the stream, max_pts - min_pts is
         * the duration of the stream without the last frame */
        if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
            duration += ist->max_pts - ist->min_pts;
        ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
                                        ifile->time_base);
    }

    if (ifile->loop > 0)
        ifile->loop--;

    return ret;
}
4212 
4213 /*
4214  * Return
4215  * - 0 -- one packet was read and processed
4216  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4217  * this function should be called again
4218  * - AVERROR_EOF -- this function should not be called again
4219  */
4220 static int process_input(int file_index)
4221 {
4222  InputFile *ifile = input_files[file_index];
4224  InputStream *ist;
4225  AVPacket pkt;
4226  int ret, thread_ret, i, j;
4227  int64_t duration;
4228  int64_t pkt_dts;
4229  int disable_discontinuity_correction = copy_ts;
4230 
4231  is = ifile->ctx;
4232  ret = get_input_packet(ifile, &pkt);
4233 
4234  if (ret == AVERROR(EAGAIN)) {
4235  ifile->eagain = 1;
4236  return ret;
4237  }
4238  if (ret < 0 && ifile->loop) {
4239  AVCodecContext *avctx;
4240  for (i = 0; i < ifile->nb_streams; i++) {
4241  ist = input_streams[ifile->ist_index + i];
4242  avctx = ist->dec_ctx;
4243  if (ist->decoding_needed) {
4244  ret = process_input_packet(ist, NULL, 1);
4245  if (ret>0)
4246  return 0;
4247  avcodec_flush_buffers(avctx);
4248  }
4249  }
4250 #if HAVE_THREADS
4251  free_input_thread(file_index);
4252 #endif
4253  ret = seek_to_start(ifile, is);
4254 #if HAVE_THREADS
4255  thread_ret = init_input_thread(file_index);
4256  if (thread_ret < 0)
4257  return thread_ret;
4258 #endif
4259  if (ret < 0)
4260  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4261  else
4262  ret = get_input_packet(ifile, &pkt);
4263  if (ret == AVERROR(EAGAIN)) {
4264  ifile->eagain = 1;
4265  return ret;
4266  }
4267  }
4268  if (ret < 0) {
4269  if (ret != AVERROR_EOF) {
4270  print_error(is->url, ret);
4271  if (exit_on_error)
4272  exit_program(1);
4273  }
4274 
4275  for (i = 0; i < ifile->nb_streams; i++) {
4276  ist = input_streams[ifile->ist_index + i];
4277  if (ist->decoding_needed) {
4278  ret = process_input_packet(ist, NULL, 0);
4279  if (ret>0)
4280  return 0;
4281  }
4282 
4283  /* mark all outputs that don't go through lavfi as finished */
4284  for (j = 0; j < nb_output_streams; j++) {
4285  OutputStream *ost = output_streams[j];
4286 
4287  if (ost->source_index == ifile->ist_index + i &&
4288  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4289  finish_output_stream(ost);
4290  }
4291  }
4292 
4293  ifile->eof_reached = 1;
4294  return AVERROR(EAGAIN);
4295  }
4296 
4297  reset_eagain();
4298 
4299  if (do_pkt_dump) {
4301  is->streams[pkt.stream_index]);
4302  }
4303  /* the following test is needed in case new streams appear
4304  dynamically in stream : we ignore them */
4305  if (pkt.stream_index >= ifile->nb_streams) {
4306  report_new_stream(file_index, &pkt);
4307  goto discard_packet;
4308  }
4309 
4310  ist = input_streams[ifile->ist_index + pkt.stream_index];
4311 
4312  ist->data_size += pkt.size;
4313  ist->nb_packets++;
4314 
4315  if (ist->discard)
4316  goto discard_packet;
4317 
4318  if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4320  "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4321  if (exit_on_error)
4322  exit_program(1);
4323  }
4324 
4325  if (debug_ts) {
4326  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4327  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4331  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4332  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4333  av_ts2str(input_files[ist->file_index]->ts_offset),
4334  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4335  }
4336 
4337  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4338  int64_t stime, stime2;
4339  // Correcting starttime based on the enabled streams
4340  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4341  // so we instead do it here as part of discontinuity handling
4342  if ( ist->next_dts == AV_NOPTS_VALUE
4343  && ifile->ts_offset == -is->start_time
4344  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4345  int64_t new_start_time = INT64_MAX;
4346  for (i=0; i<is->nb_streams; i++) {
4347  AVStream *st = is->streams[i];
4348  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4349  continue;
4350  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4351  }
4352  if (new_start_time > is->start_time) {
4353  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4354  ifile->ts_offset = -new_start_time;
4355  }
4356  }
4357 
4358  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4359  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4360  ist->wrap_correction_done = 1;
4361 
4362  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4363  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4364  ist->wrap_correction_done = 0;
4365  }
4366  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4367  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4368  ist->wrap_correction_done = 0;
4369  }
4370  }
4371 
4372  /* add the stream-global side data to the first packet */
4373  if (ist->nb_packets == 1) {
4374  for (i = 0; i < ist->st->nb_side_data; i++) {
4375  AVPacketSideData *src_sd = &ist->st->side_data[i];
4376  uint8_t *dst_data;
4377 
4378  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4379  continue;
4380 
4381  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4382  continue;
4383 
4384  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4385  if (!dst_data)
4386  exit_program(1);
4387 
4388  memcpy(dst_data, src_sd->data, src_sd->size);
4389  }
4390  }
4391 
4392  if (pkt.dts != AV_NOPTS_VALUE)
4393  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4394  if (pkt.pts != AV_NOPTS_VALUE)
4395  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4396 
4397  if (pkt.pts != AV_NOPTS_VALUE)
4398  pkt.pts *= ist->ts_scale;
4399  if (pkt.dts != AV_NOPTS_VALUE)
4400  pkt.dts *= ist->ts_scale;
4401 
4403  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4405  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4406  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4407  int64_t delta = pkt_dts - ifile->last_ts;
4408  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4409  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4410  ifile->ts_offset -= delta;
4412  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4413  delta, ifile->ts_offset);
4414  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4415  if (pkt.pts != AV_NOPTS_VALUE)
4416  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4417  }
4418  }
4419 
4420  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4421  if (pkt.pts != AV_NOPTS_VALUE) {
4422  pkt.pts += duration;
4423  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4424  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4425  }
4426 
4427  if (pkt.dts != AV_NOPTS_VALUE)
4428  pkt.dts += duration;
4429 
4431 
4432  if (copy_ts && pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4433  (is->iformat->flags & AVFMT_TS_DISCONT) && ist->st->pts_wrap_bits < 60) {
4434  int64_t wrap_dts = av_rescale_q_rnd(pkt.dts + (1LL<<ist->st->pts_wrap_bits),
4435  ist->st->time_base, AV_TIME_BASE_Q,
4437  if (FFABS(wrap_dts - ist->next_dts) < FFABS(pkt_dts - ist->next_dts)/10)
4438  disable_discontinuity_correction = 0;
4439  }
4440 
4441  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4443  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4444  !disable_discontinuity_correction) {
4445  int64_t delta = pkt_dts - ist->next_dts;
4446  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4447  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4448  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4449  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4450  ifile->ts_offset -= delta;
4452  "timestamp discontinuity for stream #%d:%d "
4453  "(id=%d, type=%s): %"PRId64", new offset= %"PRId64"\n",
4454  ist->file_index, ist->st->index, ist->st->id,
4456  delta, ifile->ts_offset);
4457  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4458  if (pkt.pts != AV_NOPTS_VALUE)
4459  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4460  }
4461  } else {
4462  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4463  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4464  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4465  pkt.dts = AV_NOPTS_VALUE;
4466  }
4467  if (pkt.pts != AV_NOPTS_VALUE){
4468  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4469  delta = pkt_pts - ist->next_dts;
4470  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4471  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4472  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4473  pkt.pts = AV_NOPTS_VALUE;
4474  }
4475  }
4476  }
4477  }
4478 
4479  if (pkt.dts != AV_NOPTS_VALUE)
4480  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4481 
4482  if (debug_ts) {
4483  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4485  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4486  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4487  av_ts2str(input_files[ist->file_index]->ts_offset),
4488  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4489  }
4490 
4491  sub2video_heartbeat(ist, pkt.pts);
4492 
4493  process_input_packet(ist, &pkt, 0);
4494 
4495 discard_packet:
4496  av_packet_unref(&pkt);
4497 
4498  return 0;
4499 }
4500 
4501 /**
4502  * Perform a step of transcoding for the specified filter graph.
4503  *
4504  * @param[in] graph filter graph to consider
4505  * @param[out] best_ist input stream where a frame would allow to continue
4506  * @return 0 for success, <0 for error
4507  */
4508 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4509 {
4510  int i, ret;
4511  int nb_requests, nb_requests_max = 0;
4512  InputFilter *ifilter;
4513  InputStream *ist;
4514 
4515  *best_ist = NULL;
4516  ret = avfilter_graph_request_oldest(graph->graph);
4517  if (ret >= 0)
4518  return reap_filters(0);
4519 
4520  if (ret == AVERROR_EOF) {
4521  ret = reap_filters(1);
4522  for (i = 0; i < graph->nb_outputs; i++)
4523  close_output_stream(graph->outputs[i]->ost);
4524  return ret;
4525  }
4526  if (ret != AVERROR(EAGAIN))
4527  return ret;
4528 
4529  for (i = 0; i < graph->nb_inputs; i++) {
4530  ifilter = graph->inputs[i];
4531  ist = ifilter->ist;
4532  if (input_files[ist->file_index]->eagain ||
4533  input_files[ist->file_index]->eof_reached)
4534  continue;
4535  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4536  if (nb_requests > nb_requests_max) {
4537  nb_requests_max = nb_requests;
4538  *best_ist = ist;
4539  }
4540  }
4541 
4542  if (!*best_ist)
4543  for (i = 0; i < graph->nb_outputs; i++)
4544  graph->outputs[i]->ost->unavailable = 1;
4545 
4546  return 0;
4547 }
4548 
4549 /**
4550  * Run a single step of transcoding.
4551  *
4552  * @return 0 for success, <0 for error
4553  */
4554 static int transcode_step(void)
4555 {
4556  OutputStream *ost;
4557  InputStream *ist = NULL;
4558  int ret;
4559 
4560  ost = choose_output();
4561  if (!ost) {
4562  if (got_eagain()) {
4563  reset_eagain();
4564  av_usleep(10000);
4565  return 0;
4566  }
4567  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4568  return AVERROR_EOF;
4569  }
4570 
4571  if (ost->filter && !ost->filter->graph->graph) {
4573  ret = configure_filtergraph(ost->filter->graph);
4574  if (ret < 0) {
4575  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4576  return ret;
4577  }
4578  }
4579  }
4580 
4581  if (ost->filter && ost->filter->graph->graph) {
4582  if (!ost->initialized) {
4583  char error[1024] = {0};
4584  ret = init_output_stream(ost, error, sizeof(error));
4585  if (ret < 0) {
4586  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4587  ost->file_index, ost->index, error);
4588  exit_program(1);
4589  }
4590  }
4591  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4592  return ret;
4593  if (!ist)
4594  return 0;
4595  } else if (ost->filter) {
4596  int i;
4597  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4598  InputFilter *ifilter = ost->filter->graph->inputs[i];
4599  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4600  ist = ifilter->ist;
4601  break;
4602  }
4603  }
4604  if (!ist) {
4605  ost->inputs_done = 1;
4606  return 0;
4607  }
4608  } else {
4609  av_assert0(ost->source_index >= 0);
4610  ist = input_streams[ost->source_index];
4611  }
4612 
4613  ret = process_input(ist->file_index);
4614  if (ret == AVERROR(EAGAIN)) {
4615  if (input_files[ist->file_index]->eagain)
4616  ost->unavailable = 1;
4617  return 0;
4618  }
4619 
4620  if (ret < 0)
4621  return ret == AVERROR_EOF ? 0 : ret;
4622 
4623  return reap_filters(0);
4624 }
4625 
4626 /*
4627  * The following code is the main loop of the file converter
4628  */
4629 static int transcode(void)
4630 {
4631  int ret, i;
4632  AVFormatContext *os;
4633  OutputStream *ost;
4634  InputStream *ist;
4635  int64_t timer_start;
4636  int64_t total_packets_written = 0;
4637 
4638  ret = transcode_init();
4639  if (ret < 0)
4640  goto fail;
4641 
4642  if (stdin_interaction) {
4643  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4644  }
4645 
4646  timer_start = av_gettime_relative();
4647 
4648 #if HAVE_THREADS
4649  if ((ret = init_input_threads()) < 0)
4650  goto fail;
4651 #endif
4652 
4653  while (!received_sigterm) {
4654  int64_t cur_time= av_gettime_relative();
4655 
4656  /* if 'q' pressed, exits */
4657  if (stdin_interaction)
4658  if (check_keyboard_interaction(cur_time) < 0)
4659  break;
4660 
4661  /* check if there's any stream where output is still needed */
4662  if (!need_output()) {
4663  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4664  break;
4665  }
4666 
4667  ret = transcode_step();
4668  if (ret < 0 && ret != AVERROR_EOF) {
4669  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4670  break;
4671  }
4672 
4673  /* dump report by using the output first video and audio streams */
4674  print_report(0, timer_start, cur_time);
4675  }
4676 #if HAVE_THREADS
4677  free_input_threads();
4678 #endif
4679 
4680  /* at the end of stream, we must flush the decoder buffers */
4681  for (i = 0; i < nb_input_streams; i++) {
4682  ist = input_streams[i];
4683  if (!input_files[ist->file_index]->eof_reached) {
4684  process_input_packet(ist, NULL, 0);
4685  }
4686  }
4687  flush_encoders();
4688 
4689  term_exit();
4690 
4691  /* write the trailer if needed and close file */
4692  for (i = 0; i < nb_output_files; i++) {
4693  os = output_files[i]->ctx;
4694  if (!output_files[i]->header_written) {
4696  "Nothing was written into output file %d (%s), because "
4697  "at least one of its streams received no packets.\n",
4698  i, os->url);
4699  continue;
4700  }
4701  if ((ret = av_write_trailer(os)) < 0) {
4702  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4703  if (exit_on_error)
4704  exit_program(1);
4705  }
4706  }
4707 
4708  /* dump report by using the first video and audio streams */
4709  print_report(1, timer_start, av_gettime_relative());
4710 
4711  /* close each encoder */
4712  for (i = 0; i < nb_output_streams; i++) {
4713  ost = output_streams[i];
4714  if (ost->encoding_needed) {
4715  av_freep(&ost->enc_ctx->stats_in);
4716  }
4717  total_packets_written += ost->packets_written;
4719  av_log(NULL, AV_LOG_FATAL, "Empty output on stream %d.\n", i);
4720  exit_program(1);
4721  }
4722  }
4723 
4724  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4725  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4726  exit_program(1);
4727  }
4728 
4729  /* close each decoder */
4730  for (i = 0; i < nb_input_streams; i++) {
4731  ist = input_streams[i];
4732  if (ist->decoding_needed) {
4733  avcodec_close(ist->dec_ctx);
4734  if (ist->hwaccel_uninit)
4735  ist->hwaccel_uninit(ist->dec_ctx);
4736  }
4737  }
4738 
4740 
4741  /* finished ! */
4742  ret = 0;
4743 
4744  fail:
4745 #if HAVE_THREADS
4746  free_input_threads();
4747 #endif
4748 
4749  if (output_streams) {
4750  for (i = 0; i < nb_output_streams; i++) {
4751  ost = output_streams[i];
4752  if (ost) {
4753  if (ost->logfile) {
4754  if (fclose(ost->logfile))
4756  "Error closing logfile, loss of information possible: %s\n",
4757  av_err2str(AVERROR(errno)));
4758  ost->logfile = NULL;
4759  }
4760  av_freep(&ost->forced_kf_pts);
4761  av_freep(&ost->apad);
4762  av_freep(&ost->disposition);
4763  av_dict_free(&ost->encoder_opts);
4764  av_dict_free(&ost->sws_dict);
4765  av_dict_free(&ost->swr_opts);
4766  av_dict_free(&ost->resample_opts);
4767  }
4768  }
4769  }
4770  return ret;
4771 }
4772 
4774 {
4775  BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4776 #if HAVE_GETRUSAGE
4777  struct rusage rusage;
4778 
4779  getrusage(RUSAGE_SELF, &rusage);
4780  time_stamps.user_usec =
4781  (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4782  time_stamps.sys_usec =
4783  (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4784 #elif HAVE_GETPROCESSTIMES
4785  HANDLE proc;
4786  FILETIME c, e, k, u;
4787  proc = GetCurrentProcess();
4788  GetProcessTimes(proc, &c, &e, &k, &u);
4789  time_stamps.user_usec =
4790  ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4791  time_stamps.sys_usec =
4792  ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
4793 #else
4794  time_stamps.user_usec = time_stamps.sys_usec = 0;
4795 #endif
4796  return time_stamps;
4797 }
4798 
/* Return the peak memory usage of this process in bytes, or 0 when the
 * platform offers no way to query it. */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    /* POSIX: ru_maxrss is reported in kilobytes */
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    return (int64_t)usage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    /* Windows: peak pagefile usage of the current process */
    PROCESS_MEMORY_COUNTERS counters;
    HANDLE self = GetCurrentProcess();

    counters.cb = sizeof(counters);
    GetProcessMemoryInfo(self, &counters, sizeof(counters));
    return counters.PeakPagefileUsage;
#else
    return 0;
#endif
}
4816 
/* No-op log callback: discards every message. NOTE(review): presumably
 * installed via av_log_set_callback() for daemon mode ("-d"); the installing
 * call sits on a line not visible here — confirm against main(). */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4820 
4821 int main(int argc, char **argv)
4822 {
4823  int i, ret;
4825 
4826  init_dynload();
4827 
4829 
4830  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4831 
4833  parse_loglevel(argc, argv, options);
4834 
4835  if(argc>1 && !strcmp(argv[1], "-d")){
4836  run_as_daemon=1;
4838  argc--;
4839  argv++;
4840  }
4841 
4842 #if CONFIG_AVDEVICE
4844 #endif
4846 
4847  show_banner(argc, argv, options);
4848 
4849  /* parse options and open all input/output files */
4850  ret = ffmpeg_parse_options(argc, argv);
4851  if (ret < 0)
4852  exit_program(1);
4853 
4854  if (nb_output_files <= 0 && nb_input_files == 0) {
4855  show_usage();
4856  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4857  exit_program(1);
4858  }
4859 
4860  /* file converter / grab */
4861  if (nb_output_files <= 0) {
4862  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4863  exit_program(1);
4864  }
4865 
4866  for (i = 0; i < nb_output_files; i++) {
4867  if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4868  want_sdp = 0;
4869  }
4870 
4871  current_time = ti = get_benchmark_time_stamps();
4872  if (transcode() < 0)
4873  exit_program(1);
4874  if (do_benchmark) {
4875  int64_t utime, stime, rtime;
4876  current_time = get_benchmark_time_stamps();
4877  utime = current_time.user_usec - ti.user_usec;
4878  stime = current_time.sys_usec - ti.sys_usec;
4879  rtime = current_time.real_usec - ti.real_usec;
4881  "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4882  utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4883  }
4884  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4887  exit_program(69);
4888 
4890  return main_return_code;
4891 }
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1580
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:315
#define extra_bits(eb)
Definition: intrax8.c:159
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:342
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:117
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:778
AVRational enc_timebase
Definition: ffmpeg.h:461
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
Definition: ffmpeg.c:703
int got_output
Definition: ffmpeg.h:340
#define AV_DISPOSITION_METADATA
Definition: avformat.h:858
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1981
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1078
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:2058
#define ATOMIC_VAR_INIT(value)
Definition: stdatomic.h:31
#define NULL
Definition: coverity.c:32
int frame_number
Definition: ffmpeg.h:449
const struct AVCodec * codec
Definition: avcodec.h:535
Definition: ffmpeg.h:425
AVRational framerate
Definition: avcodec.h:2069
struct InputStream::@2 prev_sub
enum AVFieldOrder field_order
Video only.
Definition: codec_par.h:141
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof)
Definition: ffmpeg.c:862
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
Definition: ffmpeg.c:961
Bytestream IO Context.
Definition: avio.h:161
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:558
void term_init(void)
Definition: ffmpeg.c:395
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:334
AVCodecParameters * par_out
Parameters of the output stream.
Definition: bsf.h:83
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
uint8_t * name
Definition: ffmpeg.h:262
int nb_outputs
Definition: ffmpeg.h:291
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
int size
AVDictionary * swr_opts
Definition: ffmpeg.h:509
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:301
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2549
#define av_realloc_f(p, o, n)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
static int init_output_stream_streamcopy(OutputStream *ost)
Definition: ffmpeg.c:3025
void term_exit(void)
Definition: ffmpeg.c:336
int stream_copy
Definition: ffmpeg.h:514
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:1236
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2660
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1629
#define atomic_store(object, desired)
Definition: stdatomic.h:85
AVOption.
Definition: opt.h:246
AVRational frame_rate
Definition: ffmpeg.h:477
int64_t * forced_kf_pts
Definition: ffmpeg.h:488
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
static void flush(AVCodecContext *avctx)
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:312
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1709
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:376
char * filters
filtergraph associated to the -filter option
Definition: ffmpeg.h:504
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:321
static int process_input(int file_index)
Definition: ffmpeg.c:4220
int exit_on_error
Definition: ffmpeg_opt.c:164
int64_t cfr_next_pts
Definition: ffmpeg.h:325
int64_t forced_kf_ref_pts
Definition: ffmpeg.h:487
static int init_output_stream(OutputStream *ost, char *error, int error_len)
Definition: ffmpeg.c:3421
static atomic_int transcode_init_done
Definition: ffmpeg.c:344
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
int64_t bit_rate
the average bitrate
Definition: avcodec.h:576
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
Main libavfilter public API header.
int64_t pos
byte position in stream, -1 if unknown
Definition: packet.h:375
#define AV_DICT_DONT_OVERWRITE
Don&#39;t overwrite existing entries.
Definition: dict.h:79
static int run_as_daemon
Definition: ffmpeg.c:134
Memory buffer source API.
const char * desc
Definition: nvenc.c:79
void av_log_set_level(int level)
Set the log level.
Definition: log.c:440
AVRational framerate
Definition: ffmpeg.h:332
void avfilter_inout_free(AVFilterInOut **inout)
Free the supplied list of AVFilterInOut and set *inout to NULL.
Definition: graphparser.c:212
AVRational sample_aspect_ratio
Video only.
Definition: codec_par.h:136
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:422
int height
Definition: ffmpeg.h:246
int64_t max_pts
Definition: ffmpeg.h:321
AVFilterInOut * out_tmp
Definition: ffmpeg.h:265
int decoding_needed
Definition: ffmpeg.h:299
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
Definition: codec_par.h:60
const struct AVBitStreamFilter * filter
The bitstream filter this context is an instance of.
Definition: bsf.h:58
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:938
int num
Numerator.
Definition: rational.h:59
The bitstream filter state.
Definition: bsf.h:49
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1636
#define vsnprintf
Definition: snprintf.h:36
int rotate_overridden
Definition: ffmpeg.h:481
int index
stream index in AVFormatContext
Definition: avformat.h:877
int size
Definition: packet.h:356
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4799
int max_muxing_queue_size
Definition: ffmpeg.h:542
const char * b
Definition: vf_curves.c:116
static int nb_frames_dup
Definition: ffmpeg.c:135
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
#define us(width, name, range_min, range_max, subs,...)
Definition: cbs_h2645.c:276
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2937
void hw_device_free_all(void)
Definition: ffmpeg_hw.c:274
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:675
#define AV_DISPOSITION_DUB
Definition: avformat.h:822
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:905
int eagain
Definition: ffmpeg.h:396
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: codec_desc.h:102
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1184
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
static int init_output_stream_encode(OutputStream *ost)
Definition: ffmpeg.c:3267
static void abort_codec_experimental(AVCodec *c, int encoder)
Definition: ffmpeg.c:668
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:834
void(* hwaccel_uninit)(AVCodecContext *s)
Definition: ffmpeg.h:371
int quality
Definition: ffmpeg.h:540
unsigned num_rects
Definition: avcodec.h:2698
AVFrame * filter_frame
Definition: ffmpeg.h:306
static int transcode_init(void)
Definition: ffmpeg.c:3626
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2944
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
Definition: ffmpeg.c:2549
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:1553
int do_benchmark_all
Definition: ffmpeg_opt.c:157
enum AVMediaType type
Definition: codec.h:203
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:836
const char * key
int last_dropped
Definition: ffmpeg.h:471
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:685
discard all
Definition: avcodec.h:236
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:978
int64_t input_ts_offset
Definition: ffmpeg.h:402
int do_hex_dump
Definition: ffmpeg_opt.c:158
static AVPacket pkt
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1757
int nb_input_streams
Definition: ffmpeg.c:148
static void error(const char *err)
const char * name
Definition: ffmpeg.h:67
intptr_t atomic_int
Definition: stdatomic.h:55
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:978
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:3611
uint64_t packets_written
Definition: ffmpeg.h:534
AVCodec.
Definition: codec.h:190
#define VSYNC_VFR
Definition: ffmpeg.h:52
int nb_dts_buffer
Definition: ffmpeg.h:388
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:472
This struct describes the properties of an encoded stream.
Definition: codec_par.h:52
int print_stats
Definition: ffmpeg_opt.c:166
float dts_error_threshold
Definition: ffmpeg_opt.c:149
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:559
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int index
Definition: ffmpeg.h:282
uint64_t data_size
Definition: ffmpeg.h:532
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:485
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
Definition: fifo.c:122
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
#define AV_DISPOSITION_KARAOKE
Definition: avformat.h:826
struct FilterGraph * graph
Definition: ffmpeg.h:237
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:649
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
Definition: ffmpeg.c:2235
Undefined.
Definition: avutil.h:273
AVSubtitleRect ** rects
Definition: avcodec.h:2699
int encoding_needed
Definition: ffmpeg.h:448
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:673
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:4817
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame...
Definition: frame.h:639
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3868
Format I/O context.
Definition: avformat.h:1351
uint64_t samples_decoded
Definition: ffmpeg.h:385
memory buffer sink API for audio and video
struct InputStream * ist
Definition: ffmpeg.h:236
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, int *decode_failed)
Definition: ffmpeg.c:2323
#define AV_RL64
Definition: intreadwrite.h:173
unsigned int nb_stream_indexes
Definition: avformat.h:1273
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
enum HWAccelID id
Definition: ffmpeg.h:69
int64_t cur_dts
Definition: avformat.h:1079
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:2662
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: codec.h:75
uint64_t frames_decoded
Definition: ffmpeg.h:384
int header_written
Definition: ffmpeg.h:564
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVFilterGraph * graph
Definition: ffmpeg.h:285
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
Public dictionary API.
char * logfile_prefix
Definition: ffmpeg.h:499
static uint8_t * subtitle_out
Definition: ffmpeg.c:145
static void filter(int16_t *output, ptrdiff_t out_stride, int16_t *low, ptrdiff_t low_stride, int16_t *high, ptrdiff_t high_stride, int len, int clip)
Definition: cfhd.c:196
static int main_return_code
Definition: ffmpeg.c:346
static int64_t start_time
Definition: ffplay.c:332
int copy_initial_nonkeyframes
Definition: ffmpeg.h:524
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:128
static int init_output_bsfs(OutputStream *ost)
Definition: ffmpeg.c:2996
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:1194
int64_t * dts_buffer
Definition: ffmpeg.h:387
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, AVFMT_TS_NONSTRICT, AVFMT_TS_NEGATIVE
Definition: avformat.h:519
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
Opaque data information usually continuous.
Definition: avutil.h:203
AVDictionary * sws_dict
Definition: ffmpeg.h:508
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
int width
Video only.
Definition: codec_par.h:126
float delta
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:222
AVOptions.
int subtitle_header_size
Definition: avcodec.h:2015
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_NOTIMESTAMPS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:664
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
miscellaneous OS support macros and functions.
timestamp utils, mostly useful for debugging/logging purposes
int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt, AVStream *ost, const AVStream *ist, enum AVTimebaseSource copy_tb)
Transfer internal timing information from one stream to another.
Definition: utils.c:5781
int stdin_interaction
Definition: ffmpeg_opt.c:168
FILE * logfile
Definition: ffmpeg.h:500
#define f(width, name)
Definition: cbs_vp9.c:255
AVDictionary * opts
Definition: ffmpeg.h:556
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:92
#define media_type_string
Definition: cmdutils.h:620
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: packet.h:373
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: packet.h:108
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
int id
Format-specific stream ID.
Definition: avformat.h:883
#define ECHO(name, type, min, max)
Definition: af_aecho.c:188
#define FF_API_DEBUG_MV
Definition: version.h:58
static int need_output(void)
Definition: ffmpeg.c:3799
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:444
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:393
enum AVPixelFormat pix_fmt
For decoders, a hardware pixel format which that decoder may be able to decode to if suitable hardwar...
Definition: codec.h:434
int av_fifo_space(const AVFifoBuffer *f)
Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write int...
Definition: fifo.c:82
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:982
static double psnr(double d)
Definition: ffmpeg.c:1346
int do_benchmark
Definition: ffmpeg_opt.c:156
int audio_sync_method
Definition: ffmpeg_opt.c:152
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:262
int shortest
Definition: ffmpeg.h:562
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1419
int64_t duration
Definition: movenc.c:63
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: utils.c:2193
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
static AVFrame * frame
static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
Definition: ffmpeg.c:1835
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:114
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
const char * name
Definition: bsf.h:99
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
static void finish(void)
Definition: movenc.c:345
#define AV_DISPOSITION_DEPENDENT
dependent audio stream (mix_type=0 in mpegts)
Definition: avformat.h:859
int nb_streams
Definition: ffmpeg.h:409
uint8_t * data
Definition: packet.h:355
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
enum AVMediaType type
Definition: ffmpeg.h:239
static void set_tty_echo(int on)
Definition: ffmpeg.c:3856
AVDictionary * resample_opts
Definition: ffmpeg.h:510
#define llrintf(x)
Definition: libm.h:399
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:663
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:3180
#define FFMIN3(a, b, c)
Definition: common.h:97
AVFilterContext * filter
Definition: ffmpeg.h:259
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
Definition: ffmpeg.c:4154
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:5059
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
int * formats
Definition: ffmpeg.h:276
#define ff_dlog(a,...)
int nb_input_files
Definition: ffmpeg.c:150
#define AVERROR_EOF
End of file.
Definition: error.h:55
static int read_key(void)
Definition: ffmpeg.c:433
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity. ...
Definition: ffmpeg.c:1415
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define lrintf(x)
Definition: libm_mips.h:70
static volatile int ffmpeg_exited
Definition: ffmpeg.c:345
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:845
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:1351
uint8_t * data
Definition: packet.h:299
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:447
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the &#39;-loglevel&#39; option in the command line args and apply it.
Definition: cmdutils.c:503
#define max(a, b)
Definition: cuda_runtime.h:33
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:2663
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:557
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVCodec * dec
Definition: ffmpeg.h:304
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1271
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1545
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:213
int top_field_first
Definition: ffmpeg.h:333
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1499
int nb_output_streams
Definition: ffmpeg.c:153
int file_index
Definition: ffmpeg.h:295
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array.
Definition: mem.c:198
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:1168
unsigned int * stream_index
Definition: avformat.h:1272
int av_buffersink_get_h(const AVFilterContext *ctx)
struct InputStream::sub2video sub2video
int av_buffersink_get_format(const AVFilterContext *ctx)
int wrap_correction_done
Definition: ffmpeg.h:316
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:318
uint64_t channel_layout
Audio only.
Definition: codec_par.h:162
#define av_log(a,...)
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:280
void sub2video_update(InputStream *ist, int64_t heartbeat_pts, AVSubtitle *sub)
Definition: ffmpeg.c:240
#define AV_DISPOSITION_CAPTIONS
To specify text track kind (different from subtitles default).
Definition: avformat.h:856
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:60
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:614
int64_t next_dts
Definition: ffmpeg.h:311
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:388
void avcodec_parameters_free(AVCodecParameters **par)
Free an AVCodecParameters instance and everything associated with it and write NULL to the supplied p...
Definition: utils.c:2107
unsigned int initialize
marks if sub2video_update should force an initialization
Definition: ffmpeg.h:351
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:600
AVFifoBuffer * sub_queue
queue of AVSubtitle* before filter init
Definition: ffmpeg.h:348
Main libavdevice API header.
Callback for checking whether to abort blocking functions.
Definition: avio.h:58
#define src
Definition: vp8dsp.c:254
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:1152
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:2083
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2456
AVRational av_stream_get_codec_timebase(const AVStream *st)
Get the internal codec timebase from a stream.
Definition: utils.c:5843
libswresample public header
enum AVCodecID id
Definition: codec.h:204
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:373
AVRational sample_aspect_ratio
Definition: ffmpeg.h:247
const AVCodecDefault * defaults
Private codec-specific defaults.
Definition: codec.h:257
int rate_emu
Definition: ffmpeg.h:412
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
int width
Definition: frame.h:358
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:69
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:816
int sample_rate
Definition: ffmpeg.h:249
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1396
static void reset_eagain(void)
Definition: ffmpeg.c:4125
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:374
void av_packet_rescale_ts(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another...
Definition: avpacket.c:712
int ffmpeg_parse_options(int argc, char **argv)
Definition: ffmpeg_opt.c:3312
FilterGraph ** filtergraphs
Definition: ffmpeg.c:157
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:490
AVFilterContext * filter
Definition: ffmpeg.h:235
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:464
#define atomic_load(object)
Definition: stdatomic.h:93
int64_t start
Definition: ffmpeg.h:308
int loop
Definition: ffmpeg.h:398
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:2661
uint64_t nb_packets
Definition: ffmpeg.h:382
static BenchmarkTimeStamps current_time
Definition: ffmpeg.c:142
#define AVERROR(e)
Definition: error.h:43
int64_t last_mux_dts
Definition: ffmpeg.h:458
int video_sync_method
Definition: ffmpeg_opt.c:153
int format
Definition: ffmpeg.h:244
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:353
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:138
#define VSYNC_VSCFR
Definition: ffmpeg.h:53
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
char * sdp_filename
Definition: ffmpeg_opt.c:145
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
int last_nb0_frames[3]
Definition: ffmpeg.h:472
Display matrix.
int methods
Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible setup methods which can be used...
Definition: codec.h:439
The codec supports this format via the hw_device_ctx interface.
Definition: codec.h:393
ff_const59 struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1363
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
char * url
input or output URL.
Definition: avformat.h:1447
int video_delay
Video only.
Definition: codec_par.h:155
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:649
const char * r
Definition: vf_curves.c:114
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:114
int capabilities
Codec capabilities.
Definition: codec.h:209
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: utils.c:144
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:255
unsigned int nb_programs
Definition: avformat.h:1530
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: codec_id.h:411
enum AVMediaType codec_type
General type of the encoded data.
Definition: codec_par.h:56
const char * arg
Definition: jacosubdec.c:66
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:586
AVChapter ** chapters
Definition: avformat.h:1581
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:366
AVRational time_base_in
The timebase used for the timestamps of the input packets.
Definition: bsf.h:89
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
enum AVPacketSideDataType type
Definition: packet.h:301
int av_log_get_level(void)
Get the current log level.
Definition: log.c:435
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
const char * name
Name of the codec implementation.
Definition: codec.h:197
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:888
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:650
int eof
Definition: ffmpeg.h:255
int force_fps
Definition: ffmpeg.h:479
int hw_device_setup_for_encode(OutputStream *ost)
Definition: ffmpeg_hw.c:419
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:949
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1268
#define FFMAX(a, b)
Definition: common.h:94
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:117
int qp_hist
Definition: ffmpeg_opt.c:167
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
#define fail()
Definition: checkasm.h:123
float frame_drop_threshold
Definition: ffmpeg_opt.c:154
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: codec.h:122
int64_t error[4]
Definition: ffmpeg.h:551
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:144
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:361
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:1237
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
uint32_t end_display_time
Definition: avcodec.h:2697
static int want_sdp
Definition: ffmpeg.c:140
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:2700
OutputFilter * filter
Definition: ffmpeg.h:502
static void check_decode_result(InputStream *ist, int *got_output, int ret)
Definition: ffmpeg.c:2078
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:477
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: codec_desc.h:54
AVRational frame_aspect_ratio
Definition: ffmpeg.h:484
#define AV_DISPOSITION_LYRICS
Definition: avformat.h:825
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
Definition: ffmpeg.c:2185
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1660
#define FFDIFFSIGN(x, y)
Comparator.
Definition: common.h:92
#define AV_DISPOSITION_FORCED
Track should be used during playback by default.
Definition: avformat.h:833
common internal API header
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
AVRational mux_timebase
Definition: ffmpeg.h:460
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1407
AVDictionary * opts
Definition: movenc.c:50
int block_align
Audio only.
Definition: codec_par.h:177
static int nb_frames_drop
Definition: ffmpeg.c:137
A bitmap, pict will be set.
Definition: avcodec.h:2642
int linesize[4]
Definition: avcodec.h:2678
int nb_output_files
Definition: ffmpeg.c:155
int seekable
A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.
Definition: avio.h:260
int channels
number of audio channels, only used for audio.
Definition: frame.h:606
audio channel layout utility functions
int is_cfr
Definition: ffmpeg.h:478
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:383
static int transcode(void)
Definition: ffmpeg.c:4629
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:333
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:537
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
#define NAN
Definition: mathematics.h:64
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:505
#define FFMIN(a, b)
Definition: common.h:96
AVPacketSideData * coded_side_data
Additional data associated with the entire coded stream.
Definition: avcodec.h:2201
uint64_t * channel_layouts
Definition: ffmpeg.h:277
#define VSYNC_AUTO
Definition: ffmpeg.h:49
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:455
attribute_deprecated int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
Copy the settings of the source AVCodecContext into the destination AVCodecContext.
Definition: options.c:216
int saw_first_ts
Definition: ffmpeg.h:330
int abort_on_flags
Definition: ffmpeg_opt.c:165
This side data contains quality related information from the encoder.
Definition: packet.h:132
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that&#39;s been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:76
#define FFSIGN(a)
Definition: common.h:73
struct OutputStream * ost
Definition: ffmpeg.h:260
int width
picture width / height.
Definition: avcodec.h:699
uint8_t w
Definition: llviddspenc.c:38
char * apad
Definition: ffmpeg.h:511
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames...
Definition: avcodec.h:2226
int64_t nb_samples
Definition: ffmpeg.h:327
AVRational time_base_out
The timebase used for the timestamps of the output packets.
Definition: bsf.h:95
int hw_device_setup_for_decode(InputStream *ist)
Definition: ffmpeg_hw.c:303
double forced_keyframes_expr_const_values[FKF_NB]
Definition: ffmpeg.h:493
int av_packet_make_refcounted(AVPacket *pkt)
Ensure the data described by a given packet is reference counted.
Definition: avpacket.c:671
int64_t duration
Definition: ffmpeg.h:399
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:525
const char * name
Definition: avformat.h:500
int width
Definition: ffmpeg.h:246
int32_t
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
AVFormatContext * ctx
Definition: movenc.c:48
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:312
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:296
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2261
int nb_filtergraphs
Definition: ffmpeg.c:158
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:94
int64_t last_ts
Definition: ffmpeg.h:405
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define s(width, name)
Definition: cbs_vp9.c:257
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:423
int do_pkt_dump
Definition: ffmpeg_opt.c:159
int64_t max_frames
Definition: ffmpeg.h:468
#define AV_RL32
Definition: intreadwrite.h:146
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:328
int audio_channels_mapped
Definition: ffmpeg.h:497
AVDictionary * metadata
Definition: avformat.h:940
uint8_t * av_stream_new_side_data(AVStream *stream, enum AVPacketSideDataType type, int size)
Allocate new information from stream.
Definition: utils.c:5561
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:658
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:691
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:84
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:37
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:108
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:109
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
#define is(width, name, range_min, range_max, subs,...)
Definition: cbs_h2645.c:284
uint8_t * data[4]
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:2677
static int got_eagain(void)
Definition: ffmpeg.c:4116
int inputs_done
Definition: ffmpeg.h:521
int vstats_version
Definition: ffmpeg_opt.c:173
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:541
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it...
Definition: error.h:72
#define FF_ARRAY_ELEMS(a)
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:844
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:133
AVCodecContext * enc
Definition: muxing.c:55
#define av_log2
Definition: intmath.h:83
ff_const59 struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1370
#define AV_DISPOSITION_VISUAL_IMPAIRED
stream for visual impaired audiences
Definition: avformat.h:835
int ret
Definition: ffmpeg.h:341
int audio_volume
Definition: ffmpeg_opt.c:151
int64_t sys_usec
Definition: ffmpeg.c:126
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Stream structure.
Definition: avformat.h:876
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal codec state / flush internal buffers.
Definition: utils.c:1089
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:462
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:373
InputFilter ** filters
Definition: ffmpeg.h:358
int fix_sub_duration
Definition: ffmpeg.h:338
static BenchmarkTimeStamps get_benchmark_time_stamps(void)
Definition: ffmpeg.c:4773
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:586
#define VSYNC_DROP
Definition: ffmpeg.h:54
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg.c:2097
int64_t recording_time
Definition: ffmpeg.h:408
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:5071
Definition: ffmpeg.h:66
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:1206
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:80
int(* init)(AVCodecContext *s)
Definition: ffmpeg.h:68
static int check_init_output_file(OutputFile *of, int file_index)
Definition: ffmpeg.c:2950
#define AV_DISPOSITION_DEFAULT
Definition: avformat.h:821
AVStream * st
Definition: ffmpeg.h:296
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:176
sample_rate
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:3139
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
int frame_size
Definition: mxfenc.c:2137
enum AVHWDeviceType hwaccel_device_type
Definition: ffmpeg.h:365
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:197
#define AV_DISPOSITION_DESCRIPTIONS
Definition: avformat.h:857
int ost_index
Definition: ffmpeg.h:557
struct InputStream * sync_ist
Definition: ffmpeg.h:452
#define AV_BPRINT_SIZE_AUTOMATIC
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1649
enum AVMediaType codec_type
Definition: avcodec.h:534
double ts_scale
Definition: ffmpeg.h:329
int unavailable
Definition: ffmpeg.h:513
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown...
Definition: frame.h:579
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:172
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2872
enum AVCodecID codec_id
Definition: avcodec.h:536
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:336
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from it.
Definition: fifo.c:77
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1655
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:191
float max_error_rate
Definition: ffmpeg_opt.c:170
int sample_rate
samples per second
Definition: avcodec.h:1186
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:331
uint64_t frames_encoded
Definition: ffmpeg.h:536
AVIOContext * pb
I/O context.
Definition: avformat.h:1393
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:168
int av_fifo_realloc2(AVFifoBuffer *f, unsigned int new_size)
Resize an AVFifoBuffer.
Definition: fifo.c:87
AVFifoBuffer * muxing_queue
Definition: ffmpeg.h:545
int ist_index
Definition: ffmpeg.h:397
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:525
static int loop
Definition: ffplay.c:341
int debug
debug
Definition: avcodec.h:1611
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
static void print_sdp(void)
Definition: ffmpeg.c:2731
const char * graph_desc
Definition: ffmpeg.h:283
int guess_layout_max
Definition: ffmpeg.h:334
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes of an image.
Definition: pixdesc.h:81
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
int64_t start_time
Definition: ffmpeg.h:406
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:1857
main external API structure.
Definition: avcodec.h:526
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:605
int64_t user_usec
Definition: ffmpeg.c:125
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:370
long long int64_t
Definition: coverity.c:34
uint8_t * data
The data buffer.
Definition: buffer.h:89
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:223
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:492
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:871
int * sample_rates
Definition: ffmpeg.h:278
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1133
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:388
const char * attachment_filename
Definition: ffmpeg.h:523
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1964
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
a very simple circular buffer FIFO implementation
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:392
AVRational time_base
Definition: ffmpeg.h:401
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based. Decoded AVSubtitle data can be read from the AVSubtitleRect->pict field.
Definition: codec_desc.h:97
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:659
AVCodecContext * enc_ctx
Definition: ffmpeg.h:465
void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
Remove and free all side data instances of the given type.
Definition: frame.c:813
AVFrame * decoded_frame
Definition: ffmpeg.h:305
int extradata_size
Definition: avcodec.h:628
Perform non-blocking operation.
Definition: threadmessage.h:31
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:253
Immediately push the frame to the output.
Definition: buffersrc.h:46
#define llrint(x)
Definition: libm.h:394
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
Replacements for frequently missing libm functions.
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:4508
int nb_coded_side_data
Definition: avcodec.h:2202
int channels
Definition: ffmpeg.h:250
int * audio_channels_map
Definition: ffmpeg.h:496
#define VSYNC_PASSTHROUGH
Definition: ffmpeg.h:50
Describe the class of an AVClass context structure.
Definition: log.h:67
int sample_rate
Sample rate of the audio data.
Definition: frame.h:472
int configure_filtergraph(FilterGraph *fg)
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:1341
OutputStream ** output_streams
Definition: ffmpeg.c:152
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:92
int index
Definition: gxfenc.c:89
enum AVMediaType av_buffersink_get_type(const AVFilterContext *ctx)
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:1623
Rational number (pair of numerator and denominator).
Definition: rational.h:58
int file_index
Definition: ffmpeg.h:444
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:40
#define ABORT_ON_FLAG_EMPTY_OUTPUT
Definition: ffmpeg.h:433
double rotate_override_value
Definition: ffmpeg.h:482
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: utils.c:2136
int64_t sync_opts
Definition: ffmpeg.h:453
char * vstats_filename
Definition: ffmpeg_opt.c:144
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:162
AVCodecContext * dec_ctx
Definition: ffmpeg.h:303
char * disposition
Definition: ffmpeg.h:526
cl_device_type type
int filtergraph_is_simple(FilterGraph *fg)
void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:233
#define mid_pred
Definition: mathops.h:97
AVMediaType
Definition: avutil.h:199
discard useless packets like 0 size packets in avi
Definition: avcodec.h:231
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize an AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1141
int av_buffersink_get_w(const AVFilterContext *ctx)
int nb_streams_warn
Definition: ffmpeg.h:411
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
Definition: ffmpeg.c:3243
AVDictionary * decoder_opts
Definition: ffmpeg.h:331
int autorotate
Definition: ffmpeg.h:336
const char * name
Name of the codec described by this descriptor.
Definition: codec_desc.h:46
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:574
#define snprintf
Definition: snprintf.h:34
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:564
int frame_bits_per_raw_sample
Definition: ffmpeg_opt.c:169
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:4455
int64_t ts_offset
Definition: ffmpeg.h:404
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:287
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:4554
char * filters_script
filtergraph script associated to the -filter_script option
Definition: ffmpeg.h:505
int decode_error_flags
decode error flags of the frame, set to a combination of FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there were errors during the decoding.
Definition: frame.h:595
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:1808
misc parsing utilities
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1780
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base, AVRational time_base)
Definition: ffmpeg.c:4135
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:93
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags)
Seek to timestamp ts.
Definition: utils.c:2550
This struct describes the properties of a single codec described by an AVCodecID. ...
Definition: codec_desc.h:38
AVFrame * filtered_frame
Definition: ffmpeg.h:469
int source_index
Definition: ffmpeg.h:446
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:325
static volatile int received_nb_signals
Definition: ffmpeg.c:343
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.h:372
int copy_prior_start
Definition: ffmpeg.h:525
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:554
Keep a reference to the frame.
Definition: buffersrc.h:53
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:592
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
Definition: ffmpeg.c:694
static int64_t pts
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:76
#define flags(name, subs,...)
Definition: cbs_av1.c:576
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:91
int nb_filters
Definition: ffmpeg.h:359
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2776
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1456
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:314
uint8_t level
Definition: svq3.c:210
AVExpr * forced_keyframes_pexpr
Definition: ffmpeg.h:492
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
int64_t real_usec
Definition: ffmpeg.c:124
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:312
int forced_kf_count
Definition: ffmpeg.h:489
int64_t start
Definition: avformat.h:1311
int64_t duration
Decoding: duration of the stream, in stream time base.
Definition: avformat.h:925
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
OSTFinished finished
Definition: ffmpeg.h:512
char * forced_keyframes
Definition: ffmpeg.h:491
int sample_rate
Audio only.
Definition: codec_par.h:170
uint64_t data_size
Definition: ffmpeg.h:380
int64_t bitrate
Definition: h264_levels.c:131
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:65
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:314
static AVStream * ost
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error code.
Definition: cmdutils.c:1085
struct FilterGraph * graph
Definition: ffmpeg.h:261
uint64_t limit_filesize
Definition: ffmpeg.h:560
const OptionDef options[]
Definition: ffmpeg_opt.c:3388
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1519
AVIOContext * progress_avio
Definition: ffmpeg.c:143
int main(int argc, char **argv)
Definition: ffmpeg.c:4821
int reinit_filters
Definition: ffmpeg.h:361
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:458
#define avio_print(s,...)
Write strings (const char *) to the context.
Definition: avio.h:594
AVCodecParameters * ref_par
Definition: ffmpeg.h:466
#define VSYNC_CFR
Definition: ffmpeg.h:51
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:175
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: codec.h:114
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:915
static double c[64]
#define AVFMT_NOSTREAMS
Format does not require any streams.
Definition: avformat.h:467
AVStream * st
Definition: muxing.c:54
static AVCodecContext * dec_ctx
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:929
uint32_t start_display_time
Definition: avcodec.h:2696
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
int pts_wrap_bits
number of bits in pts (used for wrapping control)
Definition: avformat.h:1068
uint64_t samples_encoded
Definition: ffmpeg.h:537
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1310
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:1215
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:35
int thread_safe_callbacks
Set by the client if its custom get_buffer() callback can be called synchronously from another thread, which allows faster multithreaded decoding.
Definition: avcodec.h:1814
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:927
char * key
Definition: dict.h:86
static FILE * vstats_file
Definition: ffmpeg.c:112
int den
Denominator.
Definition: rational.h:60
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:87
AVFrame * last_frame
Definition: ffmpeg.h:470
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:155
uint64_t channel_layout
Definition: ffmpeg.h:251
#define AV_PKT_FLAG_CORRUPT
The packet content is corrupted.
Definition: packet.h:389
AVBSFContext * bsf_ctx
Definition: ffmpeg.h:463
int copy_ts
Definition: ffmpeg_opt.c:160
static void do_video_out(OutputFile *of, OutputStream *ost, AVFrame *next_picture, double sync_ipts)
Definition: ffmpeg.c:1044
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:4498
AVFormatContext * ctx
Definition: ffmpeg.h:394
int pict_type
Definition: ffmpeg.h:548
AVSubtitle subtitle
Definition: ffmpeg.h:342
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:300
int eof_reached
Definition: ffmpeg.h:395
int forced_kf_index
Definition: ffmpeg.h:490
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:465
char * avfilter
Definition: ffmpeg.h:503
int hwaccel_decode_init(AVCodecContext *avctx)
Definition: ffmpeg_hw.c:516
uint8_t * name
Definition: ffmpeg.h:238
char * value
Definition: dict.h:87
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
Definition: fifo.c:43
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:452
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stand-alone.
Definition: timestamp.h:54
float dts_delta_threshold
Definition: ffmpeg_opt.c:148
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:766
int channels
number of audio channels
Definition: avcodec.h:1187
int top_field_first
Definition: ffmpeg.h:480
int av_buffersink_get_channels(const AVFilterContext *ctx)
OutputFilter ** outputs
Definition: ffmpeg.h:290
InputFile ** input_files
Definition: ffmpeg.c:149
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.c:2862
void av_log_set_flags(int arg)
Definition: log.c:445
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:145
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:378
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg.c:2108
AVFormatContext * ctx
Definition: ffmpeg.h:555
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:3394
#define AV_DISPOSITION_COMMENT
Definition: avformat.h:824
void show_usage(void)
Definition: ffmpeg_opt.c:3261
int channels
Audio only.
Definition: codec_par.h:166
An instance of a filter.
Definition: avfilter.h:338
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:901
#define LIBAVCODEC_IDENT
Definition: version.h:42
char * hwaccel_device
Definition: ffmpeg.h:366
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: packet.h:354
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVDictionary * encoder_opts
Definition: ffmpeg.h:507
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:1251
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:110
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:3371
int height
Definition: frame.h:358
FILE * out
Definition: movenc.c:54
InputFilter ** inputs
Definition: ffmpeg.h:288
#define av_freep(p)
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:1183
enum AVPixelFormat hwaccel_retrieved_pix_fmt
Definition: ffmpeg.h:375
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:650
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: ffmpeg.c:2212
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:70
static int send_filter_eof(InputStream *ist)
Definition: ffmpeg.c:2533
void av_bsf_free(AVBSFContext **pctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:40
OutputFile ** output_files
Definition: ffmpeg.c:154
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
void av_fifo_freep(AVFifoBuffer **f)
Free an AVFifoBuffer and reset pointer to NULL.
Definition: fifo.c:63
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1023
#define av_malloc_array(a, b)
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
enum AVHWDeviceType device_type
The device type associated with the configuration.
Definition: codec.h:446
static void flush_encoders(void)
Definition: ffmpeg.c:1848
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
Definition: codec_par.h:64
int copy_tb
Definition: ffmpeg_opt.c:162
int64_t min_pts
Definition: ffmpeg.h:320
int initialized
Definition: ffmpeg.h:519
static volatile int received_sigterm
Definition: ffmpeg.c:342
#define FFSWAP(type, a, b)
Definition: common.h:99
int discard
Definition: ffmpeg.h:297
static int get_input_packet(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:4096
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:332
int stream_index
Definition: packet.h:357
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:905
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:364
#define ABORT_ON_FLAG_EMPTY_OUTPUT_STREAM
Definition: ffmpeg.h:434
int depth
Number of bits in the component.
Definition: pixdesc.h:58
enum AVSubtitleType type
Definition: avcodec.h:2680
int64_t first_pts
Definition: ffmpeg.h:456
int nb_inputs
Definition: ffmpeg.h:289
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
Definition: utils.c:2118
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:931
#define DECODING_FOR_OST
Definition: ffmpeg.h:300
int index
Definition: ffmpeg.h:445
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:1000
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
OSTFinished
Definition: ffmpeg.h:438
This structure stores compressed data.
Definition: packet.h:332
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:1170
AVCodecParameters * par_in
Parameters of the input stream.
Definition: bsf.h:77
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: encode.c:347
AVFifoBuffer * frame_queue
Definition: ffmpeg.h:241
int debug_ts
Definition: ffmpeg_opt.c:163
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3829
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:366
static void sigterm_handler(int sig)
Definition: ffmpeg.c:349
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will be presented to the user.
Definition: packet.h:348
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: utils.c:136
void * opaque
Private data of the user, can be used to carry app specific stuff.
Definition: avcodec.h:568
for(j=16;j >0;--j)
#define FFMAX3(a, b, c)
Definition: common.h:95
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
#define tb
Definition: regdef.h:68
AVProgram ** programs
Definition: avformat.h:1531
#define AV_DISPOSITION_ORIGINAL
Definition: avformat.h:823
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:147
InputStream ** input_streams
Definition: ffmpeg.c:147
static unsigned dup_warning
Definition: ffmpeg.c:136
const HWAccel hwaccels[]
Definition: ffmpeg_opt.c:133
Definition: ffmpeg.h:429
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:840
uint8_t * subtitle_header
Header containing style information for text subtitles.
Definition: avcodec.h:2014
static uint8_t tmp[11]
Definition: aes_ctr.c:26