FFmpeg  4.1.11
ffmpeg.c
Go to the documentation of this file.
1 /*
2  * Copyright (c) 2000-2003 Fabrice Bellard
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * multimedia converter based on the FFmpeg libraries
24  */
25 
26 #include "config.h"
27 #include <ctype.h>
28 #include <string.h>
29 #include <math.h>
30 #include <stdlib.h>
31 #include <errno.h>
32 #include <limits.h>
33 #include <stdatomic.h>
34 #include <stdint.h>
35 
36 #if HAVE_IO_H
37 #include <io.h>
38 #endif
39 #if HAVE_UNISTD_H
40 #include <unistd.h>
41 #endif
42 
43 #include "libavformat/avformat.h"
44 #include "libavdevice/avdevice.h"
46 #include "libavutil/opt.h"
48 #include "libavutil/parseutils.h"
49 #include "libavutil/samplefmt.h"
50 #include "libavutil/fifo.h"
51 #include "libavutil/hwcontext.h"
52 #include "libavutil/internal.h"
53 #include "libavutil/intreadwrite.h"
54 #include "libavutil/dict.h"
55 #include "libavutil/display.h"
56 #include "libavutil/mathematics.h"
57 #include "libavutil/pixdesc.h"
58 #include "libavutil/avstring.h"
59 #include "libavutil/libm.h"
60 #include "libavutil/imgutils.h"
61 #include "libavutil/timestamp.h"
62 #include "libavutil/bprint.h"
63 #include "libavutil/time.h"
64 #include "libavutil/thread.h"
66 #include "libavcodec/mathops.h"
67 #include "libavformat/os_support.h"
68 
69 # include "libavfilter/avfilter.h"
70 # include "libavfilter/buffersrc.h"
71 # include "libavfilter/buffersink.h"
72 
73 #if HAVE_SYS_RESOURCE_H
74 #include <sys/time.h>
75 #include <sys/types.h>
76 #include <sys/resource.h>
77 #elif HAVE_GETPROCESSTIMES
78 #include <windows.h>
79 #endif
80 #if HAVE_GETPROCESSMEMORYINFO
81 #include <windows.h>
82 #include <psapi.h>
83 #endif
84 #if HAVE_SETCONSOLECTRLHANDLER
85 #include <windows.h>
86 #endif
87 
88 
89 #if HAVE_SYS_SELECT_H
90 #include <sys/select.h>
91 #endif
92 
93 #if HAVE_TERMIOS_H
94 #include <fcntl.h>
95 #include <sys/ioctl.h>
96 #include <sys/time.h>
97 #include <termios.h>
98 #elif HAVE_KBHIT
99 #include <conio.h>
100 #endif
101 
102 #include <time.h>
103 
104 #include "ffmpeg.h"
105 #include "cmdutils.h"
106 
107 #include "libavutil/avassert.h"
108 
/* Program identity strings consumed by cmdutils for the version banner. */
109 const char program_name[] = "ffmpeg";
110 const int program_birth_year = 2000;
111 
/* Destination stream for -vstats / -vstats_file per-frame video statistics. */
112 static FILE *vstats_file;
113 
/* Variable names usable in -force_key_frames expressions; NULL-terminated. */
114 const char *const forced_keyframes_const_names[] = {
115  "n",
116  "n_forced",
117  "prev_forced_n",
118  "prev_forced_t",
119  "t",
120  NULL
121 };
122 
/* Wall-clock / CPU-time triple used by -benchmark reporting.
 * NOTE(review): the closing "} BenchmarkTimeStamps;" line (127) is missing
 * from this dump, as are lines 130/132 around the prototypes below. */
123 typedef struct BenchmarkTimeStamps {
124  int64_t real_usec;
125  int64_t user_usec;
126  int64_t sys_usec;
128 
/* Forward declarations for helpers defined later in the file. */
129 static void do_video_stats(OutputStream *ost, int frame_size);
131 static int64_t getmaxrss(void);
133 
/* Global transcode-loop state and statistics counters. */
134 static int run_as_daemon = 0;
/* Frames duplicated/dropped by video sync; dup_warning is the next
   duplication count at which a warning is emitted (then multiplied by 10). */
135 static int nb_frames_dup = 0;
136 static unsigned dup_warning = 1000;
137 static int nb_frames_drop = 0;
/* decode_error_stat[0] = decoded ok, [1] = decode errors (for -xerror). */
138 static int64_t decode_error_stat[2];
139 
140 static int want_sdp = 1;
141 
144 
146 
151 
156 
159 
160 #if HAVE_TERMIOS_H
161 
162 /* init terminal so that we can grab keys */
163 static struct termios oldtty;
/* Set once term_init() has saved oldtty; tells term_exit_sigsafe() that the
   saved terminal attributes must be restored. */
164 static int restore_tty;
165 #endif
166 
167 #if HAVE_THREADS
168 static void free_input_threads(void);
169 #endif
170 
171 /* sub2video hack:
172  Convert subtitles to video with alpha to insert them in filter graphs.
173  This is a temporary solution until libavfilter gets real subtitles support.
174  */
175 
/* NOTE(review): the function header line (176) is missing from this dump;
 * by its body this (re)allocates ist->sub2video.frame as a blank canvas sized
 * from the decoder (falling back to ist->sub2video.w/h) and zero-fills the
 * first plane. Line 184 (presumably the frame->format assignment) is also
 * missing — confirm against upstream. Returns 0 or a negative AVERROR. */
177 {
178  int ret;
179  AVFrame *frame = ist->sub2video.frame;
180 
181  av_frame_unref(frame);
182  ist->sub2video.frame->width = ist->dec_ctx->width ? ist->dec_ctx->width : ist->sub2video.w;
183  ist->sub2video.frame->height = ist->dec_ctx->height ? ist->dec_ctx->height : ist->sub2video.h;
185  if ((ret = av_frame_get_buffer(frame, 32)) < 0)
186  return ret;
/* Only the first plane is cleared: the canvas is a packed 4-bytes-per-pixel
   format, so plane 0 is the whole image. */
187  memset(frame->data[0], 0, frame->height * frame->linesize[0]);
188  return 0;
189 }
190 
191 static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h,
192  AVSubtitleRect *r)
193 {
194  uint32_t *pal, *dst2;
195  uint8_t *src, *src2;
196  int x, y;
197 
198  if (r->type != SUBTITLE_BITMAP) {
199  av_log(NULL, AV_LOG_WARNING, "sub2video: non-bitmap subtitle\n");
200  return;
201  }
202  if (r->x < 0 || r->x + r->w > w || r->y < 0 || r->y + r->h > h) {
203  av_log(NULL, AV_LOG_WARNING, "sub2video: rectangle (%d %d %d %d) overflowing %d %d\n",
204  r->x, r->y, r->w, r->h, w, h
205  );
206  return;
207  }
208 
209  dst += r->y * dst_linesize + r->x * 4;
210  src = r->data[0];
211  pal = (uint32_t *)r->data[1];
212  for (y = 0; y < r->h; y++) {
213  dst2 = (uint32_t *)dst;
214  src2 = src;
215  for (x = 0; x < r->w; x++)
216  *(dst2++) = pal[*(src2++)];
217  dst += dst_linesize;
218  src += r->linesize[0];
219  }
220 }
221 
/* Push the current sub2video canvas (stamped with pts) into every filter
 * this input stream feeds, and remember the pts in sub2video.last_pts.
 * NOTE(review): lines 232-233 — the flags argument of
 * av_buffersrc_add_frame_flags() — are missing from this dump. */
222 static void sub2video_push_ref(InputStream *ist, int64_t pts)
223 {
224  AVFrame *frame = ist->sub2video.frame;
225  int i;
226  int ret;
227 
228  av_assert1(frame->data[0]);
229  ist->sub2video.last_pts = frame->pts = pts;
230  for (i = 0; i < ist->nb_filters; i++) {
231  ret = av_buffersrc_add_frame_flags(ist->filters[i]->filter, frame,
/* EOF from the buffer source is not an error here; anything else negative
   is only worth a warning — the hack is best-effort. */
234  if (ret != AVERROR_EOF && ret < 0)
235  av_log(NULL, AV_LOG_WARNING, "Error while add the frame to buffer source(%s).\n",
236  av_err2str(ret));
237  }
238 }
239 
/* Render a subtitle (or, with sub == NULL, a clearing/blank frame) onto the
 * sub2video canvas and push it to the filters. Computes pts/end_pts in the
 * stream time base from the subtitle display times.
 * NOTE(review): the signature line (240) is missing from this dump, as is
 * line 262 (the av_log() call opening, paired with the message on 263). */
241 {
242  AVFrame *frame = ist->sub2video.frame;
/* NOTE(review): dst is int8_t* here but sub2video_copy_rect() takes
   uint8_t* — signedness mismatch worth confirming against upstream. */
243  int8_t *dst;
244  int dst_linesize;
245  int num_rects, i;
246  int64_t pts, end_pts;
247 
248  if (!frame)
249  return;
250  if (sub) {
/* display times are in ms relative to sub->pts (AV_TIME_BASE units). */
251  pts = av_rescale_q(sub->pts + sub->start_display_time * 1000LL,
252  AV_TIME_BASE_Q, ist->st->time_base);
253  end_pts = av_rescale_q(sub->pts + sub->end_display_time * 1000LL,
254  AV_TIME_BASE_Q, ist->st->time_base);
255  num_rects = sub->num_rects;
256  } else {
/* no subtitle: emit an empty canvas starting where the last one ended. */
257  pts = ist->sub2video.end_pts;
258  end_pts = INT64_MAX;
259  num_rects = 0;
260  }
261  if (sub2video_get_blank_frame(ist) < 0) {
263  "Impossible to get a blank canvas.\n");
264  return;
265  }
266  dst = frame->data [0];
267  dst_linesize = frame->linesize[0];
268  for (i = 0; i < num_rects; i++)
269  sub2video_copy_rect(dst, dst_linesize, frame->width, frame->height, sub->rects[i]);
270  sub2video_push_ref(ist, pts);
271  ist->sub2video.end_pts = end_pts;
272 }
273 
274 static void sub2video_heartbeat(InputStream *ist, int64_t pts)
275 {
276  InputFile *infile = input_files[ist->file_index];
277  int i, j, nb_reqs;
278  int64_t pts2;
279 
280  /* When a frame is read from a file, examine all sub2video streams in
281  the same file and send the sub2video frame again. Otherwise, decoded
282  video frames could be accumulating in the filter graph while a filter
283  (possibly overlay) is desperately waiting for a subtitle frame. */
284  for (i = 0; i < infile->nb_streams; i++) {
285  InputStream *ist2 = input_streams[infile->ist_index + i];
286  if (!ist2->sub2video.frame)
287  continue;
288  /* subtitles seem to be usually muxed ahead of other streams;
289  if not, subtracting a larger time here is necessary */
290  pts2 = av_rescale_q(pts, ist->st->time_base, ist2->st->time_base) - 1;
291  /* do not send the heartbeat frame if the subtitle is already ahead */
292  if (pts2 <= ist2->sub2video.last_pts)
293  continue;
294  if (pts2 >= ist2->sub2video.end_pts ||
295  (!ist2->sub2video.frame->data[0] && ist2->sub2video.end_pts < INT64_MAX))
296  sub2video_update(ist2, NULL);
297  for (j = 0, nb_reqs = 0; j < ist2->nb_filters; j++)
298  nb_reqs += av_buffersrc_get_nb_failed_requests(ist2->filters[j]->filter);
299  if (nb_reqs)
300  sub2video_push_ref(ist2, pts2);
301  }
302 }
303 
304 static void sub2video_flush(InputStream *ist)
305 {
306  int i;
307  int ret;
308 
309  if (ist->sub2video.end_pts < INT64_MAX)
310  sub2video_update(ist, NULL);
311  for (i = 0; i < ist->nb_filters; i++) {
312  ret = av_buffersrc_add_frame(ist->filters[i]->filter, NULL);
313  if (ret != AVERROR_EOF && ret < 0)
314  av_log(NULL, AV_LOG_WARNING, "Flush the frame error.\n");
315  }
316 }
317 
318 /* end of sub2video hack */
319 
/* Restore the saved terminal attributes. Called from the signal handler
 * path, so it must stay async-signal-safe: tcsetattr() only, no logging
 * or allocation. */
320 static void term_exit_sigsafe(void)
321 {
322 #if HAVE_TERMIOS_H
323  if(restore_tty)
324  tcsetattr (0, TCSANOW, &oldtty);
325 #endif
326 }
327 
/* Public terminal teardown: flush the log layer, then restore the tty.
 * NOTE(review): line 331 — presumably the term_exit_sigsafe() call — is
 * missing from this dump. */
328 void term_exit(void)
329 {
330  av_log(NULL, AV_LOG_QUIET, "%s", "");
332 }
333 
/* Signal-handler communication: the last terminating signal received and a
 * count of signals, read by the main loop to shut down gracefully.
 * NOTE(review): line 336 (presumably the atomic transcode_init_done flag
 * referenced by ffmpeg_cleanup) is missing from this dump. */
334 static volatile int received_sigterm = 0;
335 static volatile int received_nb_signals = 0;
/* Set once cleanup is done; CtrlHandler spins on this before hard exit. */
337 static volatile int ffmpeg_exited = 0;
338 static int main_return_code = 0;
339 
/* Signal handler: record the signal for the main loop; after more than three
 * signals, write a message straight to stderr and hard-exit with code 123.
 * Only async-signal-safe calls (write/exit) are used on the hard path.
 * NOTE(review): line 341 (the "sigterm_handler(int sig)" name line) and
 * lines 345-346 (presumably incrementing received_nb_signals and calling
 * term_exit_sigsafe()) are missing from this dump. */
340 static void
342 {
343  int ret;
344  received_sigterm = sig;
347  if(received_nb_signals > 3) {
348  ret = write(2/*STDERR_FILENO*/, "Received > 3 system signals, hard exiting\n",
349  strlen("Received > 3 system signals, hard exiting\n"));
/* write() result checked only to silence warn_unused_result. */
350  if (ret < 0) { /* Do nothing */ };
351  exit(123);
352  }
353 }
354 
355 #if HAVE_SETCONSOLECTRLHANDLER
356 static BOOL WINAPI CtrlHandler(DWORD fdwCtrlType)
357 {
358  av_log(NULL, AV_LOG_DEBUG, "\nReceived windows signal %ld\n", fdwCtrlType);
359 
360  switch (fdwCtrlType)
361  {
362  case CTRL_C_EVENT:
363  case CTRL_BREAK_EVENT:
364  sigterm_handler(SIGINT);
365  return TRUE;
366 
367  case CTRL_CLOSE_EVENT:
368  case CTRL_LOGOFF_EVENT:
369  case CTRL_SHUTDOWN_EVENT:
370  sigterm_handler(SIGTERM);
371  /* Basically, with these 3 events, when we return from this method the
372  process is hard terminated, so stall as long as we need to
373  to try and let the main thread(s) clean up and gracefully terminate
374  (we have at most 5 seconds, but should be done far before that). */
375  while (!ffmpeg_exited) {
376  Sleep(0);
377  }
378  return TRUE;
379 
380  default:
381  av_log(NULL, AV_LOG_ERROR, "Received unknown windows signal %ld\n", fdwCtrlType);
382  return FALSE;
383  }
384 }
385 #endif
386 
/* Install signal handlers and, on termios systems, put the controlling
 * terminal into raw-ish mode so single keypresses can be read.
 * NOTE(review): line 390 is missing from this dump — given the orphan
 * closing brace before #endif it is presumably "if (!run_as_daemon &&
 * stdin_interaction) {" (or similar) guarding the tty setup. */
387 void term_init(void)
388 {
389 #if HAVE_TERMIOS_H
391  struct termios tty;
392  if (tcgetattr (0, &tty) == 0) {
/* remember original settings so term_exit_sigsafe() can restore them */
393  oldtty = tty;
394  restore_tty = 1;
395 
396  tty.c_iflag &= ~(IGNBRK|BRKINT|PARMRK|ISTRIP
397  |INLCR|IGNCR|ICRNL|IXON);
398  tty.c_oflag |= OPOST;
399  tty.c_lflag &= ~(ECHO|ECHONL|ICANON|IEXTEN);
400  tty.c_cflag &= ~(CSIZE|PARENB);
401  tty.c_cflag |= CS8;
/* one byte at a time, no inter-byte timeout */
402  tty.c_cc[VMIN] = 1;
403  tty.c_cc[VTIME] = 0;
404 
405  tcsetattr (0, TCSANOW, &tty);
406  }
407  signal(SIGQUIT, sigterm_handler); /* Quit (POSIX). */
408  }
409 #endif
410 
411  signal(SIGINT , sigterm_handler); /* Interrupt (ANSI). */
412  signal(SIGTERM, sigterm_handler); /* Termination (ANSI). */
413 #ifdef SIGXCPU
414  signal(SIGXCPU, sigterm_handler);
415 #endif
416 #ifdef SIGPIPE
417  signal(SIGPIPE, SIG_IGN); /* Broken pipe (POSIX). */
418 #endif
419 #if HAVE_SETCONSOLECTRLHANDLER
420  SetConsoleCtrlHandler((PHANDLER_ROUTINE) CtrlHandler, TRUE);
421 #endif
422 }
423 
424 /* read a key without blocking */
/* Poll stdin for a single keypress without blocking.
 * Returns the byte read, -1 when no input is available, or (on the termios
 * path) the read() result when it does not deliver exactly one byte. */
static int read_key(void)
{
    unsigned char ch;
#if HAVE_TERMIOS_H
    struct timeval timeout;
    fd_set readable;
    int avail;

    FD_ZERO(&readable);
    FD_SET(0, &readable);
    timeout.tv_sec  = 0;
    timeout.tv_usec = 0;
    /* zero timeout: pure poll on fd 0 */
    avail = select(1, &readable, NULL, NULL, &timeout);
    if (avail > 0) {
        avail = read(0, &ch, 1);
        if (avail == 1)
            return ch;

        return avail;
    }
#elif HAVE_KBHIT
# if HAVE_PEEKNAMEDPIPE
    static int is_pipe;
    static HANDLE input_handle;
    DWORD dw, nchars;

    if (!input_handle) {
        input_handle = GetStdHandle(STD_INPUT_HANDLE);
        is_pipe      = !GetConsoleMode(input_handle, &dw);
    }

    if (is_pipe) {
        /* When running under a GUI, you will end here. */
        if (!PeekNamedPipe(input_handle, NULL, 0, NULL, &nchars, NULL)) {
            // input pipe may have been closed by the program that ran ffmpeg
            return -1;
        }
        if (nchars != 0) {
            read(0, &ch, 1);
            return ch;
        }
        return -1;
    }
# endif
    if (kbhit())
        return getch();
#endif
    return -1;
}
475 
/* AVIOInterruptCB callback: tell libav* to abort blocking operations once a
 * termination signal has been received.
 * NOTE(review): the return statement (line 478, presumably
 * "return received_nb_signals > atomic_load(&transcode_init_done);" or
 * similar) is missing from this dump. */
476 static int decode_interrupt_cb(void *ctx)
477 {
479 }
480 
482 
/* Global teardown registered via register_exit(): free filtergraphs, close
 * and free all output/input files and streams, close the vstats file and
 * report how the run ended. Several lines are missing from this dump
 * (e.g. 494, 506, 531, 536, 542, 557, 562, 566, 572-573, 581, 595-600,
 * 605-606, 612, 616, 625) — each gap is a statement lost to extraction;
 * confirm against upstream before treating this listing as complete. */
483 static void ffmpeg_cleanup(int ret)
484 {
485  int i, j;
486 
487  if (do_benchmark) {
488  int maxrss = getmaxrss() / 1024;
489  av_log(NULL, AV_LOG_INFO, "bench: maxrss=%ikB\n", maxrss);
490  }
491 
/* free filtergraphs: drain each input's queued frames and sub queue first */
492  for (i = 0; i < nb_filtergraphs; i++) {
493  FilterGraph *fg = filtergraphs[i];
495  for (j = 0; j < fg->nb_inputs; j++) {
496  while (av_fifo_size(fg->inputs[j]->frame_queue)) {
497  AVFrame *frame;
498  av_fifo_generic_read(fg->inputs[j]->frame_queue, &frame,
499  sizeof(frame), NULL);
500  av_frame_free(&frame);
501  }
502  av_fifo_freep(&fg->inputs[j]->frame_queue);
503  if (fg->inputs[j]->ist->sub2video.sub_queue) {
504  while (av_fifo_size(fg->inputs[j]->ist->sub2video.sub_queue)) {
505  AVSubtitle sub;
507  &sub, sizeof(sub), NULL);
508  avsubtitle_free(&sub);
509  }
511  }
513  av_freep(&fg->inputs[j]->name);
514  av_freep(&fg->inputs[j]);
515  }
516  av_freep(&fg->inputs);
517  for (j = 0; j < fg->nb_outputs; j++) {
518  av_freep(&fg->outputs[j]->name);
519  av_freep(&fg->outputs[j]->formats);
520  av_freep(&fg->outputs[j]->channel_layouts);
521  av_freep(&fg->outputs[j]->sample_rates);
522  av_freep(&fg->outputs[j]);
523  }
524  av_freep(&fg->outputs);
525  av_freep(&fg->graph_desc);
526 
527  av_freep(&filtergraphs[i]);
528  }
529  av_freep(&filtergraphs);
530 
532 
533  /* close files */
534  for (i = 0; i < nb_output_files; i++) {
535  OutputFile *of = output_files[i];
537  if (!of)
538  continue;
539  s = of->ctx;
/* only close the AVIOContext when the muxer owns a real file */
540  if (s && s->oformat && !(s->oformat->flags & AVFMT_NOFILE))
541  avio_closep(&s->pb);
543  av_dict_free(&of->opts);
544 
545  av_freep(&output_files[i]);
546  }
547  for (i = 0; i < nb_output_streams; i++) {
548  OutputStream *ost = output_streams[i];
549 
550  if (!ost)
551  continue;
552 
553  for (j = 0; j < ost->nb_bitstream_filters; j++)
554  av_bsf_free(&ost->bsf_ctx[j]);
555  av_freep(&ost->bsf_ctx);
556 
558  av_frame_free(&ost->last_frame);
559  av_dict_free(&ost->encoder_opts);
560 
561  av_freep(&ost->forced_keyframes);
563  av_freep(&ost->avfilter);
564  av_freep(&ost->logfile_prefix);
565 
567  ost->audio_channels_mapped = 0;
568 
569  av_dict_free(&ost->sws_dict);
570  av_dict_free(&ost->swr_opts);
571 
574 
/* packets still waiting for the muxer header must be unreffed by hand */
575  if (ost->muxing_queue) {
576  while (av_fifo_size(ost->muxing_queue)) {
577  AVPacket pkt;
578  av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
579  av_packet_unref(&pkt);
580  }
582  }
583 
584  av_freep(&output_streams[i]);
585  }
586 #if HAVE_THREADS
587  free_input_threads();
588 #endif
589  for (i = 0; i < nb_input_files; i++) {
590  avformat_close_input(&input_files[i]->ctx);
591  av_freep(&input_files[i]);
592  }
593  for (i = 0; i < nb_input_streams; i++) {
594  InputStream *ist = input_streams[i];
595 
598  av_dict_free(&ist->decoder_opts);
601  av_freep(&ist->filters);
602  av_freep(&ist->hwaccel_device);
603  av_freep(&ist->dts_buffer);
604 
606 
607  av_freep(&input_streams[i]);
608  }
609 
610  if (vstats_file) {
611  if (fclose(vstats_file))
613  "Error closing vstats file, loss of information possible: %s\n",
614  av_err2str(AVERROR(errno)));
615  }
617 
618  av_freep(&input_streams);
619  av_freep(&input_files);
620  av_freep(&output_streams);
621  av_freep(&output_files);
622 
623  uninit_opts();
624 
626 
627  if (received_sigterm) {
628  av_log(NULL, AV_LOG_INFO, "Exiting normally, received signal %d.\n",
629  (int) received_sigterm);
630  } else if (ret && atomic_load(&transcode_init_done)) {
631  av_log(NULL, AV_LOG_INFO, "Conversion failed!\n");
632  }
633  term_exit();
/* unblocks CtrlHandler's wait loop on Windows */
634  ffmpeg_exited = 1;
635 }
636 
/* Remove from dictionary *a every key present in dictionary b.
 * NOTE(review): the signature line (637, "void remove_avoptions(AVDictionary
 * **a, AVDictionary *b)") and the loop body line (642, presumably
 * "av_dict_set(a, t->key, NULL, AV_DICT_MATCH_CASE);") are missing from
 * this dump. */
638 {
639  AVDictionaryEntry *t = NULL;
640 
641  while ((t = av_dict_get(b, "", t, AV_DICT_IGNORE_SUFFIX))) {
643  }
644 }
645 
/* Abort with a fatal error if any option in m was not consumed — i.e. the
 * user passed an option no component recognized.
 * NOTE(review): the signature line (646) and the AVDictionaryEntry
 * declaration (648) are missing from this dump. */
647 {
649  if ((t = av_dict_get(m, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
650  av_log(NULL, AV_LOG_FATAL, "Option %s not found.\n", t->key);
651  exit_program(1);
652  }
653 }
654 
655 static void abort_codec_experimental(AVCodec *c, int encoder)
656 {
657  exit_program(1);
658 }
659 
/* With -benchmark_all, log the user/sys/real time consumed since the last
 * call, labelled by the printf-style fmt arguments; always refreshes the
 * current_time snapshot.
 * NOTE(review): line 663 (presumably "BenchmarkTimeStamps t =
 * get_benchmark_time_stamps();") and line 671 (the av_log() call opening)
 * are missing from this dump. */
660 static void update_benchmark(const char *fmt, ...)
661 {
662  if (do_benchmark_all) {
664  va_list va;
665  char buf[1024];
666 
667  if (fmt) {
668  va_start(va, fmt);
669  vsnprintf(buf, sizeof(buf), fmt, va);
670  va_end(va);
672  "bench: %8" PRIu64 " user %8" PRIu64 " sys %8" PRIu64 " real %s \n",
673  t.user_usec - current_time.user_usec,
674  t.sys_usec - current_time.sys_usec,
675  t.real_usec - current_time.real_usec, buf);
676  }
677  current_time = t;
678  }
679 }
680 
/* Mark every output stream finished: the given stream gets this_stream
 * OR-ed into its finished flags, all others get others.
 * NOTE(review): the signature line (681, presumably "static void
 * close_all_output_streams(OutputStream *ost, OSTFinished this_stream,
 * OSTFinished others)") is missing from this dump. */
682 {
683  int i;
684  for (i = 0; i < nb_output_streams; i++) {
685  OutputStream *ost2 = output_streams[i];
686  ost2->finished |= ost == ost2 ? this_stream : others;
687  }
688 }
689 
/* Hand one packet to the muxer for stream ost: enforce -frames limits,
 * buffer the packet while the muxer header is not yet written, extract
 * encoder quality/error side data for video, rescale timestamps to the
 * stream time base, repair invalid or non-monotonous DTS, then call
 * av_interleaved_write_frame(). The packet is always consumed.
 * NOTE(review): several lines are missing from this dump — 719 (av_log
 * opening before the "Too many packets" message), 736-737 (presumably the
 * VSYNC_DROP condition that clears pts/dts), 742 (the side-data "sd"
 * declaration via av_packet_get_side_data), 809 (the media-type string
 * argument of the debug av_log) and 820 (presumably the
 * close_all_output_streams() call on write failure). */
690 static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
691 {
692  AVFormatContext *s = of->ctx;
693  AVStream *st = ost->st;
694  int ret;
695 
696  /*
697  * Audio encoders may split the packets -- #frames in != #packets out.
698  * But there is no reordering, so we can limit the number of output packets
699  * by simply dropping them here.
700  * Counting encoded video frames needs to be done separately because of
701  * reordering, see do_video_out().
702  * Do not count the packet when unqueued because it has been counted when queued.
703  */
704  if (!(st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO && ost->encoding_needed) && !unqueue) {
705  if (ost->frame_number >= ost->max_frames) {
706  av_packet_unref(pkt);
707  return;
708  }
709  ost->frame_number++;
710  }
711 
712  if (!of->header_written) {
713  AVPacket tmp_pkt = {0};
714  /* the muxer is not initialized yet, buffer the packet */
715  if (!av_fifo_space(ost->muxing_queue)) {
/* grow geometrically but never past -max_muxing_queue_size */
716  int new_size = FFMIN(2 * av_fifo_size(ost->muxing_queue),
717  ost->max_muxing_queue_size);
718  if (new_size <= av_fifo_size(ost->muxing_queue)) {
720  "Too many packets buffered for output stream %d:%d.\n",
721  ost->file_index, ost->st->index);
722  exit_program(1);
723  }
724  ret = av_fifo_realloc2(ost->muxing_queue, new_size);
725  if (ret < 0)
726  exit_program(1);
727  }
728  ret = av_packet_make_refcounted(pkt);
729  if (ret < 0)
730  exit_program(1);
731  av_packet_move_ref(&tmp_pkt, pkt);
732  av_fifo_generic_write(ost->muxing_queue, &tmp_pkt, sizeof(tmp_pkt), NULL);
733  return;
734  }
735 
738  pkt->pts = pkt->dts = AV_NOPTS_VALUE;
739 
740  if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
741  int i;
743  NULL);
/* quality/picture-type/error values come from the encoder's
   AV_PKT_DATA_QUALITY_STATS side data layout */
744  ost->quality = sd ? AV_RL32(sd) : -1;
745  ost->pict_type = sd ? sd[4] : AV_PICTURE_TYPE_NONE;
746 
747  for (i = 0; i<FF_ARRAY_ELEMS(ost->error); i++) {
748  if (sd && i < sd[5])
749  ost->error[i] = AV_RL64(sd + 8 + 8*i);
750  else
751  ost->error[i] = -1;
752  }
753 
754  if (ost->frame_rate.num && ost->is_cfr) {
755  if (pkt->duration > 0)
756  av_log(NULL, AV_LOG_WARNING, "Overriding packet duration by frame rate, this should not happen\n");
757  pkt->duration = av_rescale_q(1, av_inv_q(ost->frame_rate),
758  ost->mux_timebase);
759  }
760  }
761 
762  av_packet_rescale_ts(pkt, ost->mux_timebase, ost->st->time_base);
763 
764  if (!(s->oformat->flags & AVFMT_NOTIMESTAMPS)) {
765  if (pkt->dts != AV_NOPTS_VALUE &&
766  pkt->pts != AV_NOPTS_VALUE &&
767  pkt->dts > pkt->pts) {
768  av_log(s, AV_LOG_WARNING, "Invalid DTS: %"PRId64" PTS: %"PRId64" in output stream %d:%d, replacing by guess\n",
769  pkt->dts, pkt->pts,
770  ost->file_index, ost->st->index);
/* pick the median of {pts, dts, last_mux_dts+1} for both stamps */
771  pkt->pts =
772  pkt->dts = pkt->pts + pkt->dts + ost->last_mux_dts + 1
773  - FFMIN3(pkt->pts, pkt->dts, ost->last_mux_dts + 1)
774  - FFMAX3(pkt->pts, pkt->dts, ost->last_mux_dts + 1);
775  }
777  pkt->dts != AV_NOPTS_VALUE &&
778  !(st->codecpar->codec_id == AV_CODEC_ID_VP9 && ost->stream_copy) &&
779  ost->last_mux_dts != AV_NOPTS_VALUE) {
/* strict muxers require strictly increasing DTS, others non-decreasing */
780  int64_t max = ost->last_mux_dts + !(s->oformat->flags & AVFMT_TS_NONSTRICT);
781  if (pkt->dts < max) {
782  int loglevel = max - pkt->dts > 2 || st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO ? AV_LOG_WARNING : AV_LOG_DEBUG;
783  av_log(s, loglevel, "Non-monotonous DTS in output stream "
784  "%d:%d; previous: %"PRId64", current: %"PRId64"; ",
785  ost->file_index, ost->st->index, ost->last_mux_dts, pkt->dts);
786  if (exit_on_error) {
787  av_log(NULL, AV_LOG_FATAL, "aborting.\n");
788  exit_program(1);
789  }
790  av_log(s, loglevel, "changing to %"PRId64". This may result "
791  "in incorrect timestamps in the output file.\n",
792  max);
793  if (pkt->pts >= pkt->dts)
794  pkt->pts = FFMAX(pkt->pts, max);
795  pkt->dts = max;
796  }
797  }
798  }
799  ost->last_mux_dts = pkt->dts;
800 
801  ost->data_size += pkt->size;
802  ost->packets_written++;
803 
804  pkt->stream_index = ost->index;
805 
806  if (debug_ts) {
807  av_log(NULL, AV_LOG_INFO, "muxer <- type:%s "
808  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s size:%d\n",
810  av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, &ost->st->time_base),
811  av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, &ost->st->time_base),
812  pkt->size
813  );
814  }
815 
816  ret = av_interleaved_write_frame(s, pkt);
817  if (ret < 0) {
818  print_error("av_interleaved_write_frame()", ret);
819  main_return_code = 1;
821  }
822  av_packet_unref(pkt);
823 }
824 
/* Mark this output stream's encoder finished; with -shortest, also clamp the
 * whole output file's recording time to this stream's current end.
 * NOTE(review): the signature line (825, "static void
 * close_output_stream(OutputStream *ost)") is missing from this dump. */
826 {
827  OutputFile *of = output_files[ost->file_index];
828 
829  ost->finished |= ENCODER_FINISHED;
830  if (of->shortest) {
831  int64_t end = av_rescale_q(ost->sync_opts - ost->first_pts, ost->enc_ctx->time_base, AV_TIME_BASE_Q);
832  of->recording_time = FFMIN(of->recording_time, end);
833  }
834 }
835 
836 /*
837  * Send a single packet to the output, applying any bitstream filters
838  * associated with the output stream. This may result in any number
839  * of packets actually being written, depending on what bitstream
840  * filters are applied. The supplied packet is consumed and will be
841  * blank (as if newly-allocated) when this function returns.
842  *
843  * If eof is set, instead indicate EOF to all bitstream filters and
844  * therefore flush any delayed packets to the output. A blank packet
845  * must be supplied in this case.
846  */
/* NOTE(review): the first signature line (847, "static void
 * output_packet(OutputFile *of, AVPacket *pkt,") is missing from this
 * dump; only its continuation below survived. */
848  OutputStream *ost, int eof)
849 {
850  int ret = 0;
851 
852  /* apply the output bitstream filters, if any */
853  if (ost->nb_bitstream_filters) {
854  int idx;
855 
856  ret = av_bsf_send_packet(ost->bsf_ctx[0], eof ? NULL : pkt);
857  if (ret < 0)
858  goto finish;
859 
/* idx is 1-based: bsf_ctx[idx-1] is drained, bsf_ctx[idx] is fed next;
   idx reaching 0 means the whole chain is drained for now */
860  eof = 0;
861  idx = 1;
862  while (idx) {
863  /* get a packet from the previous filter up the chain */
864  ret = av_bsf_receive_packet(ost->bsf_ctx[idx - 1], pkt);
865  if (ret == AVERROR(EAGAIN)) {
866  ret = 0;
867  idx--;
868  continue;
869  } else if (ret == AVERROR_EOF) {
870  eof = 1;
871  } else if (ret < 0)
872  goto finish;
873 
874  /* send it to the next filter down the chain or to the muxer */
875  if (idx < ost->nb_bitstream_filters) {
876  ret = av_bsf_send_packet(ost->bsf_ctx[idx], eof ? NULL : pkt);
877  if (ret < 0)
878  goto finish;
879  idx++;
880  eof = 0;
881  } else if (eof)
882  goto finish;
883  else
884  write_packet(of, pkt, ost, 0);
885  }
886  } else if (!eof)
887  write_packet(of, pkt, ost, 0);
888 
889 finish:
890  if (ret < 0 && ret != AVERROR_EOF) {
891  av_log(NULL, AV_LOG_ERROR, "Error applying bitstream filters to an output "
892  "packet for stream #%d:%d.\n", ost->file_index, ost->index);
893  if(exit_on_error)
894  exit_program(1);
895  }
896 }
897 
/* Return 0 (and close the stream) once the stream's output position reaches
 * the file's -t recording time; return 1 while encoding should continue.
 * NOTE(review): the signature line (898, "static int
 * check_recording_time(OutputStream *ost)") and line 903 (the av_compare_ts
 * call opening, comparing ost->sync_opts against of->recording_time) are
 * missing from this dump. */
899 {
900  OutputFile *of = output_files[ost->file_index];
901 
902  if (of->recording_time != INT64_MAX &&
904  AV_TIME_BASE_Q) >= 0) {
905  close_output_stream(ost);
906  return 0;
907  }
908  return 1;
909 }
910 
/* Encode one audio frame and mux every packet the encoder returns.
 * Fixes up missing frame pts from sync_opts, advances sync_opts by
 * nb_samples, and rescales packet timestamps to the mux time base.
 * Any encoder error is fatal.
 * NOTE(review): the first signature line (911, "static void
 * do_audio_out(OutputFile *of, OutputStream *ost,") and line 932 are
 * missing from this dump. */
912  AVFrame *frame)
913 {
914  AVCodecContext *enc = ost->enc_ctx;
915  AVPacket pkt;
916  int ret;
917 
918  av_init_packet(&pkt);
919  pkt.data = NULL;
920  pkt.size = 0;
921 
922  if (!check_recording_time(ost))
923  return;
924 
925  if (frame->pts == AV_NOPTS_VALUE || audio_sync_method < 0)
926  frame->pts = ost->sync_opts;
/* next expected pts = this frame's pts plus its sample count */
927  ost->sync_opts = frame->pts + frame->nb_samples;
928  ost->samples_encoded += frame->nb_samples;
929  ost->frames_encoded++;
930 
931  av_assert0(pkt.size || !pkt.data);
933  if (debug_ts) {
934  av_log(NULL, AV_LOG_INFO, "encoder <- type:audio "
935  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
936  av_ts2str(frame->pts), av_ts2timestr(frame->pts, &enc->time_base),
937  enc->time_base.num, enc->time_base.den);
938  }
939 
940  ret = avcodec_send_frame(enc, frame);
941  if (ret < 0)
942  goto error;
943 
/* drain all packets the encoder has ready; EAGAIN means "feed more" */
944  while (1) {
945  ret = avcodec_receive_packet(enc, &pkt);
946  if (ret == AVERROR(EAGAIN))
947  break;
948  if (ret < 0)
949  goto error;
950 
951  update_benchmark("encode_audio %d.%d", ost->file_index, ost->index);
952 
953  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
954 
955  if (debug_ts) {
956  av_log(NULL, AV_LOG_INFO, "encoder -> type:audio "
957  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
958  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
959  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
960  }
961 
962  output_packet(of, &pkt, ost, 0);
963  }
964 
965  return;
966 error:
967  av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
968  exit_program(1);
969 }
970 
/* Encode one subtitle and mux the result. DVB subtitles are encoded twice:
 * once to draw (i == 0) and once with num_rects forced to 0 to clear
 * (i == 1). The subtitle pts is shifted by the output file's start time so
 * -ss/-t accounting in check_recording_time() works. Uses the global
 * subtitle_out buffer (lazily allocated, 1 MiB). */
971 static void do_subtitle_out(OutputFile *of,
972  OutputStream *ost,
973  AVSubtitle *sub)
974 {
975  int subtitle_out_max_size = 1024 * 1024;
976  int subtitle_out_size, nb, i;
977  AVCodecContext *enc;
978  AVPacket pkt;
979  int64_t pts;
980 
981  if (sub->pts == AV_NOPTS_VALUE) {
982  av_log(NULL, AV_LOG_ERROR, "Subtitle packets must have a pts\n");
983  if (exit_on_error)
984  exit_program(1);
985  return;
986  }
987 
988  enc = ost->enc_ctx;
989 
990  if (!subtitle_out) {
991  subtitle_out = av_malloc(subtitle_out_max_size);
992  if (!subtitle_out) {
993  av_log(NULL, AV_LOG_FATAL, "Failed to allocate subtitle_out\n");
994  exit_program(1);
995  }
996  }
997 
998  /* Note: DVB subtitle need one packet to draw them and one other
999  packet to clear them */
1000  /* XXX: signal it in the codec context ? */
1001  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE)
1002  nb = 2;
1003  else
1004  nb = 1;
1005 
1006  /* shift timestamp to honor -ss and make check_recording_time() work with -t */
1007  pts = sub->pts;
1008  if (output_files[ost->file_index]->start_time != AV_NOPTS_VALUE)
1009  pts -= output_files[ost->file_index]->start_time;
1010  for (i = 0; i < nb; i++) {
/* num_rects is temporarily zeroed for the clearing pass and restored
   afterwards so the caller's AVSubtitle stays intact */
1011  unsigned save_num_rects = sub->num_rects;
1012 
1013  ost->sync_opts = av_rescale_q(pts, AV_TIME_BASE_Q, enc->time_base);
1014  if (!check_recording_time(ost))
1015  return;
1016 
1017  sub->pts = pts;
1018  // start_display_time is required to be 0
1019  sub->pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, AV_TIME_BASE_Q);
1020  sub->end_display_time -= sub->start_display_time;
1021  sub->start_display_time = 0;
1022  if (i == 1)
1023  sub->num_rects = 0;
1024 
1025  ost->frames_encoded++;
1026 
1027  subtitle_out_size = avcodec_encode_subtitle(enc, subtitle_out,
1028  subtitle_out_max_size, sub);
1029  if (i == 1)
1030  sub->num_rects = save_num_rects;
1031  if (subtitle_out_size < 0) {
1032  av_log(NULL, AV_LOG_FATAL, "Subtitle encoding failed\n");
1033  exit_program(1);
1034  }
1035 
1036  av_init_packet(&pkt);
1037  pkt.data = subtitle_out;
1038  pkt.size = subtitle_out_size;
1039  pkt.pts = av_rescale_q(sub->pts, AV_TIME_BASE_Q, ost->mux_timebase);
1040  pkt.duration = av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1041  if (enc->codec_id == AV_CODEC_ID_DVB_SUBTITLE) {
1042  /* XXX: the pts correction is handled here. Maybe handling
1043  it in the codec would be better */
1044  if (i == 0)
1045  pkt.pts += av_rescale_q(sub->start_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1046  else
1047  pkt.pts += av_rescale_q(sub->end_display_time, (AVRational){ 1, 1000 }, ost->mux_timebase);
1048  }
1049  pkt.dts = pkt.pts;
1050  output_packet(of, &pkt, ost, 0);
1051  }
1052 }
1053 
1054 static void do_video_out(OutputFile *of,
1055  OutputStream *ost,
1056  AVFrame *next_picture,
1057  double sync_ipts)
1058 {
1059  int ret, format_video_sync;
1060  AVPacket pkt;
1061  AVCodecContext *enc = ost->enc_ctx;
1062  AVCodecParameters *mux_par = ost->st->codecpar;
1063  AVRational frame_rate;
1064  int nb_frames, nb0_frames, i;
1065  double delta, delta0;
1066  double duration = 0;
1067  int frame_size = 0;
1068  InputStream *ist = NULL;
1070 
1071  if (ost->source_index >= 0)
1072  ist = input_streams[ost->source_index];
1073 
1074  frame_rate = av_buffersink_get_frame_rate(filter);
1075  if (frame_rate.num > 0 && frame_rate.den > 0)
1076  duration = 1/(av_q2d(frame_rate) * av_q2d(enc->time_base));
1077 
1078  if(ist && ist->st->start_time != AV_NOPTS_VALUE && ist->st->first_dts != AV_NOPTS_VALUE && ost->frame_rate.num)
1079  duration = FFMIN(duration, 1/(av_q2d(ost->frame_rate) * av_q2d(enc->time_base)));
1080 
1081  if (!ost->filters_script &&
1082  !ost->filters &&
1083  next_picture &&
1084  ist &&
1085  lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base)) > 0) {
1086  duration = lrintf(next_picture->pkt_duration * av_q2d(ist->st->time_base) / av_q2d(enc->time_base));
1087  }
1088 
1089  if (!next_picture) {
1090  //end, flushing
1091  nb0_frames = nb_frames = mid_pred(ost->last_nb0_frames[0],
1092  ost->last_nb0_frames[1],
1093  ost->last_nb0_frames[2]);
1094  } else {
1095  delta0 = sync_ipts - ost->sync_opts; // delta0 is the "drift" between the input frame (next_picture) and where it would fall in the output.
1096  delta = delta0 + duration;
1097 
1098  /* by default, we output a single frame */
1099  nb0_frames = 0; // tracks the number of times the PREVIOUS frame should be duplicated, mostly for variable framerate (VFR)
1100  nb_frames = 1;
1101 
1102  format_video_sync = video_sync_method;
1103  if (format_video_sync == VSYNC_AUTO) {
1104  if(!strcmp(of->ctx->oformat->name, "avi")) {
1105  format_video_sync = VSYNC_VFR;
1106  } else
1107  format_video_sync = (of->ctx->oformat->flags & AVFMT_VARIABLE_FPS) ? ((of->ctx->oformat->flags & AVFMT_NOTIMESTAMPS) ? VSYNC_PASSTHROUGH : VSYNC_VFR) : VSYNC_CFR;
1108  if ( ist
1109  && format_video_sync == VSYNC_CFR
1110  && input_files[ist->file_index]->ctx->nb_streams == 1
1111  && input_files[ist->file_index]->input_ts_offset == 0) {
1112  format_video_sync = VSYNC_VSCFR;
1113  }
1114  if (format_video_sync == VSYNC_CFR && copy_ts) {
1115  format_video_sync = VSYNC_VSCFR;
1116  }
1117  }
1118  ost->is_cfr = (format_video_sync == VSYNC_CFR || format_video_sync == VSYNC_VSCFR);
1119 
1120  if (delta0 < 0 &&
1121  delta > 0 &&
1122  format_video_sync != VSYNC_PASSTHROUGH &&
1123  format_video_sync != VSYNC_DROP) {
1124  if (delta0 < -0.6) {
1125  av_log(NULL, AV_LOG_VERBOSE, "Past duration %f too large\n", -delta0);
1126  } else
1127  av_log(NULL, AV_LOG_DEBUG, "Clipping frame in rate conversion by %f\n", -delta0);
1128  sync_ipts = ost->sync_opts;
1129  duration += delta0;
1130  delta0 = 0;
1131  }
1132 
1133  switch (format_video_sync) {
1134  case VSYNC_VSCFR:
1135  if (ost->frame_number == 0 && delta0 >= 0.5) {
1136  av_log(NULL, AV_LOG_DEBUG, "Not duplicating %d initial frames\n", (int)lrintf(delta0));
1137  delta = duration;
1138  delta0 = 0;
1139  ost->sync_opts = lrint(sync_ipts);
1140  }
1141  case VSYNC_CFR:
1142  // FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
1143  if (frame_drop_threshold && delta < frame_drop_threshold && ost->frame_number) {
1144  nb_frames = 0;
1145  } else if (delta < -1.1)
1146  nb_frames = 0;
1147  else if (delta > 1.1) {
1148  nb_frames = lrintf(delta);
1149  if (delta0 > 1.1)
1150  nb0_frames = lrintf(delta0 - 0.6);
1151  }
1152  break;
1153  case VSYNC_VFR:
1154  if (delta <= -0.6)
1155  nb_frames = 0;
1156  else if (delta > 0.6)
1157  ost->sync_opts = lrint(sync_ipts);
1158  break;
1159  case VSYNC_DROP:
1160  case VSYNC_PASSTHROUGH:
1161  ost->sync_opts = lrint(sync_ipts);
1162  break;
1163  default:
1164  av_assert0(0);
1165  }
1166  }
1167 
1168  nb_frames = FFMIN(nb_frames, ost->max_frames - ost->frame_number);
1169  nb0_frames = FFMIN(nb0_frames, nb_frames);
1170 
1171  memmove(ost->last_nb0_frames + 1,
1172  ost->last_nb0_frames,
1173  sizeof(ost->last_nb0_frames[0]) * (FF_ARRAY_ELEMS(ost->last_nb0_frames) - 1));
1174  ost->last_nb0_frames[0] = nb0_frames;
1175 
1176  if (nb0_frames == 0 && ost->last_dropped) {
1177  nb_frames_drop++;
1179  "*** dropping frame %d from stream %d at ts %"PRId64"\n",
1180  ost->frame_number, ost->st->index, ost->last_frame->pts);
1181  }
1182  if (nb_frames > (nb0_frames && ost->last_dropped) + (nb_frames > nb0_frames)) {
1183  if (nb_frames > dts_error_threshold * 30) {
1184  av_log(NULL, AV_LOG_ERROR, "%d frame duplication too large, skipping\n", nb_frames - 1);
1185  nb_frames_drop++;
1186  return;
1187  }
1188  nb_frames_dup += nb_frames - (nb0_frames && ost->last_dropped) - (nb_frames > nb0_frames);
1189  av_log(NULL, AV_LOG_VERBOSE, "*** %d dup!\n", nb_frames - 1);
1190  if (nb_frames_dup > dup_warning) {
1191  av_log(NULL, AV_LOG_WARNING, "More than %d frames duplicated\n", dup_warning);
1192  dup_warning *= 10;
1193  }
1194  }
1195  ost->last_dropped = nb_frames == nb0_frames && next_picture;
1196 
1197  /* duplicates frame if needed */
1198  for (i = 0; i < nb_frames; i++) {
1199  AVFrame *in_picture;
1200  av_init_packet(&pkt);
1201  pkt.data = NULL;
1202  pkt.size = 0;
1203 
1204  if (i < nb0_frames && ost->last_frame) {
1205  in_picture = ost->last_frame;
1206  } else
1207  in_picture = next_picture;
1208 
1209  if (!in_picture)
1210  return;
1211 
1212  in_picture->pts = ost->sync_opts;
1213 
1214 #if 1
1215  if (!check_recording_time(ost))
1216 #else
1217  if (ost->frame_number >= ost->max_frames)
1218 #endif
1219  return;
1220 
1221  {
1222  int forced_keyframe = 0;
1223  double pts_time;
1224 
1226  ost->top_field_first >= 0)
1227  in_picture->top_field_first = !!ost->top_field_first;
1228 
1229  if (in_picture->interlaced_frame) {
1230  if (enc->codec->id == AV_CODEC_ID_MJPEG)
1231  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TT:AV_FIELD_BB;
1232  else
1233  mux_par->field_order = in_picture->top_field_first ? AV_FIELD_TB:AV_FIELD_BT;
1234  } else
1235  mux_par->field_order = AV_FIELD_PROGRESSIVE;
1236 
1237  in_picture->quality = enc->global_quality;
1238  in_picture->pict_type = 0;
1239 
1240  if (ost->forced_kf_ref_pts == AV_NOPTS_VALUE &&
1241  in_picture->pts != AV_NOPTS_VALUE)
1242  ost->forced_kf_ref_pts = in_picture->pts;
1243 
1244  pts_time = in_picture->pts != AV_NOPTS_VALUE ?
1245  (in_picture->pts - ost->forced_kf_ref_pts) * av_q2d(enc->time_base) : NAN;
1246  if (ost->forced_kf_index < ost->forced_kf_count &&
1247  in_picture->pts >= ost->forced_kf_pts[ost->forced_kf_index]) {
1248  ost->forced_kf_index++;
1249  forced_keyframe = 1;
1250  } else if (ost->forced_keyframes_pexpr) {
1251  double res;
1252  ost->forced_keyframes_expr_const_values[FKF_T] = pts_time;
1255  ff_dlog(NULL, "force_key_frame: n:%f n_forced:%f prev_forced_n:%f t:%f prev_forced_t:%f -> res:%f\n",
1261  res);
1262  if (res) {
1263  forced_keyframe = 1;
1269  }
1270 
1272  } else if ( ost->forced_keyframes
1273  && !strncmp(ost->forced_keyframes, "source", 6)
1274  && in_picture->key_frame==1) {
1275  forced_keyframe = 1;
1276  }
1277 
1278  if (forced_keyframe) {
1279  in_picture->pict_type = AV_PICTURE_TYPE_I;
1280  av_log(NULL, AV_LOG_DEBUG, "Forced keyframe at time %f\n", pts_time);
1281  }
1282 
1284  if (debug_ts) {
1285  av_log(NULL, AV_LOG_INFO, "encoder <- type:video "
1286  "frame_pts:%s frame_pts_time:%s time_base:%d/%d\n",
1287  av_ts2str(in_picture->pts), av_ts2timestr(in_picture->pts, &enc->time_base),
1288  enc->time_base.num, enc->time_base.den);
1289  }
1290 
1291  ost->frames_encoded++;
1292 
1293  ret = avcodec_send_frame(enc, in_picture);
1294  if (ret < 0)
1295  goto error;
1296 
1297  while (1) {
1298  ret = avcodec_receive_packet(enc, &pkt);
1299  update_benchmark("encode_video %d.%d", ost->file_index, ost->index);
1300  if (ret == AVERROR(EAGAIN))
1301  break;
1302  if (ret < 0)
1303  goto error;
1304 
1305  if (debug_ts) {
1306  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1307  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1308  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &enc->time_base),
1309  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &enc->time_base));
1310  }
1311 
1312  if (pkt.pts == AV_NOPTS_VALUE && !(enc->codec->capabilities & AV_CODEC_CAP_DELAY))
1313  pkt.pts = ost->sync_opts;
1314 
1315  av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
1316 
1317  if (debug_ts) {
1318  av_log(NULL, AV_LOG_INFO, "encoder -> type:video "
1319  "pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s\n",
1320  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ost->mux_timebase),
1321  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ost->mux_timebase));
1322  }
1323 
1324  frame_size = pkt.size;
1325  output_packet(of, &pkt, ost, 0);
1326 
1327  /* if two pass, output log */
1328  if (ost->logfile && enc->stats_out) {
1329  fprintf(ost->logfile, "%s", enc->stats_out);
1330  }
1331  }
1332  }
1333  ost->sync_opts++;
1334  /*
1335  * For video, number of frames in == number of packets out.
1336  * But there may be reordering, so we can't throw away frames on encoder
1337  * flush, we need to limit them here, before they go into encoder.
1338  */
1339  ost->frame_number++;
1340 
1341  if (vstats_filename && frame_size)
1342  do_video_stats(ost, frame_size);
1343  }
1344 
1345  if (!ost->last_frame)
1346  ost->last_frame = av_frame_alloc();
1347  av_frame_unref(ost->last_frame);
1348  if (next_picture && ost->last_frame)
1349  av_frame_ref(ost->last_frame, next_picture);
1350  else
1351  av_frame_free(&ost->last_frame);
1352 
1353  return;
1354 error:
1355  av_log(NULL, AV_LOG_FATAL, "Video encoding failed\n");
1356  exit_program(1);
1357 }
1358 
1359 static double psnr(double d)
1360 {
1361  return -10.0 * log10(d);
1362 }
1363 
/* Append one line of per-frame video statistics (-vstats/-vstats_file) to the
 * global vstats_file. NOTE(review): the signature line
 * (do_video_stats(OutputStream *ost, int frame_size)) is missing from this
 * copy of the file — confirm against the upstream source. */
{
    AVCodecContext *enc;
    int frame_number;
    double ti1, bitrate, avg_bitrate;

    /* this is executed just the first time do_video_stats is called */
    if (!vstats_file) {
        vstats_file = fopen(vstats_filename, "w");
        if (!vstats_file) {
            perror("fopen");
            exit_program(1);
        }
    }

    enc = ost->enc_ctx;
    if (enc->codec_type == AVMEDIA_TYPE_VIDEO) {
        frame_number = ost->st->nb_frames;
        /* vstats_version 2 adds the output file and stream indices. */
        if (vstats_version <= 1) {
            fprintf(vstats_file, "frame= %5d q= %2.1f ", frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        } else {
            fprintf(vstats_file, "out= %2d st= %2d frame= %5d q= %2.1f ", ost->file_index, ost->index, frame_number,
                    ost->quality / (float)FF_QP2LAMBDA);
        }

        /* PSNR is only printed when the encoder collected error stats. */
        if (ost->error[0]>=0 && (enc->flags & AV_CODEC_FLAG_PSNR))
            fprintf(vstats_file, "PSNR= %6.2f ", psnr(ost->error[0] / (enc->width * enc->height * 255.0 * 255.0)));

        fprintf(vstats_file,"f_size= %6d ", frame_size);
        /* compute pts value */
        ti1 = av_stream_get_end_pts(ost->st) * av_q2d(ost->st->time_base);
        if (ti1 < 0.01) /* clamp to avoid division by ~0 below */
            ti1 = 0.01;

        bitrate = (frame_size * 8) / av_q2d(enc->time_base) / 1000.0;
        avg_bitrate = (double)(ost->data_size * 8) / ti1 / 1000.0;
        fprintf(vstats_file, "s_size= %8.0fkB time= %0.3f br= %7.1fkbits/s avg_br= %7.1fkbits/s ",
               (double)ost->data_size / 1024, ti1, bitrate, avg_bitrate);
        fprintf(vstats_file, "type= %c\n", av_get_picture_type_char(ost->pict_type));
    }
}
1406 
1407 static int init_output_stream(OutputStream *ost, char *error, int error_len);
1408 
/* Mark an output stream as finished; with -shortest, finish every stream of
 * the owning output file. NOTE(review): the signature line
 * (finish_output_stream(OutputStream *ost)) and the statement marking 'ost'
 * itself finished appear to be missing from this copy — confirm upstream. */
{
    OutputFile *of = output_files[ost->file_index];
    int i;

    if (of->shortest) {
        /* -shortest: once one stream ends, stop encoding and muxing all
         * streams of this output file. */
        for (i = 0; i < of->ctx->nb_streams; i++)
            output_streams[of->ost_index + i]->finished = ENCODER_FINISHED | MUXER_FINISHED;
    }
}
1421 
1422 /**
1423  * Get and encode new output from any of the filtergraphs, without causing
1424  * activity.
1425  *
1426  * @return 0 for success, <0 for severe errors
1427  */
static int reap_filters(int flush)
{
    AVFrame *filtered_frame = NULL;
    int i;

    /* Reap all buffers present in the buffer sinks */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        OutputFile *of = output_files[ost->file_index];
        /* NOTE(review): the declaration of 'filter' (AVFilterContext *)
         * appears to be missing from this copy — confirm upstream. */
        AVCodecContext *enc = ost->enc_ctx;
        int ret = 0;

        /* Skip streams whose filtergraph is not configured yet. */
        if (!ost->filter || !ost->filter->graph->graph)
            continue;
        filter = ost->filter->filter;

        /* Lazily initialize the output stream the first time data reaches
         * its sink; failure is fatal. */
        if (!ost->initialized) {
            char error[1024] = "";
            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }

        /* Reuse one AVFrame per output stream across calls. */
        if (!ost->filtered_frame && !(ost->filtered_frame = av_frame_alloc())) {
            return AVERROR(ENOMEM);
        }
        filtered_frame = ost->filtered_frame;

        while (1) {
            double float_pts = AV_NOPTS_VALUE; // this is identical to filtered_frame.pts but with higher precision
            ret = av_buffersink_get_frame_flags(filter, filtered_frame,
            /* NOTE(review): the flags argument of this call appears to be
             * missing from this copy — confirm upstream. */
            if (ret < 0) {
                if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF) {
                    /* NOTE(review): the av_log() call opening this message
                     * appears to be missing from this copy. */
                    "Error in av_buffersink_get_frame_flags(): %s\n", av_err2str(ret));
                } else if (flush && ret == AVERROR_EOF) {
                    /* NOTE(review): the video-codec-type guard for this call
                     * appears to be missing from this copy. */
                    do_video_out(of, ost, NULL, AV_NOPTS_VALUE);
                }
                break;
            }
            if (ost->finished) {
                av_frame_unref(filtered_frame);
                continue;
            }
            if (filtered_frame->pts != AV_NOPTS_VALUE) {
                int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
                AVRational filter_tb = av_buffersink_get_time_base(filter);
                AVRational tb = enc->time_base;
                /* Widen the timebase denominator to keep extra fractional
                 * precision in float_pts. */
                int extra_bits = av_clip(29 - av_log2(tb.den), 0, 16);

                tb.den <<= extra_bits;
                float_pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, tb) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, tb);
                float_pts /= 1 << extra_bits;
                // avoid exact midoints to reduce the chance of rounding differences, this can be removed in case the fps code is changed to work with integers
                float_pts += FFSIGN(float_pts) * 1.0 / (1<<17);

                filtered_frame->pts =
                    av_rescale_q(filtered_frame->pts, filter_tb, enc->time_base) -
                    av_rescale_q(start_time, AV_TIME_BASE_Q, enc->time_base);
            }
            //if (ost->source_index >= 0)
            //    *filtered_frame= *input_streams[ost->source_index]->decoded_frame; //for me_threshold

            switch (av_buffersink_get_type(filter)) {
            case AVMEDIA_TYPE_VIDEO:
                if (!ost->frame_aspect_ratio.num)
                    enc->sample_aspect_ratio = filtered_frame->sample_aspect_ratio;

                if (debug_ts) {
                    av_log(NULL, AV_LOG_INFO, "filter -> pts:%s pts_time:%s exact:%f time_base:%d/%d\n",
                            av_ts2str(filtered_frame->pts), av_ts2timestr(filtered_frame->pts, &enc->time_base),
                            float_pts,
                            enc->time_base.num, enc->time_base.den);
                }

                do_video_out(of, ost, filtered_frame, float_pts);
                break;
            case AVMEDIA_TYPE_AUDIO:
                if (!(enc->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE) &&
                    enc->channels != filtered_frame->channels) {
                    /* NOTE(review): the av_log() call opening this message
                     * appears to be missing from this copy. */
                    "Audio filter graph output is not normalized and encoder does not support parameter changes\n");
                    break;
                }
                do_audio_out(of, ost, filtered_frame);
                break;
            default:
                // TODO support subtitle filters
                av_assert0(0);
            }

            av_frame_unref(filtered_frame);
        }
    }

    return 0;
}
1533 
/* Print the final size/overhead summary (INFO) and detailed per-stream
 * demux/mux statistics (VERBOSE) after transcoding ends. total_size is the
 * byte size of output file #0 as reported by the muxer's AVIO layer. */
static void print_final_stats(int64_t total_size)
{
    uint64_t video_size = 0, audio_size = 0, extra_size = 0, other_size = 0;
    uint64_t subtitle_size = 0;
    uint64_t data_size = 0;
    float percent = -1.0;
    int i, j;
    int pass1_used = 1;

    /* Aggregate payload bytes per media type across all output streams. */
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        switch (ost->enc_ctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO: video_size += ost->data_size; break;
            case AVMEDIA_TYPE_AUDIO: audio_size += ost->data_size; break;
            case AVMEDIA_TYPE_SUBTITLE: subtitle_size += ost->data_size; break;
            default: other_size += ost->data_size; break;
        }
        extra_size += ost->enc_ctx->extradata_size;
        data_size += ost->data_size;
        /* NOTE(review): the two-pass flag condition guarding this assignment
         * appears to be missing from this copy — confirm upstream. */
            pass1_used = 0;
    }

    /* Muxing overhead as a percentage of the raw payload size. */
    if (data_size && total_size>0 && total_size >= data_size)
        percent = 100.0 * (total_size - data_size) / data_size;

    av_log(NULL, AV_LOG_INFO, "video:%1.0fkB audio:%1.0fkB subtitle:%1.0fkB other streams:%1.0fkB global headers:%1.0fkB muxing overhead: ",
           video_size / 1024.0,
           audio_size / 1024.0,
           subtitle_size / 1024.0,
           other_size / 1024.0,
           extra_size / 1024.0);
    if (percent >= 0.0)
        av_log(NULL, AV_LOG_INFO, "%f%%", percent);
    else
        av_log(NULL, AV_LOG_INFO, "unknown");
    av_log(NULL, AV_LOG_INFO, "\n");

    /* print verbose per-stream stats */
    for (i = 0; i < nb_input_files; i++) {
        InputFile *f = input_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Input file #%d (%s):\n",
               i, f->ctx->url);

        for (j = 0; j < f->nb_streams; j++) {
            InputStream *ist = input_streams[f->ist_index + j];
            enum AVMediaType type = ist->dec_ctx->codec_type;

            total_size += ist->data_size;
            total_packets += ist->nb_packets;

            av_log(NULL, AV_LOG_VERBOSE, " Input stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets read (%"PRIu64" bytes); ",
                   ist->nb_packets, ist->data_size);

            if (ist->decoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames decoded",
                       ist->frames_decoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ist->samples_decoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) demuxed\n",
               total_packets, total_size);
    }

    for (i = 0; i < nb_output_files; i++) {
        OutputFile *of = output_files[i];
        uint64_t total_packets = 0, total_size = 0;

        av_log(NULL, AV_LOG_VERBOSE, "Output file #%d (%s):\n",
               i, of->ctx->url);

        for (j = 0; j < of->ctx->nb_streams; j++) {
            OutputStream *ost = output_streams[of->ost_index + j];
            enum AVMediaType type = ost->enc_ctx->codec_type;

            total_size += ost->data_size;
            total_packets += ost->packets_written;

            av_log(NULL, AV_LOG_VERBOSE, " Output stream #%d:%d (%s): ",
                   i, j, media_type_string(type));
            if (ost->encoding_needed) {
                av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" frames encoded",
                       ost->frames_encoded);
                if (type == AVMEDIA_TYPE_AUDIO)
                    av_log(NULL, AV_LOG_VERBOSE, " (%"PRIu64" samples)", ost->samples_encoded);
                av_log(NULL, AV_LOG_VERBOSE, "; ");
            }

            av_log(NULL, AV_LOG_VERBOSE, "%"PRIu64" packets muxed (%"PRIu64" bytes); ",
                   ost->packets_written, ost->data_size);

            av_log(NULL, AV_LOG_VERBOSE, "\n");
        }

        av_log(NULL, AV_LOG_VERBOSE, " Total: %"PRIu64" packets (%"PRIu64" bytes) muxed\n",
               total_packets, total_size);
    }
    if(video_size + data_size + audio_size + subtitle_size + extra_size == 0){
        av_log(NULL, AV_LOG_WARNING, "Output file is empty, nothing was encoded ");
        if (pass1_used) {
            av_log(NULL, AV_LOG_WARNING, "\n");
        } else {
            av_log(NULL, AV_LOG_WARNING, "(check -ss / -t / -frames parameters if used)\n");
        }
    }
}
1650 
/* Periodically (at most every 500ms) print the one-line progress report to
 * stderr/log and, if -progress is active, write key=value progress records to
 * progress_avio. When is_last_report is set, also emits the final stats. */
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
{
    AVBPrint buf, buf_script;
    OutputStream *ost;
    AVFormatContext *oc;
    int64_t total_size;
    AVCodecContext *enc;
    int frame_number, vid, i;
    double bitrate;
    double speed;
    int64_t pts = INT64_MIN + 1;
    static int64_t last_time = -1;
    static int qp_histogram[52];
    int hours, mins, secs, us;
    const char *hours_sign;
    int ret;
    float t;

    if (!print_stats && !is_last_report && !progress_avio)
        return;

    /* Rate-limit intermediate reports to one per 500ms of wall time. */
    if (!is_last_report) {
        if (last_time == -1) {
            last_time = cur_time;
            return;
        }
        if ((cur_time - last_time) < 500000)
            return;
        last_time = cur_time;
    }

    t = (cur_time-timer_start) / 1000000.0;


    oc = output_files[0]->ctx;

    total_size = avio_size(oc->pb);
    if (total_size <= 0) // FIXME improve avio_size() so it works with non seekable output too
        total_size = avio_tell(oc->pb);

    vid = 0;
    /* NOTE(review): the av_bprint_init() call for 'buf' appears to be missing
     * from this copy (only buf_script is initialized here) — confirm
     * upstream. */
    av_bprint_init(&buf_script, 0, AV_BPRINT_SIZE_AUTOMATIC);
    for (i = 0; i < nb_output_streams; i++) {
        float q = -1;
        ost = output_streams[i];
        enc = ost->enc_ctx;
        if (!ost->stream_copy)
            q = ost->quality / (float) FF_QP2LAMBDA;

        /* Additional video streams only contribute a quantizer figure; the
         * first video stream (vid == 0) gets the full frame/fps/q report. */
        if (vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            av_bprintf(&buf, "q=%2.1f ", q);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
        }
        if (!vid && enc->codec_type == AVMEDIA_TYPE_VIDEO) {
            float fps;

            frame_number = ost->frame_number;
            fps = t > 1 ? frame_number / t : 0;
            av_bprintf(&buf, "frame=%5d fps=%3.*f q=%3.1f ",
                     frame_number, fps < 9.95, fps, q);
            av_bprintf(&buf_script, "frame=%d\n", frame_number);
            av_bprintf(&buf_script, "fps=%.2f\n", fps);
            av_bprintf(&buf_script, "stream_%d_%d_q=%.1f\n",
                       ost->file_index, ost->index, q);
            if (is_last_report)
                av_bprintf(&buf, "L");
            if (qp_hist) {
                int j;
                int qp = lrintf(q);
                if (qp >= 0 && qp < FF_ARRAY_ELEMS(qp_histogram))
                    qp_histogram[qp]++;
                /* Print a log2-scaled histogram of quantizers 0..31. */
                for (j = 0; j < 32; j++)
                    av_bprintf(&buf, "%X", av_log2(qp_histogram[j] + 1));
            }

            if ((enc->flags & AV_CODEC_FLAG_PSNR) && (ost->pict_type != AV_PICTURE_TYPE_NONE || is_last_report)) {
                int j;
                double error, error_sum = 0;
                double scale, scale_sum = 0;
                double p;
                char type[3] = { 'Y','U','V' };
                av_bprintf(&buf, "PSNR=");
                for (j = 0; j < 3; j++) {
                    if (is_last_report) {
                        error = enc->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0 * frame_number;
                    } else {
                        error = ost->error[j];
                        scale = enc->width * enc->height * 255.0 * 255.0;
                    }
                    /* Chroma planes are subsampled: quarter the scale. */
                    if (j)
                        scale /= 4;
                    error_sum += error;
                    scale_sum += scale;
                    p = psnr(error / scale);
                    av_bprintf(&buf, "%c:%2.2f ", type[j], p);
                    av_bprintf(&buf_script, "stream_%d_%d_psnr_%c=%2.2f\n",
                               ost->file_index, ost->index, type[j] | 32, p);
                }
                p = psnr(error_sum / scale_sum);
                av_bprintf(&buf, "*:%2.2f ", psnr(error_sum / scale_sum));
                av_bprintf(&buf_script, "stream_%d_%d_psnr_all=%2.2f\n",
                           ost->file_index, ost->index, p);
            }
            vid = 1;
        }
        /* compute min output value */
        /* NOTE(review): the guard checking av_stream_get_end_pts() against
         * AV_NOPTS_VALUE appears to be missing from this copy — confirm
         * upstream. */
            pts = FFMAX(pts, av_rescale_q(av_stream_get_end_pts(ost->st),
                                          ost->st->time_base, AV_TIME_BASE_Q));
        if (is_last_report)
            nb_frames_drop += ost->last_dropped;
    }

    /* Split the output timestamp into h:m:s.us components. */
    secs = FFABS(pts) / AV_TIME_BASE;
    us = FFABS(pts) % AV_TIME_BASE;
    mins = secs / 60;
    secs %= 60;
    hours = mins / 60;
    mins %= 60;
    hours_sign = (pts < 0) ? "-" : "";

    bitrate = pts && total_size >= 0 ? total_size * 8 / (pts / 1000.0) : -1;
    speed = t != 0.0 ? (double)pts / AV_TIME_BASE / t : -1;

    if (total_size < 0) av_bprintf(&buf, "size=N/A time=");
    else av_bprintf(&buf, "size=%8.0fkB time=", total_size / 1024.0);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf, "N/A ");
    } else {
        av_bprintf(&buf, "%s%02d:%02d:%02d.%02d ",
                   hours_sign, hours, mins, secs, (100 * us) / AV_TIME_BASE);
    }

    if (bitrate < 0) {
        av_bprintf(&buf, "bitrate=N/A");
        av_bprintf(&buf_script, "bitrate=N/A\n");
    }else{
        av_bprintf(&buf, "bitrate=%6.1fkbits/s", bitrate);
        av_bprintf(&buf_script, "bitrate=%6.1fkbits/s\n", bitrate);
    }

    if (total_size < 0) av_bprintf(&buf_script, "total_size=N/A\n");
    else av_bprintf(&buf_script, "total_size=%"PRId64"\n", total_size);
    if (pts == AV_NOPTS_VALUE) {
        av_bprintf(&buf_script, "out_time_us=N/A\n");
        av_bprintf(&buf_script, "out_time_ms=N/A\n");
        av_bprintf(&buf_script, "out_time=N/A\n");
    } else {
        av_bprintf(&buf_script, "out_time_us=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time_ms=%"PRId64"\n", pts);
        av_bprintf(&buf_script, "out_time=%s%02d:%02d:%02d.%06d\n",
                   hours_sign, hours, mins, secs, us);
    }

    /* NOTE(review): the dup/drop guard for the next line appears to be
     * missing from this copy — confirm upstream. */
    av_bprintf(&buf, " dup=%d drop=%d", nb_frames_dup, nb_frames_drop);
    av_bprintf(&buf_script, "dup_frames=%d\n", nb_frames_dup);
    av_bprintf(&buf_script, "drop_frames=%d\n", nb_frames_drop);

    if (speed < 0) {
        av_bprintf(&buf, " speed=N/A");
        av_bprintf(&buf_script, "speed=N/A\n");
    } else {
        av_bprintf(&buf, " speed=%4.3gx", speed);
        av_bprintf(&buf_script, "speed=%4.3gx\n", speed);
    }

    if (print_stats || is_last_report) {
        const char end = is_last_report ? '\n' : '\r';
        if (print_stats==1 && AV_LOG_INFO > av_log_get_level()) {
            fprintf(stderr, "%s %c", buf.str, end);
        } else
            av_log(NULL, AV_LOG_INFO, "%s %c", buf.str, end);

        fflush(stderr);
    }
    av_bprint_finalize(&buf, NULL);

    if (progress_avio) {
        av_bprintf(&buf_script, "progress=%s\n",
                   is_last_report ? "end" : "continue");
        avio_write(progress_avio, buf_script.str,
                   FFMIN(buf_script.len, buf_script.size - 1));
        avio_flush(progress_avio);
        av_bprint_finalize(&buf_script, NULL);
        if (is_last_report) {
            if ((ret = avio_closep(&progress_avio)) < 0)
                /* NOTE(review): the av_log() call opening this message
                 * appears to be missing from this copy. */
                "Error closing progress log, loss of information possible: %s\n", av_err2str(ret));
        }
    }

    if (is_last_report)
        print_final_stats(total_size);
}
1849 
/* Seed an InputFilter's media parameters from the demuxer-provided codec
 * parameters, used when no decoded frame was ever received.
 * NOTE(review): the signature line (ifilter_parameters_from_codecpar(
 * InputFilter *ifilter, AVCodecParameters *par)) is missing from this copy. */
{
    // We never got any input. Set a fake format, which will
    // come from libavformat.
    ifilter->format = par->format;
    ifilter->sample_rate = par->sample_rate;
    ifilter->channels = par->channels;
    ifilter->channel_layout = par->channel_layout;
    ifilter->width = par->width;
    ifilter->height = par->height;
    ifilter->sample_aspect_ratio = par->sample_aspect_ratio;
}
1862 
/* Drain every active encoder at end of input: send a NULL frame to signal
 * EOF, then receive and mux all remaining packets until AVERROR_EOF. */
static void flush_encoders(void)
{
    int i, ret;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];
        AVCodecContext *enc = ost->enc_ctx;
        OutputFile *of = output_files[ost->file_index];

        if (!ost->encoding_needed)
            continue;

        // Try to enable encoding with no input frames.
        // Maybe we should just let encoding fail instead.
        if (!ost->initialized) {
            FilterGraph *fg = ost->filter->graph;
            char error[1024] = "";

            /* NOTE(review): the av_log() call opening this message appears
             * to be missing from this copy — confirm upstream. */
                   "Finishing stream %d:%d without any data written to it.\n",
                   ost->file_index, ost->st->index);

            if (ost->filter && !fg->graph) {
                int x;
                /* Fill in filter input parameters from codecpar where the
                 * decoder never provided a frame. */
                for (x = 0; x < fg->nb_inputs; x++) {
                    InputFilter *ifilter = fg->inputs[x];
                    if (ifilter->format < 0)
                        ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
                }

                /* NOTE(review): the ifilter_has_all_input_formats() guard for
                 * this 'continue' appears to be missing from this copy. */
                    continue;

                ret = configure_filtergraph(fg);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error configuring filter graph\n");
                    exit_program(1);
                }

                finish_output_stream(ost);
            }

            ret = init_output_stream(ost, error, sizeof(error));
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
                       ost->file_index, ost->index, error);
                exit_program(1);
            }
        }

        /* Raw/PCM-style audio encoders (frame_size <= 1) need no flushing. */
        if (enc->codec_type == AVMEDIA_TYPE_AUDIO && enc->frame_size <= 1)
            continue;

        /* NOTE(review): the codec-type guard for this 'continue' appears to
         * be missing from this copy — confirm upstream. */
            continue;

        for (;;) {
            const char *desc = NULL;
            AVPacket pkt;
            int pkt_size;

            switch (enc->codec_type) {
            case AVMEDIA_TYPE_AUDIO:
                desc = "audio";
                break;
            case AVMEDIA_TYPE_VIDEO:
                desc = "video";
                break;
            default:
                av_assert0(0);
            }

            av_init_packet(&pkt);
            pkt.data = NULL;
            pkt.size = 0;


            /* Keep signalling EOF (NULL frame) while the encoder still asks
             * for input. */
            while ((ret = avcodec_receive_packet(enc, &pkt)) == AVERROR(EAGAIN)) {
                ret = avcodec_send_frame(enc, NULL);
                if (ret < 0) {
                    av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                           desc,
                           av_err2str(ret));
                    exit_program(1);
                }
            }

            update_benchmark("flush_%s %d.%d", desc, ost->file_index, ost->index);
            if (ret < 0 && ret != AVERROR_EOF) {
                av_log(NULL, AV_LOG_FATAL, "%s encoding failed: %s\n",
                       desc,
                       av_err2str(ret));
                exit_program(1);
            }
            /* Two-pass log output. */
            if (ost->logfile && enc->stats_out) {
                fprintf(ost->logfile, "%s", enc->stats_out);
            }
            if (ret == AVERROR_EOF) {
                output_packet(of, &pkt, ost, 1);
                break;
            }
            if (ost->finished & MUXER_FINISHED) {
                av_packet_unref(&pkt);
                continue;
            }
            av_packet_rescale_ts(&pkt, enc->time_base, ost->mux_timebase);
            pkt_size = pkt.size;
            output_packet(of, &pkt, ost, 0);
            /* NOTE(review): the vstats guard for this call appears to be
             * missing from this copy — confirm upstream. */
                do_video_stats(ost, pkt_size);
        }
    }
}
1978 
1979 /*
1980  * Check whether a packet from ist should be written into ost at this time
1981  */
/* Return 1 if a packet from 'ist' should be written into 'ost' now, 0
 * otherwise (wrong source stream, stream finished, or before -ss start).
 * NOTE(review): the signature line (check_output_constraints(InputStream *ist,
 * OutputStream *ost)) is missing from this copy. */
{
    OutputFile *of = output_files[ost->file_index];
    int ist_index = input_files[ist->file_index]->ist_index + ist->st->index;

    /* Only the mapped source stream feeds this output. */
    if (ost->source_index != ist_index)
        return 0;

    if (ost->finished)
        return 0;

    /* Drop data before the requested output start time. */
    if (of->start_time != AV_NOPTS_VALUE && ist->pts < of->start_time)
        return 0;

    return 1;
}
1998 
/* Copy one input packet to the output stream without re-encoding (-c copy):
 * apply start-time/recording-time windows, rescale timestamps into the muxer
 * timebase and forward the packet. A NULL 'pkt' flushes the output bitstream
 * filters. */
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
{
    OutputFile *of = output_files[ost->file_index];
    InputFile *f = input_files [ist->file_index];
    int64_t start_time = (of->start_time == AV_NOPTS_VALUE) ? 0 : of->start_time;
    int64_t ost_tb_start_time = av_rescale_q(start_time, AV_TIME_BASE_Q, ost->mux_timebase);
    AVPacket opkt = { 0 };

    av_init_packet(&opkt);

    // EOF: flush output bitstream filters.
    if (!pkt) {
        output_packet(of, &opkt, ost, 1);
        return;
    }

    /* Skip non-key leading packets so the copy starts on a keyframe. */
    if ((!ost->frame_number && !(pkt->flags & AV_PKT_FLAG_KEY)) &&
    /* NOTE(review): the '!ost->copy_initial_nonkeyframes' operand of this
     * condition appears to be missing from this copy — confirm upstream. */
        return;

    if (!ost->frame_number && !ost->copy_prior_start) {
        int64_t comp_start = start_time;
        if (copy_ts && f->start_time != AV_NOPTS_VALUE)
            comp_start = FFMAX(start_time, f->start_time + f->ts_offset);
        if (pkt->pts == AV_NOPTS_VALUE ?
            ist->pts < comp_start :
            pkt->pts < av_rescale_q(comp_start, AV_TIME_BASE_Q, ist->st->time_base))
            return;
    }

    /* Past the output recording window (-t): finish this stream. */
    if (of->recording_time != INT64_MAX &&
        ist->pts >= of->recording_time + start_time) {
        close_output_stream(ost);
        return;
    }

    /* Same check against the input file's recording window. */
    if (f->recording_time != INT64_MAX) {
        start_time = f->ctx->start_time;
        if (f->start_time != AV_NOPTS_VALUE && copy_ts)
            start_time += f->start_time;
        if (ist->pts >= f->recording_time + start_time) {
            close_output_stream(ost);
            return;
        }
    }

    /* force the input stream PTS */
    if (ost->enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO)
        ost->sync_opts++;

    if (pkt->pts != AV_NOPTS_VALUE)
        opkt.pts = av_rescale_q(pkt->pts, ist->st->time_base, ost->mux_timebase) - ost_tb_start_time;
    else
        opkt.pts = AV_NOPTS_VALUE;

    if (pkt->dts == AV_NOPTS_VALUE)
        opkt.dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ost->mux_timebase);
    else
        opkt.dts = av_rescale_q(pkt->dts, ist->st->time_base, ost->mux_timebase);
    opkt.dts -= ost_tb_start_time;

    if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO && pkt->dts != AV_NOPTS_VALUE) {
        /* NOTE(review): the declaration/initialization of 'duration' (the
         * audio frame duration) appears to be missing from this copy —
         * confirm upstream. */
        if(!duration)
            duration = ist->dec_ctx->frame_size;
        /* av_rescale_delta() keeps sub-timebase remainders across packets to
         * avoid drift on audio timestamps. */
        opkt.dts = opkt.pts = av_rescale_delta(ist->st->time_base, pkt->dts,
                                               (AVRational){1, ist->dec_ctx->sample_rate}, duration, &ist->filter_in_rescale_delta_last,
                                               ost->mux_timebase) - ost_tb_start_time;
    }

    opkt.duration = av_rescale_q(pkt->duration, ist->st->time_base, ost->mux_timebase);

    opkt.flags = pkt->flags;

    /* Reference rather than copy the payload when possible. */
    if (pkt->buf) {
        opkt.buf = av_buffer_ref(pkt->buf);
        if (!opkt.buf)
            exit_program(1);
    }
    opkt.data = pkt->data;
    opkt.size = pkt->size;

    av_copy_packet_side_data(&opkt, pkt);

    output_packet(of, &opkt, ost, 0);
}
2085 
/* If the decoder reported no channel layout, try to pick a default one for
 * its channel count (bounded by -guess_layout_max). Returns 1 on success or
 * when a layout was already set, 0 when no layout could be chosen.
 * NOTE(review): the signature line (guess_input_channel_layout(InputStream
 * *ist)) is missing from this copy. */
{
    AVCodecContext *dec = ist->dec_ctx;

    if (!dec->channel_layout) {
        char layout_name[256];

        if (dec->channels > ist->guess_layout_max)
            return 0;
        /* NOTE(review): the statement assigning a default layout to
         * dec->channel_layout appears to be missing from this copy —
         * confirm upstream. */
        if (!dec->channel_layout)
            return 0;
        av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                     dec->channels, dec->channel_layout);
        av_log(NULL, AV_LOG_WARNING, "Guessed Channel Layout for Input Stream "
               "#%d.%d : %s\n", ist->file_index, ist->st->index, layout_name);
    }
    return 1;
}
2105 
/* Track decode success/failure statistics and honor -xerror: abort on decode
 * errors and (apparently) on corrupt decoded frames. */
static void check_decode_result(InputStream *ist, int *got_output, int ret)
{
    /* decode_error_stat[0] counts successes, [1] counts failures. */
    if (*got_output || ret<0)
        decode_error_stat[ret<0] ++;

    if (ret < 0 && exit_on_error)
        exit_program(1);

    if (*got_output && ist) {
        /* NOTE(review): the corrupt-frame condition and the av_log() call
         * opening this message appear to be missing from this copy —
         * confirm upstream. */
            "%s: corrupt decoded frame in stream %d\n", input_files[ist->file_index]->ctx->url, ist->st->index);
        if (exit_on_error)
            exit_program(1);
    }
}
2123 
2124 // Filters can be configured only if the formats of all inputs are known.
/* Return 1 when every audio/video input of the filtergraph has a known
 * format (format >= 0), 0 otherwise; the graph can only be configured once
 * this holds. NOTE(review): the signature line
 * (ifilter_has_all_input_formats(FilterGraph *fg)) is missing from this
 * copy. */
{
    int i;
    for (i = 0; i < fg->nb_inputs; i++) {
        if (fg->inputs[i]->format < 0 && (fg->inputs[i]->type == AVMEDIA_TYPE_AUDIO ||
                                          fg->inputs[i]->type == AVMEDIA_TYPE_VIDEO))
            return 0;
    }
    return 1;
}
2135 
/* Feed one decoded frame into a filtergraph input, (re)configuring the graph
 * when the input parameters changed, or queueing the frame while other
 * inputs' formats are still unknown. Returns 0 or a negative AVERROR.
 * NOTE(review): the signature line (ifilter_send_frame(InputFilter *ifilter,
 * AVFrame *frame)) is missing from this copy. */
{
    FilterGraph *fg = ifilter->graph;
    int need_reinit, ret, i;

    /* determine if the parameters for this input changed */
    need_reinit = ifilter->format != frame->format;

    switch (ifilter->ist->st->codecpar->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        need_reinit |= ifilter->sample_rate != frame->sample_rate ||
                       ifilter->channels != frame->channels ||
                       ifilter->channel_layout != frame->channel_layout;
        break;
    case AVMEDIA_TYPE_VIDEO:
        need_reinit |= ifilter->width != frame->width ||
                       ifilter->height != frame->height;
        break;
    }

    if (!ifilter->ist->reinit_filters && fg->graph)
        need_reinit = 0;

    /* A change in hw frames context always forces reinit. */
    if (!!ifilter->hw_frames_ctx != !!frame->hw_frames_ctx ||
        (ifilter->hw_frames_ctx && ifilter->hw_frames_ctx->data != frame->hw_frames_ctx->data))
        need_reinit = 1;

    if (need_reinit) {
        ret = ifilter_parameters_from_frame(ifilter, frame);
        if (ret < 0)
            return ret;
    }

    /* (re)init the graph if possible, otherwise buffer the frame and return */
    if (need_reinit || !fg->graph) {
        for (i = 0; i < fg->nb_inputs; i++) {
            if (!ifilter_has_all_input_formats(fg)) {
                /* Not all inputs known yet: clone the frame into this
                 * input's FIFO and try again later. */
                AVFrame *tmp = av_frame_clone(frame);
                if (!tmp)
                    return AVERROR(ENOMEM);
                av_frame_unref(frame);

                if (!av_fifo_space(ifilter->frame_queue)) {
                    ret = av_fifo_realloc2(ifilter->frame_queue, 2 * av_fifo_size(ifilter->frame_queue));
                    if (ret < 0) {
                        av_frame_free(&tmp);
                        return ret;
                    }
                }
                av_fifo_generic_write(ifilter->frame_queue, &tmp, sizeof(tmp), NULL);
                return 0;
            }
        }

        /* Drain the old graph before tearing it down for reconfiguration. */
        ret = reap_filters(1);
        if (ret < 0 && ret != AVERROR_EOF) {
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
            return ret;
        }

        ret = configure_filtergraph(fg);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
            return ret;
        }
    }

    /* NOTE(review): the av_buffersrc_add_frame_flags() call whose result is
     * checked below appears to be missing from this copy — confirm
     * upstream. */
    if (ret < 0) {
        if (ret != AVERROR_EOF)
            av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
        return ret;
    }

    return 0;
}
2212 
2213 static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
2214 {
2215  int ret;
2216 
2217  ifilter->eof = 1;
2218 
2219  if (ifilter->filter) {
2220  ret = av_buffersrc_close(ifilter->filter, pts, AV_BUFFERSRC_FLAG_PUSH);
2221  if (ret < 0)
2222  return ret;
2223  } else {
2224  // the filtergraph was never configured
2225  if (ifilter->format < 0)
2226  ifilter_parameters_from_codecpar(ifilter, ifilter->ist->st->codecpar);
2227  if (ifilter->format < 0 && (ifilter->type == AVMEDIA_TYPE_AUDIO || ifilter->type == AVMEDIA_TYPE_VIDEO)) {
2228  av_log(NULL, AV_LOG_ERROR, "Cannot determine format of input stream %d:%d after EOF\n", ifilter->ist->file_index, ifilter->ist->st->index);
2229  return AVERROR_INVALIDDATA;
2230  }
2231  }
2232 
2233  return 0;
2234 }
2235 
2236 // This does not quite work like avcodec_decode_audio4/avcodec_decode_video2.
2237 // There is the following difference: if you got a frame, you must call
2238 // it again with pkt=NULL. pkt==NULL is treated differently from pkt->size==0
2239 // (pkt==NULL means get more output, pkt->size==0 is a flush/drain packet)
2240 static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
2241 {
2242  int ret;
2243 
2244  *got_frame = 0;
2245 
2246  if (pkt) {
2247  ret = avcodec_send_packet(avctx, pkt);
2248  // In particular, we don't expect AVERROR(EAGAIN), because we read all
2249  // decoded frames with avcodec_receive_frame() until done.
2250  if (ret < 0 && ret != AVERROR_EOF)
2251  return ret;
2252  }
2253 
2254  ret = avcodec_receive_frame(avctx, frame);
2255  if (ret < 0 && ret != AVERROR(EAGAIN))
2256  return ret;
2257  if (ret >= 0)
2258  *got_frame = 1;
2259 
2260  return 0;
2261 }
2262 
/* Distribute one decoded frame to every filtergraph input fed by this
 * stream. All but the last filter get a reference (ist->filter_frame);
 * the last consumes decoded_frame itself. AVERROR_EOF from a filter is
 * ignored (that input already finished). Returns 0 or an AVERROR. */
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
{
    int i, ret;
    AVFrame *f;

    av_assert1(ist->nb_filters > 0); /* ensure ret is initialized */
    for (i = 0; i < ist->nb_filters; i++) {
        if (i < ist->nb_filters - 1) {
            f = ist->filter_frame;
            ret = av_frame_ref(f, decoded_frame);
            if (ret < 0)
                break;
        } else
            f = decoded_frame;
        ret = ifilter_send_frame(ist->filters[i], f);
        if (ret == AVERROR_EOF)
            ret = 0; /* ignore */
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR,
                   "Failed to inject frame into filter network: %s\n", av_err2str(ret));
            break;
        }
    }
    return ret;
}
2288 
/* Decode one audio packet (or drain with pkt==NULL), derive a pts for the
 * decoded frame, and push it into the filter network. Sets *got_output,
 * and *decode_failed when the decoder itself errored. Returns the decode
 * error, a filter error, or 0. */
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output,
                        int *decode_failed)
{
    AVFrame *decoded_frame;
    AVCodecContext *avctx = ist->dec_ctx;
    int ret, err = 0;
    AVRational decoded_frame_tb;

    /* lazily allocate the per-stream reusable frames */
    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;

    update_benchmark(NULL);
    ret = decode(avctx, decoded_frame, got_output, pkt);
    update_benchmark("decode_audio %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    if (ret >= 0 && avctx->sample_rate <= 0) {
        av_log(avctx, AV_LOG_ERROR, "Sample rate %d invalid\n", avctx->sample_rate);
        ret = AVERROR_INVALIDDATA;
    }

    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    if (!*got_output || ret < 0)
        return ret;

    ist->samples_decoded += decoded_frame->nb_samples;
    ist->frames_decoded++;

#if 1
    /* increment next_dts to use for the case where the input stream does not
       have timestamps or there are multiple frames in the packet */
    ist->next_pts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
    ist->next_dts += ((int64_t)AV_TIME_BASE * decoded_frame->nb_samples) /
                     avctx->sample_rate;
#endif

    /* pick a pts source: frame pts, packet pts, or the stream dts estimate */
    if (decoded_frame->pts != AV_NOPTS_VALUE) {
        decoded_frame_tb   = ist->st->time_base;
    } else if (pkt && pkt->pts != AV_NOPTS_VALUE) {
        decoded_frame->pts = pkt->pts;
        decoded_frame_tb   = ist->st->time_base;
    }else {
        decoded_frame->pts = ist->dts;
        decoded_frame_tb   = AV_TIME_BASE_Q;
    }
    if (decoded_frame->pts != AV_NOPTS_VALUE)
        /* rescale to the 1/sample_rate time base, keeping sample-accurate
         * continuity via filter_in_rescale_delta_last */
        decoded_frame->pts = av_rescale_delta(decoded_frame_tb, decoded_frame->pts,
                                              (AVRational){1, avctx->sample_rate}, decoded_frame->nb_samples, &ist->filter_in_rescale_delta_last,
                                              (AVRational){1, avctx->sample_rate});
    ist->nb_samples = decoded_frame->nb_samples;
    err = send_frame_to_filters(ist, decoded_frame);

    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2352 
/* Decode one video packet (or drain when eof is set), fix up timestamps
 * (best-effort pts, buffered dts for drain), run any hwaccel retrieval,
 * and push the frame into the filter network. Sets *got_output,
 * *duration_pts (frame duration in stream time base) and *decode_failed. */
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof,
                        int *decode_failed)
{
    AVFrame *decoded_frame;
    int i, ret = 0, err = 0;
    int64_t best_effort_timestamp;
    int64_t dts = AV_NOPTS_VALUE;
    AVPacket avpkt;

    // With fate-indeo3-2, we're getting 0-sized packets before EOF for some
    // reason. This seems like a semi-critical bug. Don't trigger EOF, and
    // skip the packet.
    if (!eof && pkt && pkt->size == 0)
        return 0;

    if (!ist->decoded_frame && !(ist->decoded_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    if (!ist->filter_frame && !(ist->filter_frame = av_frame_alloc()))
        return AVERROR(ENOMEM);
    decoded_frame = ist->decoded_frame;
    if (ist->dts != AV_NOPTS_VALUE)
        dts = av_rescale_q(ist->dts, AV_TIME_BASE_Q, ist->st->time_base);
    if (pkt) {
        avpkt = *pkt;
        avpkt.dts = dts; // ffmpeg.c probably shouldn't do this
    }

    // The old code used to set dts on the drain packet, which does not work
    // with the new API anymore.
    if (eof) {
        /* remember the dts so drained frames can be timestamped later */
        void *new = av_realloc_array(ist->dts_buffer, ist->nb_dts_buffer + 1, sizeof(ist->dts_buffer[0]));
        if (!new)
            return AVERROR(ENOMEM);
        ist->dts_buffer = new;
        ist->dts_buffer[ist->nb_dts_buffer++] = dts;
    }

    update_benchmark(NULL);
    ret = decode(ist->dec_ctx, decoded_frame, got_output, pkt ? &avpkt : NULL);
    update_benchmark("decode_video %d.%d", ist->file_index, ist->st->index);
    if (ret < 0)
        *decode_failed = 1;

    // The following line may be required in some cases where there is no parser
    // or the parser does not has_b_frames correctly
    if (ist->st->codecpar->video_delay < ist->dec_ctx->has_b_frames) {
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_H264) {
            ist->st->codecpar->video_delay = ist->dec_ctx->has_b_frames;
        } else
            av_log(ist->dec_ctx, AV_LOG_WARNING,
                   "video_delay is larger in decoder than demuxer %d > %d.\n"
                   "If you want to help, upload a sample "
                   "of this file to ftp://upload.ffmpeg.org/incoming/ "
                   "and contact the ffmpeg-devel mailing list. (ffmpeg-devel@ffmpeg.org)\n",
                   ist->dec_ctx->has_b_frames,
                   ist->st->codecpar->video_delay);
    }

    if (ret != AVERROR_EOF)
        check_decode_result(ist, got_output, ret);

    /* debug aid: a frame whose geometry differs from the codec context */
    if (*got_output && ret >= 0) {
        if (ist->dec_ctx->width  != decoded_frame->width ||
            ist->dec_ctx->height != decoded_frame->height ||
            ist->dec_ctx->pix_fmt != decoded_frame->format) {
            av_log(NULL, AV_LOG_DEBUG, "Frame parameters mismatch context %d,%d,%d != %d,%d,%d\n",
                decoded_frame->width,
                decoded_frame->height,
                decoded_frame->format,
                ist->dec_ctx->width,
                ist->dec_ctx->height,
                ist->dec_ctx->pix_fmt);
        }
    }

    if (!*got_output || ret < 0)
        return ret;

    if(ist->top_field_first>=0)
        decoded_frame->top_field_first = ist->top_field_first;

    ist->frames_decoded++;

    /* download/convert the frame from a hwaccel surface if needed */
    if (ist->hwaccel_retrieve_data && decoded_frame->format == ist->hwaccel_pix_fmt) {
        err = ist->hwaccel_retrieve_data(ist->dec_ctx, decoded_frame);
        if (err < 0)
            goto fail;
    }
    ist->hwaccel_retrieved_pix_fmt = decoded_frame->format;

    best_effort_timestamp= decoded_frame->best_effort_timestamp;
    *duration_pts = decoded_frame->pkt_duration;

    if (ist->framerate.num)
        best_effort_timestamp = ist->cfr_next_pts++;

    /* while draining, fall back to the buffered packet dts values */
    if (eof && best_effort_timestamp == AV_NOPTS_VALUE && ist->nb_dts_buffer > 0) {
        best_effort_timestamp = ist->dts_buffer[0];

        for (i = 0; i < ist->nb_dts_buffer - 1; i++)
            ist->dts_buffer[i] = ist->dts_buffer[i + 1];
        ist->nb_dts_buffer--;
    }

    if(best_effort_timestamp != AV_NOPTS_VALUE) {
        int64_t ts = av_rescale_q(decoded_frame->pts = best_effort_timestamp, ist->st->time_base, AV_TIME_BASE_Q);

        if (ts != AV_NOPTS_VALUE)
            ist->next_pts = ist->pts = ts;
    }

    if (debug_ts) {
        av_log(NULL, AV_LOG_INFO, "decoder -> ist_index:%d type:video "
               "frame_pts:%s frame_pts_time:%s best_effort_ts:%"PRId64" best_effort_ts_time:%s keyframe:%d frame_type:%d time_base:%d/%d\n",
               ist->st->index, av_ts2str(decoded_frame->pts),
               av_ts2timestr(decoded_frame->pts, &ist->st->time_base),
               best_effort_timestamp,
               av_ts2timestr(best_effort_timestamp, &ist->st->time_base),
               decoded_frame->key_frame, decoded_frame->pict_type,
               ist->st->time_base.num, ist->st->time_base.den);
    }

    if (ist->st->sample_aspect_ratio.num)
        decoded_frame->sample_aspect_ratio = ist->st->sample_aspect_ratio;

    err = send_frame_to_filters(ist, decoded_frame);

fail:
    av_frame_unref(ist->filter_frame);
    av_frame_unref(decoded_frame);
    return err < 0 ? err : ret;
}
2485 
/* Decode one subtitle packet and either render it for sub2video, queue it
 * for a not-yet-initialized sub2video filter, or encode it directly to all
 * matching subtitle output streams. Handles -fix_sub_duration by delaying
 * each subtitle until the next one arrives. */
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output,
                               int *decode_failed)
{
    AVSubtitle subtitle;
    int free_sub = 1;
    int i, ret = avcodec_decode_subtitle2(ist->dec_ctx,
                                          &subtitle, got_output, pkt);

    check_decode_result(NULL, got_output, ret);

    if (ret < 0 || !*got_output) {
        *decode_failed = 1;
        if (!pkt->size)
            sub2video_flush(ist);
        return ret;
    }

    if (ist->fix_sub_duration) {
        int end = 1;
        if (ist->prev_sub.got_output) {
            /* cap the previous subtitle's duration at the current pts */
            end = av_rescale(subtitle.pts - ist->prev_sub.subtitle.pts,
                             1000, AV_TIME_BASE);
            if (end < ist->prev_sub.subtitle.end_display_time) {
                av_log(ist->dec_ctx, AV_LOG_DEBUG,
                       "Subtitle duration reduced from %"PRId32" to %d%s\n",
                       ist->prev_sub.subtitle.end_display_time, end,
                       end <= 0 ? ", dropping it" : "");
                ist->prev_sub.subtitle.end_display_time = end;
            }
        }
        /* emit the previous subtitle now; keep the current one for later */
        FFSWAP(int,        *got_output, ist->prev_sub.got_output);
        FFSWAP(int,        ret,         ist->prev_sub.ret);
        FFSWAP(AVSubtitle, subtitle,    ist->prev_sub.subtitle);
        if (end <= 0)
            goto out;
    }

    if (!*got_output)
        return ret;

    if (ist->sub2video.frame) {
        sub2video_update(ist, &subtitle);
    } else if (ist->nb_filters) {
        /* filters exist but sub2video is not ready: queue the subtitle */
        if (!ist->sub2video.sub_queue)
            ist->sub2video.sub_queue = av_fifo_alloc(8 * sizeof(AVSubtitle));
        if (!ist->sub2video.sub_queue)
            exit_program(1);
        if (!av_fifo_space(ist->sub2video.sub_queue)) {
            ret = av_fifo_realloc2(ist->sub2video.sub_queue, 2 * av_fifo_size(ist->sub2video.sub_queue));
            if (ret < 0)
                exit_program(1);
        }
        av_fifo_generic_write(ist->sub2video.sub_queue, &subtitle, sizeof(subtitle), NULL);
        free_sub = 0; /* ownership moved into the queue */
    }

    if (!subtitle.num_rects)
        goto out;

    ist->frames_decoded++;

    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || !ost->encoding_needed
            || ost->enc->type != AVMEDIA_TYPE_SUBTITLE)
            continue;

        do_subtitle_out(output_files[ost->file_index], ost, &subtitle);
    }

out:
    if (free_sub)
        avsubtitle_free(&subtitle);
    return ret;
}
2562 
/* Signal EOF (at the stream's current pts, converted to stream time base)
 * on every filtergraph input fed by this stream. Returns 0 or the first
 * error from ifilter_send_eof(). */
static int send_filter_eof(InputStream *ist)
{
    int i, ret;
    /* TODO keep pts also in stream time base to avoid converting back */
    int64_t pts = av_rescale_q_rnd(ist->pts, AV_TIME_BASE_Q, ist->st->time_base,
                                   AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);

    for (i = 0; i < ist->nb_filters; i++) {
        ret = ifilter_send_eof(ist->filters[i], pts);
        if (ret < 0)
            return ret;
    }
    return 0;
}
2577 
/* pkt = NULL means EOF (needed to flush decoder buffers) */
/* Main per-packet entry point: maintain the stream's dts/pts estimates,
 * run the appropriate decoder (or handle stream copy), forward EOF to the
 * filters, and stream-copy the packet to any non-encoding outputs.
 * Returns 0 once the stream has fully reached EOF, nonzero otherwise. */
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
{
    int ret = 0, i;
    int repeating = 0;
    int eof_reached = 0;

    AVPacket avpkt;
    if (!ist->saw_first_ts) {
        /* start dts before 0 to compensate for decoder delay (B-frames) */
        ist->dts = ist->st->avg_frame_rate.num ? - ist->dec_ctx->has_b_frames * AV_TIME_BASE / av_q2d(ist->st->avg_frame_rate) : 0;
        ist->pts = 0;
        if (pkt && pkt->pts != AV_NOPTS_VALUE && !ist->decoding_needed) {
            ist->dts += av_rescale_q(pkt->pts, ist->st->time_base, AV_TIME_BASE_Q);
            ist->pts = ist->dts; //unused but better to set it to a value thats not totally wrong
        }
        ist->saw_first_ts = 1;
    }

    if (ist->next_dts == AV_NOPTS_VALUE)
        ist->next_dts = ist->dts;
    if (ist->next_pts == AV_NOPTS_VALUE)
        ist->next_pts = ist->pts;

    if (!pkt) {
        /* EOF handling */
        av_init_packet(&avpkt);
        avpkt.data = NULL;
        avpkt.size = 0;
    } else {
        avpkt = *pkt;
    }

    if (pkt && pkt->dts != AV_NOPTS_VALUE) {
        ist->next_dts = ist->dts = av_rescale_q(pkt->dts, ist->st->time_base, AV_TIME_BASE_Q);
        if (ist->dec_ctx->codec_type != AVMEDIA_TYPE_VIDEO || !ist->decoding_needed)
            ist->next_pts = ist->pts = ist->dts;
    }

    // while we have more to decode or while the decoder did output something on EOF
    while (ist->decoding_needed) {
        int64_t duration_dts = 0;
        int64_t duration_pts = 0;
        int got_output = 0;
        int decode_failed = 0;

        ist->pts = ist->next_pts;
        ist->dts = ist->next_dts;

        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            ret = decode_audio    (ist, repeating ? NULL : &avpkt, &got_output,
                                   &decode_failed);
            break;
        case AVMEDIA_TYPE_VIDEO:
            ret = decode_video    (ist, repeating ? NULL : &avpkt, &got_output, &duration_pts, !pkt,
                                   &decode_failed);
            if (!repeating || !pkt || got_output) {
                /* estimate the frame duration: packet duration, else codec
                 * framerate (accounting for repeated fields via the parser) */
                if (pkt && pkt->duration) {
                    duration_dts = av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
                } else if(ist->dec_ctx->framerate.num != 0 && ist->dec_ctx->framerate.den != 0) {
                    int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict+1 : ist->dec_ctx->ticks_per_frame;
                    duration_dts = ((int64_t)AV_TIME_BASE *
                                    ist->dec_ctx->framerate.den * ticks) /
                                    ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
                }

                if(ist->dts != AV_NOPTS_VALUE && duration_dts) {
                    ist->next_dts += duration_dts;
                }else
                    ist->next_dts = AV_NOPTS_VALUE;
            }

            if (got_output) {
                if (duration_pts > 0) {
                    ist->next_pts += av_rescale_q(duration_pts, ist->st->time_base, AV_TIME_BASE_Q);
                } else {
                    ist->next_pts += duration_dts;
                }
            }
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            if (repeating)
                break;
            ret = transcode_subtitles(ist, &avpkt, &got_output, &decode_failed);
            if (!pkt && ret >= 0)
                ret = AVERROR_EOF;
            break;
        default:
            return -1;
        }

        if (ret == AVERROR_EOF) {
            eof_reached = 1;
            break;
        }

        if (ret < 0) {
            if (decode_failed) {
                av_log(NULL, AV_LOG_ERROR, "Error while decoding stream #%d:%d: %s\n",
                       ist->file_index, ist->st->index, av_err2str(ret));
            } else {
                av_log(NULL, AV_LOG_FATAL, "Error while processing the decoded "
                       "data for stream #%d:%d\n", ist->file_index, ist->st->index);
            }
            if (!decode_failed || exit_on_error)
                exit_program(1);
            break;
        }

        if (got_output)
            ist->got_output = 1;

        if (!got_output)
            break;

        // During draining, we might get multiple output frames in this loop.
        // ffmpeg.c does not drain the filter chain on configuration changes,
        // which means if we send multiple frames at once to the filters, and
        // one of those frames changes configuration, the buffered frames will
        // be lost. This can upset certain FATE tests.
        // Decode only 1 frame per call on EOF to appease these FATE tests.
        // The ideal solution would be to rewrite decoding to use the new
        // decoding API in a better way.
        if (!pkt)
            break;

        repeating = 1;
    }

    /* after flushing, send an EOF on all the filter inputs attached to the stream */
    /* except when looping we need to flush but not to send an EOF */
    if (!pkt && ist->decoding_needed && eof_reached && !no_eof) {
        int ret = send_filter_eof(ist);
        if (ret < 0) {
            av_log(NULL, AV_LOG_FATAL, "Error marking filters as finished\n");
            exit_program(1);
        }
    }

    /* handle stream copy */
    if (!ist->decoding_needed && pkt) {
        ist->dts = ist->next_dts;
        switch (ist->dec_ctx->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            av_assert1(pkt->duration >= 0);
            if (ist->dec_ctx->sample_rate) {
                ist->next_dts += ((int64_t)AV_TIME_BASE * ist->dec_ctx->frame_size) /
                                  ist->dec_ctx->sample_rate;
            } else {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            }
            break;
        case AVMEDIA_TYPE_VIDEO:
            if (ist->framerate.num) {
                // TODO: Remove work-around for c99-to-c89 issue 7
                AVRational time_base_q = AV_TIME_BASE_Q;
                int64_t next_dts = av_rescale_q(ist->next_dts, time_base_q, av_inv_q(ist->framerate));
                ist->next_dts = av_rescale_q(next_dts + 1, av_inv_q(ist->framerate), time_base_q);
            } else if (pkt->duration) {
                ist->next_dts += av_rescale_q(pkt->duration, ist->st->time_base, AV_TIME_BASE_Q);
            } else if(ist->dec_ctx->framerate.num != 0) {
                int ticks= av_stream_get_parser(ist->st) ? av_stream_get_parser(ist->st)->repeat_pict + 1 : ist->dec_ctx->ticks_per_frame;
                ist->next_dts += ((int64_t)AV_TIME_BASE *
                                  ist->dec_ctx->framerate.den * ticks) /
                                  ist->dec_ctx->framerate.num / ist->dec_ctx->ticks_per_frame;
            }
            break;
        }
        ist->pts = ist->dts;
        ist->next_pts = ist->next_dts;
    }
    for (i = 0; i < nb_output_streams; i++) {
        OutputStream *ost = output_streams[i];

        if (!check_output_constraints(ist, ost) || ost->encoding_needed)
            continue;

        do_streamcopy(ist, ost, pkt);
    }

    return !eof_reached;
}
2760 
/* Generate an SDP description covering all RTP output muxers and either
 * print it to stdout or write it to -sdp_file. Deferred until every
 * output file's header has been written. */
static void print_sdp(void)
{
    char sdp[16384];
    int i;
    int j;
    AVIOContext *sdp_pb;
    AVFormatContext **avc;

    /* wait until all output headers are written */
    for (i = 0; i < nb_output_files; i++) {
        if (!output_files[i]->header_written)
            return;
    }

    avc = av_malloc_array(nb_output_files, sizeof(*avc));
    if (!avc)
        exit_program(1);
    /* collect only the RTP muxer contexts */
    for (i = 0, j = 0; i < nb_output_files; i++) {
        if (!strcmp(output_files[i]->ctx->oformat->name, "rtp")) {
            avc[j] = output_files[i]->ctx;
            j++;
        }
    }

    if (!j)
        goto fail;

    av_sdp_create(avc, j, sdp, sizeof(sdp));

    if (!sdp_filename) {
        printf("SDP:\n%s\n", sdp);
        fflush(stdout);
    } else {
        if (avio_open2(&sdp_pb, sdp_filename, AVIO_FLAG_WRITE, &int_cb, NULL) < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to open sdp file '%s'\n", sdp_filename);
        } else {
            avio_printf(sdp_pb, "SDP:\n%s", sdp);
            avio_closep(&sdp_pb);
            av_freep(&sdp_filename);
        }
    }

fail:
    av_freep(&avc);
}
2805 
/* AVCodecContext.get_format callback: walk the decoder's offered pixel
 * format list and pick the first software format, or initialize the
 * requested/auto-selected hwaccel when a hardware format is offered.
 * Returns the chosen format, or AV_PIX_FMT_NONE on hwaccel init failure. */
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
{
    InputStream *ist = s->opaque;
    const enum AVPixelFormat *p;
    int ret;

    for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
        const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(*p);
        const AVCodecHWConfig  *config = NULL;
        int i;

        /* first non-hwaccel format in the list: take it */
        if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
            break;

        if (ist->hwaccel_id == HWACCEL_GENERIC ||
            ist->hwaccel_id == HWACCEL_AUTO) {
            /* look for a decoder hw config matching this pixel format */
            for (i = 0;; i++) {
                config = avcodec_get_hw_config(s->codec, i);
                if (!config)
                    break;
                if (!(config->methods &
                      AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
                    continue;
                if (config->pix_fmt == *p)
                    break;
            }
        }
        if (config) {
            if (config->device_type != ist->hwaccel_device_type) {
                // Different hwaccel offered, ignore.
                continue;
            }

            ret = hwaccel_decode_init(s);
            if (ret < 0) {
                if (ist->hwaccel_id == HWACCEL_GENERIC) {
                    av_log(NULL, AV_LOG_FATAL,
                           "%s hwaccel requested for input stream #%d:%d, "
                           "but cannot be initialized.\n",
                           av_hwdevice_get_type_name(config->device_type),
                           ist->file_index, ist->st->index);
                    return AV_PIX_FMT_NONE;
                }
                continue;
            }
        } else {
            /* fall back to the legacy hwaccel table */
            const HWAccel *hwaccel = NULL;
            int i;
            for (i = 0; hwaccels[i].name; i++) {
                if (hwaccels[i].pix_fmt == *p) {
                    hwaccel = &hwaccels[i];
                    break;
                }
            }
            if (!hwaccel) {
                // No hwaccel supporting this pixfmt.
                continue;
            }
            if (hwaccel->id != ist->hwaccel_id) {
                // Does not match requested hwaccel.
                continue;
            }

            ret = hwaccel->init(s);
            if (ret < 0) {
                av_log(NULL, AV_LOG_FATAL,
                       "%s hwaccel requested for input stream #%d:%d, "
                       "but cannot be initialized.\n", hwaccel->name,
                       ist->file_index, ist->st->index);
                return AV_PIX_FMT_NONE;
            }
        }

        if (ist->hw_frames_ctx) {
            s->hw_frames_ctx = av_buffer_ref(ist->hw_frames_ctx);
            if (!s->hw_frames_ctx)
                return AV_PIX_FMT_NONE;
        }

        ist->hwaccel_pix_fmt = *p;
        break;
    }

    return *p;
}
2891 
/* AVCodecContext.get_buffer2 callback: delegate to the hwaccel's buffer
 * allocator for hwaccel frames, otherwise use the default allocator. */
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
{
    InputStream *ist = s->opaque;

    if (ist->hwaccel_get_buffer && frame->format == ist->hwaccel_pix_fmt)
        return ist->hwaccel_get_buffer(s, frame, flags);

    return avcodec_default_get_buffer2(s, frame, flags);
}
2901 
/* Open the decoder for one input stream (if decoding is needed), wiring up
 * the hwaccel callbacks, decoder options and hardware device. On failure
 * fills 'error' (error_len bytes) and returns an AVERROR. */
static int init_input_stream(int ist_index, char *error, int error_len)
{
    int ret;
    InputStream *ist = input_streams[ist_index];

    if (ist->decoding_needed) {
        AVCodec *codec = ist->dec;
        if (!codec) {
            snprintf(error, error_len, "Decoder (codec %s) not found for input stream #%d:%d",
                    avcodec_get_name(ist->dec_ctx->codec_id), ist->file_index, ist->st->index);
            return AVERROR(EINVAL);
        }

        ist->dec_ctx->opaque                = ist;
        ist->dec_ctx->get_format            = get_format;
        ist->dec_ctx->get_buffer2           = get_buffer;
        ist->dec_ctx->thread_safe_callbacks = 1;

        av_opt_set_int(ist->dec_ctx, "refcounted_frames", 1, 0);
        if (ist->dec_ctx->codec_id == AV_CODEC_ID_DVB_SUBTITLE &&
           (ist->decoding_needed & DECODING_FOR_OST)) {
            av_dict_set(&ist->decoder_opts, "compute_edt", "1", AV_DICT_DONT_OVERWRITE);
            if (ist->decoding_needed & DECODING_FOR_FILTER)
                av_log(NULL, AV_LOG_WARNING, "Warning using DVB subtitles for filtering and output at the same time is not fully supported, also see -compute_edt [0|1]\n");
        }

        av_dict_set(&ist->decoder_opts, "sub_text_format", "ass", AV_DICT_DONT_OVERWRITE);

        /* Useful for subtitles retiming by lavf (FIXME), skipping samples in
         * audio, and video decoders such as cuvid or mediacodec */
        ist->dec_ctx->pkt_timebase = ist->st->time_base;

        if (!av_dict_get(ist->decoder_opts, "threads", NULL, 0))
            av_dict_set(&ist->decoder_opts, "threads", "auto", 0);
        /* Attached pics are sparse, therefore we would not want to delay their decoding till EOF. */
        if (ist->st->disposition & AV_DISPOSITION_ATTACHED_PIC)
            av_dict_set(&ist->decoder_opts, "threads", "1", 0);

        ret = hw_device_setup_for_decode(ist);
        if (ret < 0) {
            snprintf(error, error_len, "Device setup failed for "
                     "decoder on input stream #%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }

        if ((ret = avcodec_open2(ist->dec_ctx, codec, &ist->decoder_opts)) < 0) {
            if (ret == AVERROR_EXPERIMENTAL)
                abort_codec_experimental(codec, 0);

            snprintf(error, error_len,
                     "Error while opening decoder for input stream "
                     "#%d:%d : %s",
                     ist->file_index, ist->st->index, av_err2str(ret));
            return ret;
        }
        assert_avoptions(ist->decoder_opts);
    }

    ist->next_pts = AV_NOPTS_VALUE;
    ist->next_dts = AV_NOPTS_VALUE;

    return 0;
}
2966 
/* Return the input stream an output stream is mapped from, or NULL for
 * outputs with no direct source (e.g. pure filter/attachment outputs). */
static InputStream *get_input_stream(OutputStream *ost)
{
    if (ost->source_index >= 0)
        return input_streams[ost->source_index];
    return NULL;
}
2973 
/* qsort() comparator for int64_t values. Returns -1/0/1 using the
 * three-way comparison idiom instead of subtraction, which could
 * overflow for widely separated values. */
static int compare_int64(const void *a, const void *b)
{
    const int64_t x = *(const int64_t *)a;
    const int64_t y = *(const int64_t *)b;
    return (x > y) - (x < y);
}
2978 
/* open the muxer when all the streams are initialized */
/* If every stream of this output file is initialized, write the muxer
 * header, emit the SDP if requested, and flush packets that were queued
 * while waiting. Returns 0 (including "not ready yet") or an AVERROR. */
static int check_init_output_file(OutputFile *of, int file_index)
{
    int ret, i;

    for (i = 0; i < of->ctx->nb_streams; i++) {
        OutputStream *ost = output_streams[of->ost_index + i];
        if (!ost->initialized)
            return 0;
    }

    of->ctx->interrupt_callback = int_cb;

    ret = avformat_write_header(of->ctx, &of->opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR,
               "Could not write header for output file #%d "
               "(incorrect codec parameters ?): %s\n",
               file_index, av_err2str(ret));
        return ret;
    }
    //assert_avoptions(of->opts);
    of->header_written = 1;

    av_dump_format(of->ctx, file_index, of->ctx->url, 1);

    if (sdp_filename || want_sdp)
        print_sdp();

    /* flush the muxing queues */
    for (i = 0; i < of->ctx->nb_streams; i++) {
        OutputStream *ost = output_streams[of->ost_index + i];

        /* try to improve muxing time_base (only possible if nothing has been written yet) */
        if (!av_fifo_size(ost->muxing_queue))
            ost->mux_timebase = ost->st->time_base;

        while (av_fifo_size(ost->muxing_queue)) {
            AVPacket pkt;
            av_fifo_generic_read(ost->muxing_queue, &pkt, sizeof(pkt), NULL);
            write_packet(of, &pkt, ost, 1);
        }
    }

    return 0;
}
3025 
/* Initialize the chain of bitstream filters attached to an output stream:
 * propagate codec parameters and time base through each filter in order,
 * then copy the final parameters/time base back onto the stream. */
static int init_output_bsfs(OutputStream *ost)
{
    AVBSFContext *ctx;
    int i, ret;

    if (!ost->nb_bitstream_filters)
        return 0;

    for (i = 0; i < ost->nb_bitstream_filters; i++) {
        ctx = ost->bsf_ctx[i];

        /* each filter's input is the previous filter's output */
        ret = avcodec_parameters_copy(ctx->par_in,
                                      i ? ost->bsf_ctx[i - 1]->par_out : ost->st->codecpar);
        if (ret < 0)
            return ret;

        ctx->time_base_in = i ? ost->bsf_ctx[i - 1]->time_base_out : ost->st->time_base;

        ret = av_bsf_init(ctx);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error initializing bitstream filter: %s\n",
                   ost->bsf_ctx[i]->filter->name);
            return ret;
        }
    }

    ctx = ost->bsf_ctx[ost->nb_bitstream_filters - 1];
    ret = avcodec_parameters_copy(ost->st->codecpar, ctx->par_out);
    if (ret < 0)
        return ret;

    ost->st->time_base = ctx->time_base_out;

    return 0;
}
3061 
/* Set up an output stream in stream-copy mode: copy codec parameters from
 * the mapped input stream (honoring -tag/-aspect overrides), transfer
 * timing info, side data, disposition and rotation metadata, and apply
 * codec-specific fixups (MP3/AC3 block_align, SAR, frame rates). */
static int init_output_stream_streamcopy(OutputStream *ost)
{
    OutputFile *of = output_files[ost->file_index];
    InputStream *ist = get_input_stream(ost);
    AVCodecParameters *par_dst = ost->st->codecpar;
    AVCodecParameters *par_src = ost->ref_par;
    AVRational sar;
    int i, ret;
    uint32_t codec_tag = par_dst->codec_tag;

    av_assert0(ist && !ost->filter);

    ret = avcodec_parameters_to_context(ost->enc_ctx, ist->st->codecpar);
    if (ret >= 0)
        ret = av_opt_set_dict(ost->enc_ctx, &ost->encoder_opts);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL,
               "Error setting up codec context options.\n");
        return ret;
    }

    ret = avcodec_parameters_from_context(par_src, ost->enc_ctx);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL,
               "Error getting reference codec parameters.\n");
        return ret;
    }

    if (!codec_tag) {
        unsigned int codec_tag_tmp;
        /* keep the source tag only if the muxer has no tag table, or the
         * source tag maps back to the same codec id, or no tag exists for
         * this codec id in the muxer's table */
        if (!of->ctx->oformat->codec_tag ||
            av_codec_get_id (of->ctx->oformat->codec_tag, par_src->codec_tag) == par_src->codec_id ||
            !av_codec_get_tag2(of->ctx->oformat->codec_tag, par_src->codec_id, &codec_tag_tmp))
            codec_tag = par_src->codec_tag;
    }

    ret = avcodec_parameters_copy(par_dst, par_src);
    if (ret < 0)
        return ret;

    par_dst->codec_tag = codec_tag;

    if (!ost->frame_rate.num)
        ost->frame_rate = ist->framerate;
    ost->st->avg_frame_rate = ost->frame_rate;

    ret = avformat_transfer_internal_stream_timing_info(of->ctx->oformat, ost->st, ist->st, copy_tb);
    if (ret < 0)
        return ret;

    // copy timebase while removing common factors
    if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
        ost->st->time_base = av_add_q(av_stream_get_codec_timebase(ost->st), (AVRational){0, 1});

    // copy estimated duration as a hint to the muxer
    if (ost->st->duration <= 0 && ist->st->duration > 0)
        ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);

    // copy disposition
    ost->st->disposition = ist->st->disposition;

    if (ist->st->nb_side_data) {
        for (i = 0; i < ist->st->nb_side_data; i++) {
            const AVPacketSideData *sd_src = &ist->st->side_data[i];
            uint8_t *dst_data;

            dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
            if (!dst_data)
                return AVERROR(ENOMEM);
            memcpy(dst_data, sd_src->data, sd_src->size);
        }
    }

    /* a user-forced rotation replaces any copied display matrix */
    if (ost->rotate_overridden) {
        uint8_t *sd = av_stream_new_side_data(ost->st, AV_PKT_DATA_DISPLAYMATRIX,
                                              sizeof(int32_t) * 9);
        if (sd)
            av_display_rotation_set((int32_t *)sd, -ost->rotate_override_value);
    }

    switch (par_dst->codec_type) {
    case AVMEDIA_TYPE_AUDIO:
        if (audio_volume != 256) {
            av_log(NULL, AV_LOG_FATAL, "-acodec copy and -vol are incompatible (frames are not decoded)\n");
            exit_program(1);
        }
        if((par_dst->block_align == 1 || par_dst->block_align == 1152 || par_dst->block_align == 576) && par_dst->codec_id == AV_CODEC_ID_MP3)
            par_dst->block_align= 0;
        if(par_dst->codec_id == AV_CODEC_ID_AC3)
            par_dst->block_align= 0;
        break;
    case AVMEDIA_TYPE_VIDEO:
        if (ost->frame_aspect_ratio.num) { // overridden by the -aspect cli option
            sar =
                av_mul_q(ost->frame_aspect_ratio,
                         (AVRational){ par_dst->height, par_dst->width });
            av_log(NULL, AV_LOG_WARNING, "Overriding aspect ratio "
                   "with stream copy may produce invalid files\n");
        }
        else if (ist->st->sample_aspect_ratio.num)
            sar = ist->st->sample_aspect_ratio;
        else
            sar = par_src->sample_aspect_ratio;
        ost->st->sample_aspect_ratio = par_dst->sample_aspect_ratio = sar;
        ost->st->avg_frame_rate = ist->st->avg_frame_rate;
        ost->st->r_frame_rate = ist->st->r_frame_rate;
        break;
    }

    ost->mux_timebase = ist->st->time_base;

    return 0;
}
3175 
/* Set the "encoder" metadata tag on an output stream to the libavcodec
 * version string plus the encoder name, unless the user already set one.
 * When -fflags/-flags request bitexact output, only "Lavc" is written. */
static void set_encoder_id(OutputFile *of, OutputStream *ost)
{
    AVDictionaryEntry *e;

    uint8_t *encoder_string;
    int encoder_string_len;
    int format_flags = 0;
    int codec_flags = ost->enc_ctx->flags;

    if (av_dict_get(ost->st->metadata, "encoder",  NULL, 0))
        return;

    /* resolve the effective fflags / codec flags from the option dicts */
    e = av_dict_get(of->opts, "fflags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(of->ctx, "fflags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(of->ctx, o, e->value, &format_flags);
    }
    e = av_dict_get(ost->encoder_opts, "flags", NULL, 0);
    if (e) {
        const AVOption *o = av_opt_find(ost->enc_ctx, "flags", NULL, 0, 0);
        if (!o)
            return;
        av_opt_eval_flags(ost->enc_ctx, o, e->value, &codec_flags);
    }

    encoder_string_len = sizeof(LIBAVCODEC_IDENT) + strlen(ost->enc->name) + 2;
    encoder_string     = av_mallocz(encoder_string_len);
    if (!encoder_string)
        exit_program(1);

    if (!(format_flags & AVFMT_FLAG_BITEXACT) && !(codec_flags & AV_CODEC_FLAG_BITEXACT))
        av_strlcpy(encoder_string, LIBAVCODEC_IDENT " ", encoder_string_len);
    else
        av_strlcpy(encoder_string, "Lavc ", encoder_string_len);
    av_strlcat(encoder_string, ost->enc->name, encoder_string_len);
    av_dict_set(&ost->st->metadata, "encoder",  encoder_string,
                AV_DICT_DONT_STRDUP_VAL | AV_DICT_DONT_OVERWRITE);
}
3216 
/* Parse the -force_key_frames value 'kf' (a comma-separated list of times,
 * or "chapters[+offset]") into a sorted array of keyframe PTS values in
 * avctx->time_base units, stored in ost->forced_kf_pts/forced_kf_count.
 * Note: 'kf' is modified in place (commas are overwritten with NULs).
 * Fatal errors terminate the program via exit_program(). */
3217 static void parse_forced_key_frames(char *kf, OutputStream *ost,
3218  AVCodecContext *avctx)
3219 {
3220  char *p;
3221  int n = 1, i, size, index = 0;
3222  int64_t t, *pts;
3223 
 /* count entries: one more than the number of commas */
3224  for (p = kf; *p; p++)
3225  if (*p == ',')
3226  n++;
3227  size = n;
3228  pts = av_malloc_array(size, sizeof(*pts));
3229  if (!pts) {
3230  av_log(NULL, AV_LOG_FATAL, "Could not allocate forced key frames array.\n");
3231  exit_program(1);
3232  }
3233 
3234  p = kf;
3235  for (i = 0; i < n; i++) {
3236  char *next = strchr(p, ',');
3237 
3238  if (next)
3239  *next++ = 0;
3240 
 /* "chapters" entry: force a keyframe at every chapter start,
  * optionally shifted by the time offset that follows the keyword */
3241  if (!memcmp(p, "chapters", 8)) {
3242 
3243  AVFormatContext *avf = output_files[ost->file_index]->ctx;
3244  int j;
3245 
 /* grow the array by (nb_chapters - 1): one slot was already
  * counted for the "chapters" entry itself */
3246  if (avf->nb_chapters > INT_MAX - size ||
3247  !(pts = av_realloc_f(pts, size += avf->nb_chapters - 1,
3248  sizeof(*pts)))) {
3250  "Could not allocate forced key frames array.\n");
3251  exit_program(1);
3252  }
3253  t = p[8] ? parse_time_or_die("force_key_frames", p + 8, 1) : 0;
3254  t = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3255 
3256  for (j = 0; j < avf->nb_chapters; j++) {
3257  AVChapter *c = avf->chapters[j];
3258  av_assert1(index < size);
3259  pts[index++] = av_rescale_q(c->start, c->time_base,
3260  avctx->time_base) + t;
3261  }
3262 
3263  } else {
3264 
 /* plain timestamp entry */
3265  t = parse_time_or_die("force_key_frames", p, 1);
3266  av_assert1(index < size);
3267  pts[index++] = av_rescale_q(t, AV_TIME_BASE_Q, avctx->time_base);
3268 
3269  }
3270 
3271  p = next;
3272  }
3273 
 /* keep the list sorted so the encoder loop can scan it linearly */
3274  av_assert0(index == size);
3275  qsort(pts, size, sizeof(*pts), compare_int64);
3276  ost->forced_kf_count = size;
3277  ost->forced_kf_pts = pts;
3278 }
3279 
3280 static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
3281 {
3282  InputStream *ist = get_input_stream(ost);
3283  AVCodecContext *enc_ctx = ost->enc_ctx;
3284  AVFormatContext *oc;
3285 
3286  if (ost->enc_timebase.num > 0) {
3287  enc_ctx->time_base = ost->enc_timebase;
3288  return;
3289  }
3290 
3291  if (ost->enc_timebase.num < 0) {
3292  if (ist) {
3293  enc_ctx->time_base = ist->st->time_base;
3294  return;
3295  }
3296 
3297  oc = output_files[ost->file_index]->ctx;
3298  av_log(oc, AV_LOG_WARNING, "Input stream data not available, using default time base\n");
3299  }
3300 
3301  enc_ctx->time_base = default_time_base;
3302 }
3303 
/* Body of init_output_stream_encode() (signature on the preceding line,
 * outside this view): configures the encoder context of an output stream
 * that will be transcoded — frame rate, time base, dimensions, aspect
 * ratio, pixel/sample parameters and forced keyframes — before the
 * encoder is opened. Several original lines are absent from this
 * extraction; comments below are hedged accordingly. */
3305 {
3306  InputStream *ist = get_input_stream(ost);
3307  AVCodecContext *enc_ctx = ost->enc_ctx;
3309  AVFormatContext *oc = output_files[ost->file_index]->ctx;
3310  int j, ret;
3311 
3312  set_encoder_id(output_files[ost->file_index], ost);
3313 
3314  // Muxers use AV_PKT_DATA_DISPLAYMATRIX to signal rotation. On the other
3315  // hand, the legacy API makes demuxers set "rotate" metadata entries,
3316  // which have to be filtered out to prevent leaking them to output files.
3317  av_dict_set(&ost->st->metadata, "rotate", NULL, 0);
3318 
3319  if (ist) {
 /* carry over stream-level properties from the matching input */
3320  ost->st->disposition = ist->st->disposition;
3321 
3322  dec_ctx = ist->dec_ctx;
3323 
3324  enc_ctx->chroma_sample_location = dec_ctx->chroma_sample_location;
3325  } else {
 /* no input stream: look for another output stream of the same type
  * in this file (presumably to decide on a default disposition) */
3326  for (j = 0; j < oc->nb_streams; j++) {
3327  AVStream *st = oc->streams[j];
3328  if (st != ost->st && st->codecpar->codec_type == ost->st->codecpar->codec_type)
3329  break;
3330  }
3331  if (j == oc->nb_streams)
3332  if (ost->st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO ||
3335  }
3336 
3337  if (enc_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
 /* frame rate fallback chain: -r option, filter output, input -r,
  * input r_frame_rate, then a hard default of 25 fps */
3338  if (!ost->frame_rate.num)
3340  if (ist && !ost->frame_rate.num)
3341  ost->frame_rate = ist->framerate;
3342  if (ist && !ost->frame_rate.num)
3343  ost->frame_rate = ist->st->r_frame_rate;
3344  if (ist && !ost->frame_rate.num) {
3345  ost->frame_rate = (AVRational){25, 1};
3347  "No information "
3348  "about the input framerate is available. Falling "
3349  "back to a default value of 25fps for output stream #%d:%d. Use the -r option "
3350  "if you want a different framerate.\n",
3351  ost->file_index, ost->index);
3352  }
3353 // ost->frame_rate = ist->st->avg_frame_rate.num ? ist->st->avg_frame_rate : (AVRational){25, 1};
 /* snap to the nearest frame rate the encoder supports, unless -force_fps */
3354  if (ost->enc->supported_framerates && !ost->force_fps) {
3355  int idx = av_find_nearest_q_idx(ost->frame_rate, ost->enc->supported_framerates);
3356  ost->frame_rate = ost->enc->supported_framerates[idx];
3357  }
3358  // reduce frame rate for mpeg4 to be within the spec limits
3359  if (enc_ctx->codec_id == AV_CODEC_ID_MPEG4) {
3360  av_reduce(&ost->frame_rate.num, &ost->frame_rate.den,
3361  ost->frame_rate.num, ost->frame_rate.den, 65535);
3362  }
3363  }
3364 
3365  switch (enc_ctx->codec_type) {
3366  case AVMEDIA_TYPE_AUDIO:
 /* cap bits_per_raw_sample at what the sample format can hold */
3368  if (dec_ctx)
3369  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3370  av_get_bytes_per_sample(enc_ctx->sample_fmt) << 3);
3374 
3375  init_encoder_time_base(ost, av_make_q(1, enc_ctx->sample_rate));
3376  break;
3377 
3378  case AVMEDIA_TYPE_VIDEO:
3380 
3381  if (!(enc_ctx->time_base.num && enc_ctx->time_base.den))
3383  if ( av_q2d(enc_ctx->time_base) < 0.001 && video_sync_method != VSYNC_PASSTHROUGH
3385  av_log(oc, AV_LOG_WARNING, "Frame rate very high for a muxer not efficiently supporting it.\n"
3386  "Please consider specifying a lower framerate, a different muxer or -vsync 2\n");
3387  }
 /* convert the parsed forced-keyframe times into the encoder time base */
3388  for (j = 0; j < ost->forced_kf_count; j++)
3389  ost->forced_kf_pts[j] = av_rescale_q(ost->forced_kf_pts[j],
3391  enc_ctx->time_base);
3392 
 /* geometry and pixel format come from the filter graph sink */
3393  enc_ctx->width = av_buffersink_get_w(ost->filter->filter);
3394  enc_ctx->height = av_buffersink_get_h(ost->filter->filter);
3395  enc_ctx->sample_aspect_ratio = ost->st->sample_aspect_ratio =
3396  ost->frame_aspect_ratio.num ? // overridden by the -aspect cli option
3397  av_mul_q(ost->frame_aspect_ratio, (AVRational){ enc_ctx->height, enc_ctx->width }) :
3399 
3400  enc_ctx->pix_fmt = av_buffersink_get_format(ost->filter->filter);
3401  if (dec_ctx)
3402  enc_ctx->bits_per_raw_sample = FFMIN(dec_ctx->bits_per_raw_sample,
3403  av_pix_fmt_desc_get(enc_ctx->pix_fmt)->comp[0].depth);
3404 
3405  enc_ctx->framerate = ost->frame_rate;
3406 
3407  ost->st->avg_frame_rate = ost->frame_rate;
3408 
3409  if (!dec_ctx ||
3410  enc_ctx->width != dec_ctx->width ||
3411  enc_ctx->height != dec_ctx->height ||
3412  enc_ctx->pix_fmt != dec_ctx->pix_fmt) {
3414  }
3415 
 /* -top option: 0 = bottom field first, 1 = top field first */
3416  if (ost->top_field_first == 0) {
3417  enc_ctx->field_order = AV_FIELD_BB;
3418  } else if (ost->top_field_first == 1) {
3419  enc_ctx->field_order = AV_FIELD_TT;
3420  }
3421 
3422  if (ost->forced_keyframes) {
3423  if (!strncmp(ost->forced_keyframes, "expr:", 5)) {
3426  if (ret < 0) {
3428  "Invalid force_key_frames expression '%s'\n", ost->forced_keyframes+5);
3429  return ret;
3430  }
3435 
3436  // Don't parse the 'forced_keyframes' in case of 'keep-source-keyframes',
3437  // parse it only for static kf timings
3438  } else if(strncmp(ost->forced_keyframes, "source", 6)) {
3440  }
3441  }
3442  break;
3443  case AVMEDIA_TYPE_SUBTITLE:
3444  enc_ctx->time_base = AV_TIME_BASE_Q;
 /* subtitle rendering needs a canvas size; borrow it from the input */
3445  if (!enc_ctx->width) {
3446  enc_ctx->width = input_streams[ost->source_index]->st->codecpar->width;
3447  enc_ctx->height = input_streams[ost->source_index]->st->codecpar->height;
3448  }
3449  break;
3450  case AVMEDIA_TYPE_DATA:
3451  break;
3452  default:
3453  abort();
3454  break;
3455  }
3456 
3457  ost->mux_timebase = enc_ctx->time_base;
3458 
3459  return 0;
3460 }
3461 
/* Fully initialize one output stream: for transcoded streams this opens the
 * encoder (after configuring it via init_output_stream_encode()) and copies
 * side data / timing hints to the AVStream; for streamcopy it delegates to
 * init_output_stream_streamcopy(). Finally applies -disposition overrides,
 * sets up bitstream filters and, once all streams of the file are ready,
 * writes the output file header. On failure a human-readable message is
 * written into 'error'. Some original lines are absent from this extraction. */
static int init_output_stream(OutputStream *ost, char *error, int error_len)
3463 {
3464  int ret = 0;
3465 
3466  if (ost->encoding_needed) {
3467  AVCodec *codec = ost->enc;
3468  AVCodecContext *dec = NULL;
3469  InputStream *ist;
3470 
3471  ret = init_output_stream_encode(ost);
3472  if (ret < 0)
3473  return ret;
3474 
3475  if ((ist = get_input_stream(ost)))
3476  dec = ist->dec_ctx;
 /* propagate the ASS subtitle header from decoder to encoder */
3477  if (dec && dec->subtitle_header) {
3478  /* ASS code assumes this buffer is null terminated so add extra byte. */
3480  if (!ost->enc_ctx->subtitle_header)
3481  return AVERROR(ENOMEM);
3482  memcpy(ost->enc_ctx->subtitle_header, dec->subtitle_header, dec->subtitle_header_size);
3484  }
3485  if (!av_dict_get(ost->encoder_opts, "threads", NULL, 0))
3486  av_dict_set(&ost->encoder_opts, "threads", "auto", 0);
 /* default audio bitrate of 128k when the encoder has no defaults and
  * the user did not specify one */
3487  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3488  !codec->defaults &&
3489  !av_dict_get(ost->encoder_opts, "b", NULL, 0) &&
3490  !av_dict_get(ost->encoder_opts, "ab", NULL, 0))
3491  av_dict_set(&ost->encoder_opts, "b", "128000", 0);
3492 
 /* hardware frames context from the filter graph, or explicit device setup */
3493  if (ost->filter && av_buffersink_get_hw_frames_ctx(ost->filter->filter) &&
3497  if (!ost->enc_ctx->hw_frames_ctx)
3498  return AVERROR(ENOMEM);
3499  } else {
3500  ret = hw_device_setup_for_encode(ost);
3501  if (ret < 0) {
3502  snprintf(error, error_len, "Device setup failed for "
3503  "encoder on output stream #%d:%d : %s",
3504  ost->file_index, ost->index, av_err2str(ret));
3505  return ret;
3506  }
3507  }
 /* subtitle transcoding is only supported text->text or bitmap->bitmap */
3508  if (ist && ist->dec->type == AVMEDIA_TYPE_SUBTITLE && ost->enc->type == AVMEDIA_TYPE_SUBTITLE) {
3509  int input_props = 0, output_props = 0;
3510  AVCodecDescriptor const *input_descriptor =
3512  AVCodecDescriptor const *output_descriptor =
3514  if (input_descriptor)
3515  input_props = input_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3516  if (output_descriptor)
3517  output_props = output_descriptor->props & (AV_CODEC_PROP_TEXT_SUB | AV_CODEC_PROP_BITMAP_SUB);
3518  if (input_props && output_props && input_props != output_props) {
3519  snprintf(error, error_len,
3520  "Subtitle encoding currently only possible from text to text "
3521  "or bitmap to bitmap");
3522  return AVERROR_INVALIDDATA;
3523  }
3524  }
3525 
3526  if ((ret = avcodec_open2(ost->enc_ctx, codec, &ost->encoder_opts)) < 0) {
3527  if (ret == AVERROR_EXPERIMENTAL)
3528  abort_codec_experimental(codec, 1);
3529  snprintf(error, error_len,
3530  "Error while opening encoder for output stream #%d:%d - "
3531  "maybe incorrect parameters such as bit_rate, rate, width or height",
3532  ost->file_index, ost->index);
3533  return ret;
3534  }
3535  if (ost->enc->type == AVMEDIA_TYPE_AUDIO &&
3536  !(ost->enc->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
3538  ost->enc_ctx->frame_size);
 /* common user error: bitrate given in kbit/s instead of bit/s */
3540  if (ost->enc_ctx->bit_rate && ost->enc_ctx->bit_rate < 1000 &&
3541  ost->enc_ctx->codec_id != AV_CODEC_ID_CODEC2 /* don't complain about 700 bit/s modes */)
3542  av_log(NULL, AV_LOG_WARNING, "The bitrate parameter is set too low."
3543  " It takes bits/s as argument, not kbits/s\n");
3544 
3546  if (ret < 0) {
3548  "Error initializing the output stream codec context.\n");
3549  exit_program(1);
3550  }
3551  /*
3552  * FIXME: ost->st->codec should't be needed here anymore.
3553  */
3554  ret = avcodec_copy_context(ost->st->codec, ost->enc_ctx);
3555  if (ret < 0)
3556  return ret;
3557 
 /* mirror the encoder's coded side data onto the stream */
3558  if (ost->enc_ctx->nb_coded_side_data) {
3559  int i;
3560 
3561  for (i = 0; i < ost->enc_ctx->nb_coded_side_data; i++) {
3562  const AVPacketSideData *sd_src = &ost->enc_ctx->coded_side_data[i];
3563  uint8_t *dst_data;
3564 
3565  dst_data = av_stream_new_side_data(ost->st, sd_src->type, sd_src->size);
3566  if (!dst_data)
3567  return AVERROR(ENOMEM);
3568  memcpy(dst_data, sd_src->data, sd_src->size);
3569  }
3570  }
3571 
3572  /*
3573  * Add global input side data. For now this is naive, and copies it
3574  * from the input stream's global side data. All side data should
3575  * really be funneled over AVFrame and libavfilter, then added back to
3576  * packet side data, and then potentially using the first packet for
3577  * global side data.
3578  */
3579  if (ist) {
3580  int i;
3581  for (i = 0; i < ist->st->nb_side_data; i++) {
3582  AVPacketSideData *sd = &ist->st->side_data[i];
3583  uint8_t *dst = av_stream_new_side_data(ost->st, sd->type, sd->size);
3584  if (!dst)
3585  return AVERROR(ENOMEM);
3586  memcpy(dst, sd->data, sd->size);
 /* autorotate already rotated the frames; reset the matrix to 0 */
3587  if (ist->autorotate && sd->type == AV_PKT_DATA_DISPLAYMATRIX)
3588  av_display_rotation_set((uint32_t *)dst, 0);
3589  }
3590  }
3591 
3592  // copy timebase while removing common factors
3593  if (ost->st->time_base.num <= 0 || ost->st->time_base.den <= 0)
3594  ost->st->time_base = av_add_q(ost->enc_ctx->time_base, (AVRational){0, 1});
3595 
3596  // copy estimated duration as a hint to the muxer
3597  if (ost->st->duration <= 0 && ist && ist->st->duration > 0)
3598  ost->st->duration = av_rescale_q(ist->st->duration, ist->st->time_base, ost->st->time_base);
3599 
3600  ost->st->codec->codec= ost->enc_ctx->codec;
3601  } else if (ost->stream_copy) {
3602  ret = init_output_stream_streamcopy(ost);
3603  if (ret < 0)
3604  return ret;
3605  }
3606 
3607  // parse user provided disposition, and update stream values
3608  if (ost->disposition) {
 /* local AVOption table so av_opt_eval_flags() can parse names like
  * "default+forced" into AV_DISPOSITION_* bits */
3609  static const AVOption opts[] = {
3610  { "disposition" , NULL, 0, AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT64_MIN, INT64_MAX, .unit = "flags" },
3611  { "default" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEFAULT }, .unit = "flags" },
3612  { "dub" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DUB }, .unit = "flags" },
3613  { "original" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ORIGINAL }, .unit = "flags" },
3614  { "comment" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_COMMENT }, .unit = "flags" },
3615  { "lyrics" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_LYRICS }, .unit = "flags" },
3616  { "karaoke" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_KARAOKE }, .unit = "flags" },
3617  { "forced" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_FORCED }, .unit = "flags" },
3618  { "hearing_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_HEARING_IMPAIRED }, .unit = "flags" },
3619  { "visual_impaired" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_VISUAL_IMPAIRED }, .unit = "flags" },
3620  { "clean_effects" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CLEAN_EFFECTS }, .unit = "flags" },
3621  { "attached_pic" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_ATTACHED_PIC }, .unit = "flags" },
3622  { "captions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_CAPTIONS }, .unit = "flags" },
3623  { "descriptions" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DESCRIPTIONS }, .unit = "flags" },
3624  { "dependent" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_DEPENDENT }, .unit = "flags" },
3625  { "metadata" , NULL, 0, AV_OPT_TYPE_CONST, { .i64 = AV_DISPOSITION_METADATA }, .unit = "flags" },
3626  { NULL },
3627  };
3628  static const AVClass class = {
3629  .class_name = "",
3630  .item_name = av_default_item_name,
3631  .option = opts,
3632  .version = LIBAVUTIL_VERSION_INT,
3633  };
3634  const AVClass *pclass = &class;
3635 
3636  ret = av_opt_eval_flags(&pclass, &opts[0], ost->disposition, &ost->st->disposition);
3637  if (ret < 0)
3638  return ret;
3639  }
3640 
3641  /* initialize bitstream filters for the output stream
3642  * needs to be done here, because the codec id for streamcopy is not
3643  * known until now */
3644  ret = init_output_bsfs(ost);
3645  if (ret < 0)
3646  return ret;
3647 
3648  ost->initialized = 1;
3649 
 /* write the file header once every stream of this file is initialized */
3650  ret = check_init_output_file(output_files[ost->file_index], ost->file_index);
3651  if (ret < 0)
3652  return ret;
3653 
3654  return ret;
3655 }
3656 
/* Warn (once per stream index) when a packet arrives for an input stream
 * that appeared after the initial stream scan. nb_streams_warn tracks the
 * highest stream index already reported so the warning is not repeated. */
3657 static void report_new_stream(int input_index, AVPacket *pkt)
3658 {
3659  InputFile *file = input_files[input_index];
3660  AVStream *st = file->ctx->streams[pkt->stream_index];
3661 
3662  if (pkt->stream_index < file->nb_streams_warn)
3663  return;
3664  av_log(file->ctx, AV_LOG_WARNING,
3665  "New %s stream %d:%d at pos:%"PRId64" and DTS:%ss\n",
 /* NOTE(review): the first format argument (media type string, original
  * line 3666) is not visible in this extraction */
3667  input_index, pkt->stream_index,
3668  pkt->pos, av_ts2timestr(pkt->dts, &st->time_base));
3669  file->nb_streams_warn = pkt->stream_index + 1;
3670 }
3671 
/* One-time setup before the main transcode loop: resolve filter-fed output
 * streams to a source input stream, initialize framerate emulation and all
 * input streams, open the encoders of non-filtered output streams, discard
 * unused programs, write headers for stream-less files, and finally dump the
 * stream mapping. Returns 0 on success or a negative error (the message is
 * printed from the local 'error' buffer). */
3672 static int transcode_init(void)
3673 {
3674  int ret = 0, i, j, k;
3675  AVFormatContext *oc;
3676  OutputStream *ost;
3677  InputStream *ist;
3678  char error[1024] = {0};
3679 
 /* give filter-graph-fed outputs a source_index when the graph has a
  * single input, so mapping info can be reported */
3680  for (i = 0; i < nb_filtergraphs; i++) {
3681  FilterGraph *fg = filtergraphs[i];
3682  for (j = 0; j < fg->nb_outputs; j++) {
3683  OutputFilter *ofilter = fg->outputs[j];
3684  if (!ofilter->ost || ofilter->ost->source_index >= 0)
3685  continue;
3686  if (fg->nb_inputs != 1)
3687  continue;
3688  for (k = nb_input_streams-1; k >= 0 ; k--)
3689  if (fg->inputs[0]->ist == input_streams[k])
3690  break;
3691  ofilter->ost->source_index = k;
3692  }
3693  }
3694 
3695  /* init framerate emulation */
3696  for (i = 0; i < nb_input_files; i++) {
3697  InputFile *ifile = input_files[i];
3698  if (ifile->rate_emu)
3699  for (j = 0; j < ifile->nb_streams; j++)
3700  input_streams[j + ifile->ist_index]->start = av_gettime_relative();
3701  }
3702 
3703  /* init input streams */
3704  for (i = 0; i < nb_input_streams; i++)
3705  if ((ret = init_input_stream(i, error, sizeof(error))) < 0) {
 /* on failure close all encoders before bailing out */
3706  for (i = 0; i < nb_output_streams; i++) {
3707  ost = output_streams[i];
3708  avcodec_close(ost->enc_ctx);
3709  }
3710  goto dump_format;
3711  }
3712 
3713  /* open each encoder */
3714  for (i = 0; i < nb_output_streams; i++) {
3715  // skip streams fed from filtergraphs until we have a frame for them
3716  if (output_streams[i]->filter)
3717  continue;
3718 
3719  ret = init_output_stream(output_streams[i], error, sizeof(error));
3720  if (ret < 0)
3721  goto dump_format;
3722  }
3723 
3724  /* discard unused programs */
3725  for (i = 0; i < nb_input_files; i++) {
3726  InputFile *ifile = input_files[i];
3727  for (j = 0; j < ifile->ctx->nb_programs; j++) {
3728  AVProgram *p = ifile->ctx->programs[j];
3729  int discard = AVDISCARD_ALL;
3730 
 /* keep the program if at least one of its streams is used */
3731  for (k = 0; k < p->nb_stream_indexes; k++)
3732  if (!input_streams[ifile->ist_index + p->stream_index[k]]->discard) {
3733  discard = AVDISCARD_DEFAULT;
3734  break;
3735  }
3736  p->discard = discard;
3737  }
3738  }
3739 
3740  /* write headers for files with no streams */
3741  for (i = 0; i < nb_output_files; i++) {
3742  oc = output_files[i]->ctx;
3743  if (oc->oformat->flags & AVFMT_NOSTREAMS && oc->nb_streams == 0) {
3744  ret = check_init_output_file(output_files[i], i);
3745  if (ret < 0)
3746  goto dump_format;
3747  }
3748  }
3749 
3750  dump_format:
3751  /* dump the stream mapping */
3752  av_log(NULL, AV_LOG_INFO, "Stream mapping:\n");
3753  for (i = 0; i < nb_input_streams; i++) {
3754  ist = input_streams[i];
3755 
3756  for (j = 0; j < ist->nb_filters; j++) {
3757  if (!filtergraph_is_simple(ist->filters[j]->graph)) {
3758  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d (%s) -> %s",
3759  ist->file_index, ist->st->index, ist->dec ? ist->dec->name : "?",
3760  ist->filters[j]->name);
3761  if (nb_filtergraphs > 1)
3762  av_log(NULL, AV_LOG_INFO, " (graph %d)", ist->filters[j]->graph->index);
3763  av_log(NULL, AV_LOG_INFO, "\n");
3764  }
3765  }
3766  }
3767 
3768  for (i = 0; i < nb_output_streams; i++) {
3769  ost = output_streams[i];
3770 
3771  if (ost->attachment_filename) {
3772  /* an attached file */
3773  av_log(NULL, AV_LOG_INFO, " File %s -> Stream #%d:%d\n",
3774  ost->attachment_filename, ost->file_index, ost->index);
3775  continue;
3776  }
3777 
3778  if (ost->filter && !filtergraph_is_simple(ost->filter->graph)) {
3779  /* output from a complex graph */
3780  av_log(NULL, AV_LOG_INFO, " %s", ost->filter->name);
3781  if (nb_filtergraphs > 1)
3782  av_log(NULL, AV_LOG_INFO, " (graph %d)", ost->filter->graph->index);
3783 
3784  av_log(NULL, AV_LOG_INFO, " -> Stream #%d:%d (%s)\n", ost->file_index,
3785  ost->index, ost->enc ? ost->enc->name : "?");
3786  continue;
3787  }
3788 
3789  av_log(NULL, AV_LOG_INFO, " Stream #%d:%d -> #%d:%d",
3790  input_streams[ost->source_index]->file_index,
3791  input_streams[ost->source_index]->st->index,
3792  ost->file_index,
3793  ost->index);
3794  if (ost->sync_ist != input_streams[ost->source_index])
3795  av_log(NULL, AV_LOG_INFO, " [sync #%d:%d]",
3796  ost->sync_ist->file_index,
3797  ost->sync_ist->st->index);
3798  if (ost->stream_copy)
3799  av_log(NULL, AV_LOG_INFO, " (copy)");
3800  else {
 /* report decoder/encoder names, marking the default ("native")
  * implementation when the codec name matches the descriptor name */
3801  const AVCodec *in_codec = input_streams[ost->source_index]->dec;
3802  const AVCodec *out_codec = ost->enc;
3803  const char *decoder_name = "?";
3804  const char *in_codec_name = "?";
3805  const char *encoder_name = "?";
3806  const char *out_codec_name = "?";
3807  const AVCodecDescriptor *desc;
3808 
3809  if (in_codec) {
3810  decoder_name = in_codec->name;
3811  desc = avcodec_descriptor_get(in_codec->id);
3812  if (desc)
3813  in_codec_name = desc->name;
3814  if (!strcmp(decoder_name, in_codec_name))
3815  decoder_name = "native";
3816  }
3817 
3818  if (out_codec) {
3819  encoder_name = out_codec->name;
3820  desc = avcodec_descriptor_get(out_codec->id);
3821  if (desc)
3822  out_codec_name = desc->name;
3823  if (!strcmp(encoder_name, out_codec_name))
3824  encoder_name = "native";
3825  }
3826 
3827  av_log(NULL, AV_LOG_INFO, " (%s (%s) -> %s (%s))",
3828  in_codec_name, decoder_name,
3829  out_codec_name, encoder_name);
3830  }
3831  av_log(NULL, AV_LOG_INFO, "\n");
3832  }
3833 
3834  if (ret) {
3835  av_log(NULL, AV_LOG_ERROR, "%s\n", error);
3836  return ret;
3837  }
 /* NOTE(review): original lines 3838-3839 are absent from this extraction;
  * presumably they mark initialization as done — confirm against upstream */
3840 
3841  return 0;
3842 }
3843 
3844 /* Return 1 if there remain streams where more output is wanted, 0 otherwise. */
3845 static int need_output(void)
3846 {
3847  int i;
3848 
3849  for (i = 0; i < nb_output_streams; i++) {
3850  OutputStream *ost = output_streams[i];
3851  OutputFile *of = output_files[ost->file_index];
3852  AVFormatContext *os = output_files[ost->file_index]->ctx;
3853 
3854  if (ost->finished ||
3855  (os->pb && avio_tell(os->pb) >= of->limit_filesize))
3856  continue;
3857  if (ost->frame_number >= ost->max_frames) {
3858  int j;
3859  for (j = 0; j < of->ctx->nb_streams; j++)
3860  close_output_stream(output_streams[of->ost_index + j]);
3861  continue;
3862  }
3863 
3864  return 1;
3865  }
3866 
3867  return 0;
3868 }
3869 
3870 /**
3871  * Select the output stream to process.
3872  *
3873  * @return selected output stream, or NULL if none available
3874  */
/* Body of choose_output() (signature on the preceding line, outside this
 * view): pick the output stream with the smallest current DTS (rescaled to
 * AV_TIME_BASE_Q) to process next; streams without a valid DTS yet sort
 * first (INT64_MIN). An uninitialized stream whose inputs are not done is
 * returned immediately; an unavailable stream yields NULL. */
3876 {
3877  int i;
3878  int64_t opts_min = INT64_MAX;
3879  OutputStream *ost_min = NULL;
3880 
3881  for (i = 0; i < nb_output_streams; i++) {
3882  OutputStream *ost = output_streams[i];
3883  int64_t opts = ost->st->cur_dts == AV_NOPTS_VALUE ? INT64_MIN :
3884  av_rescale_q(ost->st->cur_dts, ost->st->time_base,
3885  AV_TIME_BASE_Q);
3886  if (ost->st->cur_dts == AV_NOPTS_VALUE)
3887  av_log(NULL, AV_LOG_DEBUG, "cur_dts is invalid (this is harmless if it occurs once at the start per stream)\n");
3888 
 /* streams not yet initialized get priority so they can start up */
3889  if (!ost->initialized && !ost->inputs_done)
3890  return ost;
3891 
3892  if (!ost->finished && opts < opts_min) {
3893  opts_min = opts;
3894  ost_min = ost->unavailable ? NULL : ost;
3895  }
3896  }
3897  return ost_min;
3898 }
3899 
/* Enable (on != 0) or disable terminal echo on stdin; a no-op on systems
 * without termios. */
static void set_tty_echo(int on)
{
#if HAVE_TERMIOS_H
    struct termios tty;

    if (tcgetattr(0, &tty) != 0)
        return;

    if (on)
        tty.c_lflag |= ECHO;
    else
        tty.c_lflag &= ~ECHO;

    tcsetattr(0, TCSANOW, &tty);
#endif
}
3911 
/* Poll the keyboard (at most once per 100ms) and act on single-key commands:
 * q quit, +/- verbosity, s QP histogram, h hex/packet dump cycling, c/C send
 * or queue filter commands, d/D debug flags, ? help. Returns AVERROR_EXIT on
 * quit/signal, 0 otherwise. Some original lines are absent from this
 * extraction. */
3912 static int check_keyboard_interaction(int64_t cur_time)
3913 {
3914  int i, ret, key;
3915  static int64_t last_time;
3916  if (received_nb_signals)
3917  return AVERROR_EXIT;
3918  /* read_key() returns 0 on EOF */
3919  if(cur_time - last_time >= 100000 && !run_as_daemon){
3920  key = read_key();
3921  last_time = cur_time;
3922  }else
3923  key = -1;
3924  if (key == 'q')
3925  return AVERROR_EXIT;
3926  if (key == '+') av_log_set_level(av_log_get_level()+10);
3927  if (key == '-') av_log_set_level(av_log_get_level()-10);
3928  if (key == 's') qp_hist ^= 1;
 /* 'h' cycles: off -> packet dump -> packet+hex dump -> off */
3929  if (key == 'h'){
3930  if (do_hex_dump){
3931  do_hex_dump = do_pkt_dump = 0;
3932  } else if(do_pkt_dump){
3933  do_hex_dump = 1;
3934  } else
3935  do_pkt_dump = 1;
3937  }
 /* 'c' sends a command to the first matching filter, 'C' to all */
3938  if (key == 'c' || key == 'C'){
3939  char buf[4096], target[64], command[256], arg[256] = {0};
3940  double time;
3941  int k, n = 0;
3942  fprintf(stderr, "\nEnter command: <target>|all <time>|-1 <command>[ <argument>]\n");
3943  i = 0;
3944  set_tty_echo(1);
3945  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3946  if (k > 0)
3947  buf[i++] = k;
3948  buf[i] = 0;
3949  set_tty_echo(0);
3950  fprintf(stderr, "\n");
3951  if (k > 0 &&
3952  (n = sscanf(buf, "%63[^ ] %lf %255[^ ] %255[^\n]", target, &time, command, arg)) >= 3) {
3953  av_log(NULL, AV_LOG_DEBUG, "Processing command target:%s time:%f command:%s arg:%s",
3954  target, time, command, arg);
3955  for (i = 0; i < nb_filtergraphs; i++) {
3956  FilterGraph *fg = filtergraphs[i];
3957  if (fg->graph) {
 /* negative time: send immediately; otherwise queue for later */
3958  if (time < 0) {
3959  ret = avfilter_graph_send_command(fg->graph, target, command, arg, buf, sizeof(buf),
3960  key == 'c' ? AVFILTER_CMD_FLAG_ONE : 0);
3961  fprintf(stderr, "Command reply for stream %d: ret:%d res:\n%s", i, ret, buf);
3962  } else if (key == 'c') {
3963  fprintf(stderr, "Queuing commands only on filters supporting the specific command is unsupported\n");
3964  ret = AVERROR_PATCHWELCOME;
3965  } else {
3966  ret = avfilter_graph_queue_command(fg->graph, target, command, arg, 0, time);
3967  if (ret < 0)
3968  fprintf(stderr, "Queuing command failed with error %s\n", av_err2str(ret));
3969  }
3970  }
3971  }
3972  } else {
3974  "Parse error, at least 3 arguments were expected, "
3975  "only %d given in string '%s'\n", n, buf);
3976  }
3977  }
 /* 'D' cycles through debug modes, 'd' reads a numeric value */
3978  if (key == 'd' || key == 'D'){
3979  int debug=0;
3980  if(key == 'D') {
3981  debug = input_streams[0]->st->codec->debug<<1;
3982  if(!debug) debug = 1;
3983  while(debug & (FF_DEBUG_DCT_COEFF
3984 #if FF_API_DEBUG_MV
3985  |FF_DEBUG_VIS_QP|FF_DEBUG_VIS_MB_TYPE
3986 #endif
3987  )) //unsupported, would just crash
3988  debug += debug;
3989  }else{
3990  char buf[32];
3991  int k = 0;
3992  i = 0;
3993  set_tty_echo(1);
3994  while ((k = read_key()) != '\n' && k != '\r' && i < sizeof(buf)-1)
3995  if (k > 0)
3996  buf[i++] = k;
3997  buf[i] = 0;
3998  set_tty_echo(0);
3999  fprintf(stderr, "\n");
4000  if (k <= 0 || sscanf(buf, "%d", &debug)!=1)
4001  fprintf(stderr,"error parsing debug value\n");
4002  }
 /* apply the chosen debug value to every decoder and encoder */
4003  for(i=0;i<nb_input_streams;i++) {
4004  input_streams[i]->st->codec->debug = debug;
4005  }
4006  for(i=0;i<nb_output_streams;i++) {
4007  OutputStream *ost = output_streams[i];
4008  ost->enc_ctx->debug = debug;
4009  }
4010  if(debug) av_log_set_level(AV_LOG_DEBUG);
4011  fprintf(stderr,"debug=%d\n", debug);
4012  }
4013  if (key == '?'){
4014  fprintf(stderr, "key function\n"
4015  "? show this help\n"
4016  "+ increase verbosity\n"
4017  "- decrease verbosity\n"
4018  "c Send command to first matching filter supporting it\n"
4019  "C Send/Queue command to all matching filters\n"
4020  "D cycle through available debug modes\n"
4021  "h dump packets/hex press to cycle through the 3 states\n"
4022  "q quit\n"
4023  "s Show QP histogram\n"
4024  );
4025  }
4026  return 0;
4027 }
4028 
4029 #if HAVE_THREADS
/* Per-input-file reader thread: reads packets from the demuxer and pushes
 * them onto the file's thread message queue until EOF or error. For
 * non-blocking queues, a full queue triggers one blocking retry (with a
 * warning about thread_queue_size — the log call on original line 4052 is
 * not visible in this extraction). Errors are propagated to the consumer
 * via av_thread_message_queue_set_err_recv(). */
4030 static void *input_thread(void *arg)
4031 {
4032  InputFile *f = arg;
4033  unsigned flags = f->non_blocking ? AV_THREAD_MESSAGE_NONBLOCK : 0;
4034  int ret = 0;
4035 
4036  while (1) {
4037  AVPacket pkt;
4038  ret = av_read_frame(f->ctx, &pkt);
4039 
 /* demuxer has no data right now: back off and retry */
4040  if (ret == AVERROR(EAGAIN)) {
4041  av_usleep(10000);
4042  continue;
4043  }
4044  if (ret < 0) {
4045  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4046  break;
4047  }
4048  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
 /* queue full in non-blocking mode: fall back to blocking once */
4049  if (flags && ret == AVERROR(EAGAIN)) {
4050  flags = 0;
4051  ret = av_thread_message_queue_send(f->in_thread_queue, &pkt, flags);
4053  "Thread message queue blocking; consider raising the "
4054  "thread_queue_size option (current value: %d)\n",
4055  f->thread_queue_size);
4056  }
4057  if (ret < 0) {
4058  if (ret != AVERROR_EOF)
4059  av_log(f->ctx, AV_LOG_ERROR,
4060  "Unable to send packet to main thread: %s\n",
4061  av_err2str(ret));
4062  av_packet_unref(&pkt);
4063  av_thread_message_queue_set_err_recv(f->in_thread_queue, ret);
4064  break;
4065  }
4066  }
4067 
4068  return NULL;
4069 }
4070 
/* Stop and clean up the reader thread of input file i: drain any queued
 * packets, join the thread and free the message queue. (Original line 4078,
 * between the guard and the drain loop, is not visible in this extraction —
 * presumably it signals the sender to stop; confirm against upstream.) */
4071 static void free_input_thread(int i)
4072 {
4073  InputFile *f = input_files[i];
4074  AVPacket pkt;
4075 
4076  if (!f || !f->in_thread_queue)
4077  return;
 /* drain packets still sitting in the queue so nothing leaks */
4079  while (av_thread_message_queue_recv(f->in_thread_queue, &pkt, 0) >= 0)
4080  av_packet_unref(&pkt);
4081 
4082  pthread_join(f->thread, NULL);
4083  f->joined = 1;
4084  av_thread_message_queue_free(&f->in_thread_queue);
4085 }
4086 
4087 static void free_input_threads(void)
4088 {
4089  int i;
4090 
4091  for (i = 0; i < nb_input_files; i++)
4092  free_input_thread(i);
4093 }
4094 
4095 static int init_input_thread(int i)
4096 {
4097  int ret;
4098  InputFile *f = input_files[i];
4099 
4100  if (nb_input_files == 1)
4101  return 0;
4102 
4103  if (f->ctx->pb ? !f->ctx->pb->seekable :
4104  strcmp(f->ctx->iformat->name, "lavfi"))
4105  f->non_blocking = 1;
4106  ret = av_thread_message_queue_alloc(&f->in_thread_queue,
4107  f->thread_queue_size, sizeof(AVPacket));
4108  if (ret < 0)
4109  return ret;
4110 
4111  if ((ret = pthread_create(&f->thread, NULL, input_thread, f))) {
4112  av_log(NULL, AV_LOG_ERROR, "pthread_create failed: %s. Try to increase `ulimit -v` or decrease `ulimit -s`.\n", strerror(ret));
4113  av_thread_message_queue_free(&f->in_thread_queue);
4114  return AVERROR(ret);
4115  }
4116 
4117  return 0;
4118 }
4119 
4120 static int init_input_threads(void)
4121 {
4122  int i, ret;
4123 
4124  for (i = 0; i < nb_input_files; i++) {
4125  ret = init_input_thread(i);
4126  if (ret < 0)
4127  return ret;
4128  }
4129  return 0;
4130 }
4131 
/* Fetch the next packet from the input file's reader-thread queue,
 * non-blocking when f->non_blocking is set. (The second branch of the
 * ternary, original line 4136, is not visible in this extraction —
 * presumably the non-blocking flag vs. 0.) */
4132 static int get_input_packet_mt(InputFile *f, AVPacket *pkt)
4133 {
4134  return av_thread_message_queue_recv(f->in_thread_queue, pkt,
4135  f->non_blocking ?
4137 }
4138 #endif
4139 
/* Body of get_input_packet() (signature on the preceding line, outside this
 * view): read the next packet from input file f, either directly from the
 * demuxer or — with multiple input files — from the reader thread's queue.
 * With -re (rate_emu), returns EAGAIN while any stream of the file is
 * ahead of wall-clock time. */
4141 {
4142  if (f->rate_emu) {
4143  int i;
4144  for (i = 0; i < f->nb_streams; i++) {
4145  InputStream *ist = input_streams[f->ist_index + i];
 /* compare stream DTS (scaled to microseconds) with elapsed time */
4146  int64_t pts = av_rescale(ist->dts, 1000000, AV_TIME_BASE);
4147  int64_t now = av_gettime_relative() - ist->start;
4148  if (pts > now)
4149  return AVERROR(EAGAIN);
4150  }
4151  }
4152 
4153 #if HAVE_THREADS
4154  if (nb_input_files > 1)
4155  return get_input_packet_mt(f, pkt);
4156 #endif
4157  return av_read_frame(f->ctx, pkt);
4158 }
4159 
4160 static int got_eagain(void)
4161 {
4162  int i;
4163  for (i = 0; i < nb_output_streams; i++)
4164  if (output_streams[i]->unavailable)
4165  return 1;
4166  return 0;
4167 }
4168 
4169 static void reset_eagain(void)
4170 {
4171  int i;
4172  for (i = 0; i < nb_input_files; i++)
4173  input_files[i]->eagain = 0;
4174  for (i = 0; i < nb_output_streams; i++)
4175  output_streams[i]->unavailable = 0;
4176 }
4177 
4178 // set duration to max(tmp, duration) in a proper time base and return duration's time_base
4179 static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base,
4180  AVRational time_base)
4181 {
4182  int ret;
4183 
4184  if (!*duration) {
4185  *duration = tmp;
4186  return tmp_time_base;
4187  }
4188 
4189  ret = av_compare_ts(*duration, time_base, tmp, tmp_time_base);
4190  if (ret < 0) {
4191  *duration = tmp;
4192  return tmp_time_base;
4193  }
4194 
4195  return time_base;
4196 }
4197 
/* Body of seek_to_start() (signature on the preceding line, outside this
 * view): rewind an input file for -stream_loop. Seeks back to the start
 * time, then estimates the file's total duration — using audio sample
 * counts when any audio stream is present (video frame durations are
 * imprecise), otherwise one frame duration at the stream's frame rate —
 * and folds it into ifile->duration via duration_max(). Decrements the
 * remaining loop count. */
4199 {
4200  InputStream *ist;
4201  AVCodecContext *avctx;
4202  int i, ret, has_audio = 0;
4203  int64_t duration = 0;
4204 
4205  ret = av_seek_frame(is, -1, is->start_time, 0);
4206  if (ret < 0)
4207  return ret;
4208 
4209  for (i = 0; i < ifile->nb_streams; i++) {
4210  ist = input_streams[ifile->ist_index + i];
4211  avctx = ist->dec_ctx;
4212 
4213  /* duration is the length of the last frame in a stream
4214  * when audio stream is present we don't care about
4215  * last video frame length because it's not defined exactly */
4216  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples)
4217  has_audio = 1;
4218  }
4219 
4220  for (i = 0; i < ifile->nb_streams; i++) {
4221  ist = input_streams[ifile->ist_index + i];
4222  avctx = ist->dec_ctx;
4223 
4224  if (has_audio) {
 /* last-frame length from the decoded sample count */
4225  if (avctx->codec_type == AVMEDIA_TYPE_AUDIO && ist->nb_samples) {
4226  AVRational sample_rate = {1, avctx->sample_rate};
4227 
4228  duration = av_rescale_q(ist->nb_samples, sample_rate, ist->st->time_base);
4229  } else {
4230  continue;
4231  }
4232  } else {
 /* no audio: approximate one frame duration from the frame rate */
4233  if (ist->framerate.num) {
4234  duration = av_rescale_q(1, av_inv_q(ist->framerate), ist->st->time_base);
4235  } else if (ist->st->avg_frame_rate.num) {
4236  duration = av_rescale_q(1, av_inv_q(ist->st->avg_frame_rate), ist->st->time_base);
4237  } else {
4238  duration = 1;
4239  }
4240  }
4241  if (!ifile->duration)
4242  ifile->time_base = ist->st->time_base;
4243  /* the total duration of the stream, max_pts - min_pts is
4244  * the duration of the stream without the last frame */
4245  if (ist->max_pts > ist->min_pts && ist->max_pts - (uint64_t)ist->min_pts < INT64_MAX - duration)
4246  duration += ist->max_pts - ist->min_pts;
4247  ifile->time_base = duration_max(duration, &ifile->duration, ist->st->time_base,
4248  ifile->time_base);
4249  }
4250 
 /* positive loop count: consume one iteration (negative = loop forever) */
4251  if (ifile->loop > 0)
4252  ifile->loop--;
4253 
4254  return ret;
4255 }
4256 
4257 /*
4258  * Return
4259  * - 0 -- one packet was read and processed
4260  * - AVERROR(EAGAIN) -- no packets were available for selected file,
4261  * this function should be called again
4262  * - AVERROR_EOF -- this function should not be called again
4263  */
4264 static int process_input(int file_index)
4265 {
4266  InputFile *ifile = input_files[file_index];
4268  InputStream *ist;
4269  AVPacket pkt;
4270  int ret, thread_ret, i, j;
4271  int64_t duration;
4272  int64_t pkt_dts;
4273 
4274  is = ifile->ctx;
4275  ret = get_input_packet(ifile, &pkt);
4276 
4277  if (ret == AVERROR(EAGAIN)) {
4278  ifile->eagain = 1;
4279  return ret;
4280  }
4281  if (ret < 0 && ifile->loop) {
4282  AVCodecContext *avctx;
4283  for (i = 0; i < ifile->nb_streams; i++) {
4284  ist = input_streams[ifile->ist_index + i];
4285  avctx = ist->dec_ctx;
4286  if (ist->decoding_needed) {
4287  ret = process_input_packet(ist, NULL, 1);
4288  if (ret>0)
4289  return 0;
4290  avcodec_flush_buffers(avctx);
4291  }
4292  }
4293 #if HAVE_THREADS
4294  free_input_thread(file_index);
4295 #endif
4296  ret = seek_to_start(ifile, is);
4297 #if HAVE_THREADS
4298  thread_ret = init_input_thread(file_index);
4299  if (thread_ret < 0)
4300  return thread_ret;
4301 #endif
4302  if (ret < 0)
4303  av_log(NULL, AV_LOG_WARNING, "Seek to start failed.\n");
4304  else
4305  ret = get_input_packet(ifile, &pkt);
4306  if (ret == AVERROR(EAGAIN)) {
4307  ifile->eagain = 1;
4308  return ret;
4309  }
4310  }
4311  if (ret < 0) {
4312  if (ret != AVERROR_EOF) {
4313  print_error(is->url, ret);
4314  if (exit_on_error)
4315  exit_program(1);
4316  }
4317 
4318  for (i = 0; i < ifile->nb_streams; i++) {
4319  ist = input_streams[ifile->ist_index + i];
4320  if (ist->decoding_needed) {
4321  ret = process_input_packet(ist, NULL, 0);
4322  if (ret>0)
4323  return 0;
4324  }
4325 
4326  /* mark all outputs that don't go through lavfi as finished */
4327  for (j = 0; j < nb_output_streams; j++) {
4328  OutputStream *ost = output_streams[j];
4329 
4330  if (ost->source_index == ifile->ist_index + i &&
4331  (ost->stream_copy || ost->enc->type == AVMEDIA_TYPE_SUBTITLE))
4332  finish_output_stream(ost);
4333  }
4334  }
4335 
4336  ifile->eof_reached = 1;
4337  return AVERROR(EAGAIN);
4338  }
4339 
4340  reset_eagain();
4341 
4342  if (do_pkt_dump) {
4344  is->streams[pkt.stream_index]);
4345  }
4346  /* the following test is needed in case new streams appear
4347  dynamically in stream : we ignore them */
4348  if (pkt.stream_index >= ifile->nb_streams) {
4349  report_new_stream(file_index, &pkt);
4350  goto discard_packet;
4351  }
4352 
4353  ist = input_streams[ifile->ist_index + pkt.stream_index];
4354 
4355  ist->data_size += pkt.size;
4356  ist->nb_packets++;
4357 
4358  if (ist->discard)
4359  goto discard_packet;
4360 
4361  if (pkt.flags & AV_PKT_FLAG_CORRUPT) {
4363  "%s: corrupt input packet in stream %d\n", is->url, pkt.stream_index);
4364  if (exit_on_error)
4365  exit_program(1);
4366  }
4367 
4368  if (debug_ts) {
4369  av_log(NULL, AV_LOG_INFO, "demuxer -> ist_index:%d type:%s "
4370  "next_dts:%s next_dts_time:%s next_pts:%s next_pts_time:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4374  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4375  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4376  av_ts2str(input_files[ist->file_index]->ts_offset),
4377  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4378  }
4379 
4380  if(!ist->wrap_correction_done && is->start_time != AV_NOPTS_VALUE && ist->st->pts_wrap_bits < 64){
4381  int64_t stime, stime2;
4382  // Correcting starttime based on the enabled streams
4383  // FIXME this ideally should be done before the first use of starttime but we do not know which are the enabled streams at that point.
4384  // so we instead do it here as part of discontinuity handling
4385  if ( ist->next_dts == AV_NOPTS_VALUE
4386  && ifile->ts_offset == -is->start_time
4387  && (is->iformat->flags & AVFMT_TS_DISCONT)) {
4388  int64_t new_start_time = INT64_MAX;
4389  for (i=0; i<is->nb_streams; i++) {
4390  AVStream *st = is->streams[i];
4391  if(st->discard == AVDISCARD_ALL || st->start_time == AV_NOPTS_VALUE)
4392  continue;
4393  new_start_time = FFMIN(new_start_time, av_rescale_q(st->start_time, st->time_base, AV_TIME_BASE_Q));
4394  }
4395  if (new_start_time > is->start_time) {
4396  av_log(is, AV_LOG_VERBOSE, "Correcting start time by %"PRId64"\n", new_start_time - is->start_time);
4397  ifile->ts_offset = -new_start_time;
4398  }
4399  }
4400 
4401  stime = av_rescale_q(is->start_time, AV_TIME_BASE_Q, ist->st->time_base);
4402  stime2= stime + (1ULL<<ist->st->pts_wrap_bits);
4403  ist->wrap_correction_done = 1;
4404 
4405  if(stime2 > stime && pkt.dts != AV_NOPTS_VALUE && pkt.dts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4406  pkt.dts -= 1ULL<<ist->st->pts_wrap_bits;
4407  ist->wrap_correction_done = 0;
4408  }
4409  if(stime2 > stime && pkt.pts != AV_NOPTS_VALUE && pkt.pts > stime + (1LL<<(ist->st->pts_wrap_bits-1))) {
4410  pkt.pts -= 1ULL<<ist->st->pts_wrap_bits;
4411  ist->wrap_correction_done = 0;
4412  }
4413  }
4414 
4415  /* add the stream-global side data to the first packet */
4416  if (ist->nb_packets == 1) {
4417  for (i = 0; i < ist->st->nb_side_data; i++) {
4418  AVPacketSideData *src_sd = &ist->st->side_data[i];
4419  uint8_t *dst_data;
4420 
4421  if (src_sd->type == AV_PKT_DATA_DISPLAYMATRIX)
4422  continue;
4423 
4424  if (av_packet_get_side_data(&pkt, src_sd->type, NULL))
4425  continue;
4426 
4427  dst_data = av_packet_new_side_data(&pkt, src_sd->type, src_sd->size);
4428  if (!dst_data)
4429  exit_program(1);
4430 
4431  memcpy(dst_data, src_sd->data, src_sd->size);
4432  }
4433  }
4434 
4435  if (pkt.dts != AV_NOPTS_VALUE)
4436  pkt.dts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4437  if (pkt.pts != AV_NOPTS_VALUE)
4438  pkt.pts += av_rescale_q(ifile->ts_offset, AV_TIME_BASE_Q, ist->st->time_base);
4439 
4440  if (pkt.pts != AV_NOPTS_VALUE)
4441  pkt.pts *= ist->ts_scale;
4442  if (pkt.dts != AV_NOPTS_VALUE)
4443  pkt.dts *= ist->ts_scale;
4444 
4446  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4448  pkt_dts != AV_NOPTS_VALUE && ist->next_dts == AV_NOPTS_VALUE && !copy_ts
4449  && (is->iformat->flags & AVFMT_TS_DISCONT) && ifile->last_ts != AV_NOPTS_VALUE) {
4450  int64_t delta = pkt_dts - ifile->last_ts;
4451  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4452  delta > 1LL*dts_delta_threshold*AV_TIME_BASE){
4453  ifile->ts_offset -= delta;
4455  "Inter stream timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4456  delta, ifile->ts_offset);
4457  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4458  if (pkt.pts != AV_NOPTS_VALUE)
4459  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4460  }
4461  }
4462 
4463  duration = av_rescale_q(ifile->duration, ifile->time_base, ist->st->time_base);
4464  if (pkt.pts != AV_NOPTS_VALUE) {
4465  pkt.pts += duration;
4466  ist->max_pts = FFMAX(pkt.pts, ist->max_pts);
4467  ist->min_pts = FFMIN(pkt.pts, ist->min_pts);
4468  }
4469 
4470  if (pkt.dts != AV_NOPTS_VALUE)
4471  pkt.dts += duration;
4472 
4474  if ((ist->dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO ||
4476  pkt_dts != AV_NOPTS_VALUE && ist->next_dts != AV_NOPTS_VALUE &&
4477  !copy_ts) {
4478  int64_t delta = pkt_dts - ist->next_dts;
4479  if (is->iformat->flags & AVFMT_TS_DISCONT) {
4480  if (delta < -1LL*dts_delta_threshold*AV_TIME_BASE ||
4481  delta > 1LL*dts_delta_threshold*AV_TIME_BASE ||
4482  pkt_dts + AV_TIME_BASE/10 < FFMAX(ist->pts, ist->dts)) {
4483  ifile->ts_offset -= delta;
4485  "timestamp discontinuity %"PRId64", new offset= %"PRId64"\n",
4486  delta, ifile->ts_offset);
4487  pkt.dts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4488  if (pkt.pts != AV_NOPTS_VALUE)
4489  pkt.pts -= av_rescale_q(delta, AV_TIME_BASE_Q, ist->st->time_base);
4490  }
4491  } else {
4492  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4493  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4494  av_log(NULL, AV_LOG_WARNING, "DTS %"PRId64", next:%"PRId64" st:%d invalid dropping\n", pkt.dts, ist->next_dts, pkt.stream_index);
4495  pkt.dts = AV_NOPTS_VALUE;
4496  }
4497  if (pkt.pts != AV_NOPTS_VALUE){
4498  int64_t pkt_pts = av_rescale_q(pkt.pts, ist->st->time_base, AV_TIME_BASE_Q);
4499  delta = pkt_pts - ist->next_dts;
4500  if ( delta < -1LL*dts_error_threshold*AV_TIME_BASE ||
4501  delta > 1LL*dts_error_threshold*AV_TIME_BASE) {
4502  av_log(NULL, AV_LOG_WARNING, "PTS %"PRId64", next:%"PRId64" invalid dropping st:%d\n", pkt.pts, ist->next_dts, pkt.stream_index);
4503  pkt.pts = AV_NOPTS_VALUE;
4504  }
4505  }
4506  }
4507  }
4508 
4509  if (pkt.dts != AV_NOPTS_VALUE)
4510  ifile->last_ts = av_rescale_q(pkt.dts, ist->st->time_base, AV_TIME_BASE_Q);
4511 
4512  if (debug_ts) {
4513  av_log(NULL, AV_LOG_INFO, "demuxer+ffmpeg -> ist_index:%d type:%s pkt_pts:%s pkt_pts_time:%s pkt_dts:%s pkt_dts_time:%s off:%s off_time:%s\n",
4515  av_ts2str(pkt.pts), av_ts2timestr(pkt.pts, &ist->st->time_base),
4516  av_ts2str(pkt.dts), av_ts2timestr(pkt.dts, &ist->st->time_base),
4517  av_ts2str(input_files[ist->file_index]->ts_offset),
4518  av_ts2timestr(input_files[ist->file_index]->ts_offset, &AV_TIME_BASE_Q));
4519  }
4520 
4521  sub2video_heartbeat(ist, pkt.pts);
4522 
4523  process_input_packet(ist, &pkt, 0);
4524 
4525 discard_packet:
4526  av_packet_unref(&pkt);
4527 
4528  return 0;
4529 }
4530 
4531 /**
4532  * Perform a step of transcoding for the specified filter graph.
4533  *
4534  * @param[in] graph filter graph to consider
4535  * @param[out] best_ist input stream where a frame would allow to continue
4536  * @return 0 for success, <0 for error
4537  */
4538 static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
4539 {
4540  int i, ret;
4541  int nb_requests, nb_requests_max = 0;
4542  InputFilter *ifilter;
4543  InputStream *ist;
4544 
4545  *best_ist = NULL;
4546  ret = avfilter_graph_request_oldest(graph->graph);
4547  if (ret >= 0)
4548  return reap_filters(0);
4549 
4550  if (ret == AVERROR_EOF) {
4551  ret = reap_filters(1);
4552  for (i = 0; i < graph->nb_outputs; i++)
4553  close_output_stream(graph->outputs[i]->ost);
4554  return ret;
4555  }
4556  if (ret != AVERROR(EAGAIN))
4557  return ret;
4558 
4559  for (i = 0; i < graph->nb_inputs; i++) {
4560  ifilter = graph->inputs[i];
4561  ist = ifilter->ist;
4562  if (input_files[ist->file_index]->eagain ||
4563  input_files[ist->file_index]->eof_reached)
4564  continue;
4565  nb_requests = av_buffersrc_get_nb_failed_requests(ifilter->filter);
4566  if (nb_requests > nb_requests_max) {
4567  nb_requests_max = nb_requests;
4568  *best_ist = ist;
4569  }
4570  }
4571 
4572  if (!*best_ist)
4573  for (i = 0; i < graph->nb_outputs; i++)
4574  graph->outputs[i]->ost->unavailable = 1;
4575 
4576  return 0;
4577 }
4578 
4579 /**
4580  * Run a single step of transcoding.
4581  *
4582  * @return 0 for success, <0 for error
4583  */
4584 static int transcode_step(void)
4585 {
4586  OutputStream *ost;
4587  InputStream *ist = NULL;
4588  int ret;
4589 
4590  ost = choose_output();
4591  if (!ost) {
4592  if (got_eagain()) {
4593  reset_eagain();
4594  av_usleep(10000);
4595  return 0;
4596  }
4597  av_log(NULL, AV_LOG_VERBOSE, "No more inputs to read from, finishing.\n");
4598  return AVERROR_EOF;
4599  }
4600 
4601  if (ost->filter && !ost->filter->graph->graph) {
4603  ret = configure_filtergraph(ost->filter->graph);
4604  if (ret < 0) {
4605  av_log(NULL, AV_LOG_ERROR, "Error reinitializing filters!\n");
4606  return ret;
4607  }
4608  }
4609  }
4610 
4611  if (ost->filter && ost->filter->graph->graph) {
4612  if (!ost->initialized) {
4613  char error[1024] = {0};
4614  ret = init_output_stream(ost, error, sizeof(error));
4615  if (ret < 0) {
4616  av_log(NULL, AV_LOG_ERROR, "Error initializing output stream %d:%d -- %s\n",
4617  ost->file_index, ost->index, error);
4618  exit_program(1);
4619  }
4620  }
4621  if ((ret = transcode_from_filter(ost->filter->graph, &ist)) < 0)
4622  return ret;
4623  if (!ist)
4624  return 0;
4625  } else if (ost->filter) {
4626  int i;
4627  for (i = 0; i < ost->filter->graph->nb_inputs; i++) {
4628  InputFilter *ifilter = ost->filter->graph->inputs[i];
4629  if (!ifilter->ist->got_output && !input_files[ifilter->ist->file_index]->eof_reached) {
4630  ist = ifilter->ist;
4631  break;
4632  }
4633  }
4634  if (!ist) {
4635  ost->inputs_done = 1;
4636  return 0;
4637  }
4638  } else {
4639  av_assert0(ost->source_index >= 0);
4640  ist = input_streams[ost->source_index];
4641  }
4642 
4643  ret = process_input(ist->file_index);
4644  if (ret == AVERROR(EAGAIN)) {
4645  if (input_files[ist->file_index]->eagain)
4646  ost->unavailable = 1;
4647  return 0;
4648  }
4649 
4650  if (ret < 0)
4651  return ret == AVERROR_EOF ? 0 : ret;
4652 
4653  return reap_filters(0);
4654 }
4655 
4656 /*
4657  * The following code is the main loop of the file converter
4658  */
4659 static int transcode(void)
4660 {
4661  int ret, i;
4662  AVFormatContext *os;
4663  OutputStream *ost;
4664  InputStream *ist;
4665  int64_t timer_start;
4666  int64_t total_packets_written = 0;
4667 
4668  ret = transcode_init();
4669  if (ret < 0)
4670  goto fail;
4671 
4672  if (stdin_interaction) {
4673  av_log(NULL, AV_LOG_INFO, "Press [q] to stop, [?] for help\n");
4674  }
4675 
4676  timer_start = av_gettime_relative();
4677 
4678 #if HAVE_THREADS
4679  if ((ret = init_input_threads()) < 0)
4680  goto fail;
4681 #endif
4682 
4683  while (!received_sigterm) {
4684  int64_t cur_time= av_gettime_relative();
4685 
4686  /* if 'q' pressed, exits */
4687  if (stdin_interaction)
4688  if (check_keyboard_interaction(cur_time) < 0)
4689  break;
4690 
4691  /* check if there's any stream where output is still needed */
4692  if (!need_output()) {
4693  av_log(NULL, AV_LOG_VERBOSE, "No more output streams to write to, finishing.\n");
4694  break;
4695  }
4696 
4697  ret = transcode_step();
4698  if (ret < 0 && ret != AVERROR_EOF) {
4699  av_log(NULL, AV_LOG_ERROR, "Error while filtering: %s\n", av_err2str(ret));
4700  break;
4701  }
4702 
4703  /* dump report by using the output first video and audio streams */
4704  print_report(0, timer_start, cur_time);
4705  }
4706 #if HAVE_THREADS
4707  free_input_threads();
4708 #endif
4709 
4710  /* at the end of stream, we must flush the decoder buffers */
4711  for (i = 0; i < nb_input_streams; i++) {
4712  ist = input_streams[i];
4713  if (!input_files[ist->file_index]->eof_reached) {
4714  process_input_packet(ist, NULL, 0);
4715  }
4716  }
4717  flush_encoders();
4718 
4719  term_exit();
4720 
4721  /* write the trailer if needed and close file */
4722  for (i = 0; i < nb_output_files; i++) {
4723  os = output_files[i]->ctx;
4724  if (!output_files[i]->header_written) {
4726  "Nothing was written into output file %d (%s), because "
4727  "at least one of its streams received no packets.\n",
4728  i, os->url);
4729  continue;
4730  }
4731  if ((ret = av_write_trailer(os)) < 0) {
4732  av_log(NULL, AV_LOG_ERROR, "Error writing trailer of %s: %s\n", os->url, av_err2str(ret));
4733  if (exit_on_error)
4734  exit_program(1);
4735  }
4736  }
4737 
4738  /* dump report by using the first video and audio streams */
4739  print_report(1, timer_start, av_gettime_relative());
4740 
4741  /* close each encoder */
4742  for (i = 0; i < nb_output_streams; i++) {
4743  ost = output_streams[i];
4744  if (ost->encoding_needed) {
4745  av_freep(&ost->enc_ctx->stats_in);
4746  }
4747  total_packets_written += ost->packets_written;
4748  }
4749 
4750  if (!total_packets_written && (abort_on_flags & ABORT_ON_FLAG_EMPTY_OUTPUT)) {
4751  av_log(NULL, AV_LOG_FATAL, "Empty output\n");
4752  exit_program(1);
4753  }
4754 
4755  /* close each decoder */
4756  for (i = 0; i < nb_input_streams; i++) {
4757  ist = input_streams[i];
4758  if (ist->decoding_needed) {
4759  avcodec_close(ist->dec_ctx);
4760  if (ist->hwaccel_uninit)
4761  ist->hwaccel_uninit(ist->dec_ctx);
4762  }
4763  }
4764 
4767 
4768  /* finished ! */
4769  ret = 0;
4770 
4771  fail:
4772 #if HAVE_THREADS
4773  free_input_threads();
4774 #endif
4775 
4776  if (output_streams) {
4777  for (i = 0; i < nb_output_streams; i++) {
4778  ost = output_streams[i];
4779  if (ost) {
4780  if (ost->logfile) {
4781  if (fclose(ost->logfile))
4783  "Error closing logfile, loss of information possible: %s\n",
4784  av_err2str(AVERROR(errno)));
4785  ost->logfile = NULL;
4786  }
4787  av_freep(&ost->forced_kf_pts);
4788  av_freep(&ost->apad);
4789  av_freep(&ost->disposition);
4790  av_dict_free(&ost->encoder_opts);
4791  av_dict_free(&ost->sws_dict);
4792  av_dict_free(&ost->swr_opts);
4793  av_dict_free(&ost->resample_opts);
4794  }
4795  }
4796  }
4797  return ret;
4798 }
4799 
4801 {
4802  BenchmarkTimeStamps time_stamps = { av_gettime_relative() };
4803 #if HAVE_GETRUSAGE
4804  struct rusage rusage;
4805 
4806  getrusage(RUSAGE_SELF, &rusage);
4807  time_stamps.user_usec =
4808  (rusage.ru_utime.tv_sec * 1000000LL) + rusage.ru_utime.tv_usec;
4809  time_stamps.sys_usec =
4810  (rusage.ru_stime.tv_sec * 1000000LL) + rusage.ru_stime.tv_usec;
4811 #elif HAVE_GETPROCESSTIMES
4812  HANDLE proc;
4813  FILETIME c, e, k, u;
4814  proc = GetCurrentProcess();
4815  GetProcessTimes(proc, &c, &e, &k, &u);
4816  time_stamps.user_usec =
4817  ((int64_t)u.dwHighDateTime << 32 | u.dwLowDateTime) / 10;
4818  time_stamps.sys_usec =
4819  ((int64_t)k.dwHighDateTime << 32 | k.dwLowDateTime) / 10;
4820 #else
4821  time_stamps.user_usec = time_stamps.sys_usec = 0;
4822 #endif
4823  return time_stamps;
4824 }
4825 
/* Report the process's peak memory usage in bytes, or 0 when the
 * platform offers no way to query it. */
static int64_t getmaxrss(void)
{
#if HAVE_GETRUSAGE && HAVE_STRUCT_RUSAGE_RU_MAXRSS
    struct rusage usage;

    getrusage(RUSAGE_SELF, &usage);
    /* ru_maxrss is in kilobytes; convert to bytes. */
    return (int64_t)usage.ru_maxrss * 1024;
#elif HAVE_GETPROCESSMEMORYINFO
    PROCESS_MEMORY_COUNTERS counters;
    HANDLE self = GetCurrentProcess();

    counters.cb = sizeof(counters);
    GetProcessMemoryInfo(self, &counters, sizeof(counters));
    return counters.PeakPagefileUsage;
#else
    /* No portable way to query peak memory usage here. */
    return 0;
#endif
}
4843 
/* No-op log callback installed in daemon mode ("-d") to silence all
 * libav* logging output; parameters are intentionally ignored. */
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
{
}
4847 
4848 int main(int argc, char **argv)
4849 {
4850  int i, ret;
4852 
4853  init_dynload();
4854 
4856 
4857  setvbuf(stderr,NULL,_IONBF,0); /* win32 runtime needs this */
4858 
4860  parse_loglevel(argc, argv, options);
4861 
4862  if(argc>1 && !strcmp(argv[1], "-d")){
4863  run_as_daemon=1;
4865  argc--;
4866  argv++;
4867  }
4868 
4869 #if CONFIG_AVDEVICE
4871 #endif
4873 
4874  show_banner(argc, argv, options);
4875 
4876  /* parse options and open all input/output files */
4877  ret = ffmpeg_parse_options(argc, argv);
4878  if (ret < 0)
4879  exit_program(1);
4880 
4881  if (nb_output_files <= 0 && nb_input_files == 0) {
4882  show_usage();
4883  av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
4884  exit_program(1);
4885  }
4886 
4887  /* file converter / grab */
4888  if (nb_output_files <= 0) {
4889  av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
4890  exit_program(1);
4891  }
4892 
4893 // if (nb_input_files == 0) {
4894 // av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
4895 // exit_program(1);
4896 // }
4897 
4898  for (i = 0; i < nb_output_files; i++) {
4899  if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
4900  want_sdp = 0;
4901  }
4902 
4903  current_time = ti = get_benchmark_time_stamps();
4904  if (transcode() < 0)
4905  exit_program(1);
4906  if (do_benchmark) {
4907  int64_t utime, stime, rtime;
4908  current_time = get_benchmark_time_stamps();
4909  utime = current_time.user_usec - ti.user_usec;
4910  stime = current_time.sys_usec - ti.sys_usec;
4911  rtime = current_time.real_usec - ti.real_usec;
4913  "bench: utime=%0.3fs stime=%0.3fs rtime=%0.3fs\n",
4914  utime / 1000000.0, stime / 1000000.0, rtime / 1000000.0);
4915  }
4916  av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
4919  exit_program(69);
4920 
4922  return main_return_code;
4923 }
unsigned int nb_chapters
Number of chapters in AVChapter array.
Definition: avformat.h:1580
int64_t pts
current pts of the decoded frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:316
int nb_bitstream_filters
Definition: ffmpeg.h:462
#define extra_bits(eb)
Definition: intrax8.c:159
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:904
void init_dynload(void)
Initialize dynamic library loading.
Definition: cmdutils.c:120
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:1785
AVRational enc_timebase
Definition: ffmpeg.h:460
static void write_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int unqueue)
Definition: ffmpeg.c:690
int got_output
Definition: ffmpeg.h:341
#define AV_DISPOSITION_METADATA
Definition: avformat.h:856
void av_bsf_free(AVBSFContext **ctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:35
static void do_streamcopy(InputStream *ist, OutputStream *ost, const AVPacket *pkt)
Definition: ffmpeg.c:1999
int64_t first_dts
Timestamp corresponding to the last dts sync point.
Definition: avformat.h:1076
int guess_input_channel_layout(InputStream *ist)
Definition: ffmpeg.c:2086
#define ATOMIC_VAR_INIT(value)
Definition: stdatomic.h:31
#define NULL
Definition: coverity.c:32
int frame_number
Definition: ffmpeg.h:448
const struct AVCodec * codec
Definition: avcodec.h:1542
Definition: ffmpeg.h:425
AVRational framerate
Definition: avcodec.h:3056
enum AVFieldOrder field_order
Video only.
Definition: avcodec.h:3981
static void output_packet(OutputFile *of, AVPacket *pkt, OutputStream *ost, int eof)
Definition: ffmpeg.c:847
static void do_subtitle_out(OutputFile *of, OutputStream *ost, AVSubtitle *sub)
Definition: ffmpeg.c:971
Bytestream IO Context.
Definition: avio.h:161
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
static enum AVPixelFormat pix_fmt
int64_t recording_time
desired length of the resulting file in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:558
void term_init(void)
Definition: ffmpeg.c:387
int64_t avio_size(AVIOContext *s)
Get the filesize.
Definition: aviobuf.c:336
AVCodecParameters * par_out
Parameters of the output stream.
Definition: avcodec.h:5737
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
uint8_t * name
Definition: ffmpeg.h:263
int nb_outputs
Definition: ffmpeg.h:292
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
int size
AVDictionary * swr_opts
Definition: ffmpeg.h:509
#define DECODING_FOR_FILTER
Definition: ffmpeg.h:302
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2446
#define av_realloc_f(p, o, n)
This structure describes decoded (raw) audio or video data.
Definition: frame.h:226
int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts)
Queue a command for one or more filter instances.
static int init_output_stream_streamcopy(OutputStream *ost)
Definition: ffmpeg.c:3062
void term_exit(void)
Definition: ffmpeg.c:328
int stream_copy
Definition: ffmpeg.h:514
int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt)
Write a packet to an output media file ensuring correct interleaving.
Definition: mux.c:1185
int x
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3842
AVIOInterruptCB interrupt_callback
Custom interrupt callbacks for the I/O layer.
Definition: avformat.h:1629
#define atomic_store(object, desired)
Definition: stdatomic.h:85
AVOption.
Definition: opt.h:246
AVRational frame_rate
Definition: ffmpeg.h:477
int64_t * forced_kf_pts
Definition: ffmpeg.h:488
void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, int err)
Set the receiving error code.
static void flush(AVCodecContext *avctx)
static void sub2video_flush(InputStream *ist)
Definition: ffmpeg.c:304
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:2709
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:376
char * filters
filtergraph associated to the -filter option
Definition: ffmpeg.h:504
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:883
static int process_input(int file_index)
Definition: ffmpeg.c:4264
int exit_on_error
Definition: ffmpeg_opt.c:104
int64_t cfr_next_pts
Definition: ffmpeg.h:326
int64_t forced_kf_ref_pts
Definition: ffmpeg.h:487
const char * fmt
Definition: avisynth_c.h:769
static int init_output_stream(OutputStream *ost, char *error, int error_len)
Definition: ffmpeg.c:3462
static atomic_int transcode_init_done
Definition: ffmpeg.c:336
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
int64_t bit_rate
the average bitrate
Definition: avcodec.h:1583
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
Main libavfilter public API header.
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1465
#define AV_DICT_DONT_OVERWRITE
Don&#39;t overwrite existing entries.
Definition: dict.h:79
static int run_as_daemon
Definition: ffmpeg.c:134
Memory buffer source API.
const char * desc
Definition: nvenc.c:65
void av_log_set_level(int level)
Set the log level.
Definition: log.c:385
AVRational framerate
Definition: ffmpeg.h:333
AVRational sample_aspect_ratio
Video only.
Definition: avcodec.h:3976
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:417
int height
Definition: ffmpeg.h:247
int64_t max_pts
Definition: ffmpeg.h:322
int decoding_needed
Definition: ffmpeg.h:300
enum AVCodecID codec_id
Specific type of the encoded data (the codec used).
Definition: avcodec.h:3900
void sub2video_update(InputStream *ist, AVSubtitle *sub)
Definition: ffmpeg.c:240
const struct AVBitStreamFilter * filter
The bitstream filter this context is an instance of.
Definition: avcodec.h:5712
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown)
Definition: avformat.h:936
int num
Numerator.
Definition: rational.h:59
The bitstream filter state.
Definition: avcodec.h:5703
static void print_report(int is_last_report, int64_t timer_start, int64_t cur_time)
Definition: ffmpeg.c:1651
#define vsnprintf
Definition: snprintf.h:36
int rotate_overridden
Definition: ffmpeg.h:481
int index
stream index in AVFormatContext
Definition: avformat.h:875
int size
Definition: avcodec.h:1446
static int64_t getmaxrss(void)
Definition: ffmpeg.c:4826
uint8_t pi<< 24) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_U8,(uint64_t)((*(const uint8_t *) pi - 0x80U))<< 56) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16,(*(const int16_t *) pi >>8)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S16,(uint64_t)(*(const int16_t *) pi)<< 48) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32,(*(const int32_t *) pi >>24)+0x80) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_S32,(uint64_t)(*(const int32_t *) pi)<< 32) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S64,(*(const int64_t *) pi >>56)+0x80) CONV_FUNC(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0f/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S64, *(const int64_t *) pi *(1.0/(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_FLT, llrintf(*(const float *) pi *(INT64_C(1)<< 63))) CONV_FUNC(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, 
av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) CONV_FUNC(AV_SAMPLE_FMT_S64, int64_t, AV_SAMPLE_FMT_DBL, llrint(*(const double *) pi *(INT64_C(1)<< 63))) #define FMT_PAIR_FUNC(out, in) static conv_func_type *const fmt_pair_to_conv_functions[AV_SAMPLE_FMT_NB *AV_SAMPLE_FMT_NB]={ FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_U8), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S16), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S32), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_FLT), FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBL), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_DBL), 
FMT_PAIR_FUNC(AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_S64), FMT_PAIR_FUNC(AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64), };static void cpy1(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, len);} static void cpy2(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 2 *len);} static void cpy4(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 4 *len);} static void cpy8(uint8_t **dst, const uint8_t **src, int len){ memcpy(*dst, *src, 8 *len);} AudioConvert *swri_audio_convert_alloc(enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, const int *ch_map, int flags) { AudioConvert *ctx;conv_func_type *f=fmt_pair_to_conv_functions[av_get_packed_sample_fmt(out_fmt)+AV_SAMPLE_FMT_NB *av_get_packed_sample_fmt(in_fmt)];if(!f) return NULL;ctx=av_mallocz(sizeof(*ctx));if(!ctx) return NULL;if(channels==1){ in_fmt=av_get_planar_sample_fmt(in_fmt);out_fmt=av_get_planar_sample_fmt(out_fmt);} ctx->channels=channels;ctx->conv_f=f;ctx->ch_map=ch_map;if(in_fmt==AV_SAMPLE_FMT_U8||in_fmt==AV_SAMPLE_FMT_U8P) memset(ctx->silence, 0x80, sizeof(ctx->silence));if(out_fmt==in_fmt &&!ch_map) { switch(av_get_bytes_per_sample(in_fmt)){ case 1:ctx->simd_f=cpy1;break;case 2:ctx->simd_f=cpy2;break;case 4:ctx->simd_f=cpy4;break;case 8:ctx->simd_f=cpy8;break;} } if(HAVE_X86ASM &&HAVE_MMX) swri_audio_convert_init_x86(ctx, out_fmt, in_fmt, channels);if(ARCH_ARM) swri_audio_convert_init_arm(ctx, out_fmt, in_fmt, channels);if(ARCH_AARCH64) swri_audio_convert_init_aarch64(ctx, out_fmt, in_fmt, channels);return ctx;} void swri_audio_convert_free(AudioConvert **ctx) { av_freep(ctx);} int swri_audio_convert(AudioConvert *ctx, AudioData *out, AudioData *in, int len) { int ch;int off=0;const int os=(out->planar ? 
1 :out->ch_count) *out->bps;unsigned misaligned=0;av_assert0(ctx->channels==out->ch_count);if(ctx->in_simd_align_mask) { int planes=in->planar ? in->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) in->ch[ch];misaligned|=m &ctx->in_simd_align_mask;} if(ctx->out_simd_align_mask) { int planes=out->planar ? out->ch_count :1;unsigned m=0;for(ch=0;ch< planes;ch++) m|=(intptr_t) out->ch[ch];misaligned|=m &ctx->out_simd_align_mask;} if(ctx->simd_f &&!ctx->ch_map &&!misaligned){ off=len &~15;av_assert1(off >=0);av_assert1(off<=len);av_assert2(ctx->channels==SWR_CH_MAX||!in->ch[ctx->channels]);if(off >0){ if(out->planar==in->planar){ int planes=out->planar ? out->ch_count :1;for(ch=0;ch< planes;ch++){ ctx->simd_f(out-> ch ch
Definition: audioconvert.c:56
int max_muxing_queue_size
Definition: ffmpeg.h:542
const char * b
Definition: vf_curves.c:116
static int nb_frames_dup
Definition: ffmpeg.c:135
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:191
#define us(width, name, range_min, range_max, subs,...)
Definition: cbs_h2645.c:263
static InputStream * get_input_stream(OutputStream *ost)
Definition: ffmpeg.c:2967
void hw_device_free_all(void)
Definition: ffmpeg_hw.c:269
#define AVIO_FLAG_WRITE
write-only
Definition: avio.h:655
#define AV_DISPOSITION_DUB
Definition: avformat.h:820
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1912
int eagain
Definition: ffmpeg.h:396
void avfilter_graph_free(AVFilterGraph **graph)
Free a graph, destroy its links, and set *graph to NULL.
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: avcodec.h:772
void show_banner(int argc, char **argv, const OptionDef *options)
Print the program banner to stderr.
Definition: cmdutils.c:1180
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1743
static int init_output_stream_encode(OutputStream *ost)
Definition: ffmpeg.c:3304
static void abort_codec_experimental(AVCodec *c, int encoder)
Definition: ffmpeg.c:655
#define AV_DISPOSITION_HEARING_IMPAIRED
stream for hearing impaired audiences
Definition: avformat.h:832
void(* hwaccel_uninit)(AVCodecContext *s)
Definition: ffmpeg.h:371
int quality
Definition: ffmpeg.h:540
unsigned num_rects
Definition: avcodec.h:3880
AVFrame * filter_frame
Definition: ffmpeg.h:307
static int transcode_init(void)
Definition: ffmpeg.c:3672
static int compare_int64(const void *a, const void *b)
Definition: ffmpeg.c:2974
static int process_input_packet(InputStream *ist, const AVPacket *pkt, int no_eof)
Definition: ffmpeg.c:2579
char * stats_in
pass2 encoding statistics input buffer Concatenated stuff from stats_out of pass1 should be placed he...
Definition: avcodec.h:2556
int do_benchmark_all
Definition: ffmpeg_opt.c:97
enum AVMediaType type
Definition: avcodec.h:3437
#define AV_DISPOSITION_CLEAN_EFFECTS
stream without voice
Definition: avformat.h:834
const char * key
int last_dropped
Definition: ffmpeg.h:471
int av_usleep(unsigned usec)
Sleep for a period of time.
Definition: time.c:84
int av_expr_parse(AVExpr **expr, const char *s, const char *const *const_names, const char *const *func1_names, double(*const *funcs1)(void *, double), const char *const *func2_names, double(*const *funcs2)(void *, double, double), int log_offset, void *log_ctx)
Parse an expression.
Definition: eval.c:679
discard all
Definition: avcodec.h:803
AVPacketSideData * side_data
An array of side data that applies to the whole stream (i.e.
Definition: avformat.h:976
int64_t input_ts_offset
Definition: ffmpeg.h:402
int do_hex_dump
Definition: ffmpeg_opt.c:98
static AVPacket pkt
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:2757
int nb_input_streams
Definition: ffmpeg.c:148
const char * name
Definition: ffmpeg.h:68
intptr_t atomic_int
Definition: stdatomic.h:55
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:1025
static void report_new_stream(int input_index, AVPacket *pkt)
Definition: ffmpeg.c:3657
#define src
Definition: vp8dsp.c:254
uint64_t packets_written
Definition: ffmpeg.h:534
AVCodec.
Definition: avcodec.h:3424
#define VSYNC_VFR
Definition: ffmpeg.h:52
int nb_dts_buffer
Definition: ffmpeg.h:388
#define AVFMT_TS_NONSTRICT
Format does not require strictly increasing timestamps, but they must still be monotonic.
Definition: avformat.h:479
This struct describes the properties of an encoded stream.
Definition: avcodec.h:3892
int print_stats
Definition: ffmpeg_opt.c:106
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:135
float dts_error_threshold
Definition: ffmpeg_opt.c:89
int64_t start_time
start time in microseconds == AV_TIME_BASE units
Definition: ffmpeg.h:559
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int index
Definition: ffmpeg.h:283
uint64_t data_size
Definition: ffmpeg.h:532
static int decode_interrupt_cb(void *ctx)
Definition: ffmpeg.c:476
AVBSFContext ** bsf_ctx
Definition: ffmpeg.h:463
int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int(*func)(void *, void *, int))
Feed data from a user-supplied callback to an AVFifoBuffer.
Definition: fifo.c:122
void av_display_rotation_set(int32_t matrix[9], double angle)
Initialize a transformation matrix describing a pure counterclockwise rotation by the specified angle...
Definition: display.c:50
#define AV_DISPOSITION_KARAOKE
Definition: avformat.h:824
struct FilterGraph * graph
Definition: ffmpeg.h:238
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1656
static int send_frame_to_filters(InputStream *ist, AVFrame *decoded_frame)
Definition: ffmpeg.c:2263
Undefined.
Definition: avutil.h:273
AVSubtitleRect ** rects
Definition: avcodec.h:3881
int encoding_needed
Definition: ffmpeg.h:447
static void update_benchmark(const char *fmt,...)
Definition: ffmpeg.c:660
static void log_callback_null(void *ptr, int level, const char *fmt, va_list vl)
Definition: ffmpeg.c:4844
AVBufferRef * hw_frames_ctx
For hwaccel-format frames, this should be a reference to the AVHWFramesContext describing the frame...
Definition: frame.h:564
static int check_keyboard_interaction(int64_t cur_time)
Definition: ffmpeg.c:3912
Format I/O context.
Definition: avformat.h:1351
Immediately push the frame to the output.
Definition: buffersrc.h:46
uint64_t samples_decoded
Definition: ffmpeg.h:385
memory buffer sink API for audio and video
struct InputStream * ist
Definition: ffmpeg.h:237
static int decode_video(InputStream *ist, AVPacket *pkt, int *got_output, int64_t *duration_pts, int eof, int *decode_failed)
Definition: ffmpeg.c:2353
#define AV_RL64
Definition: intreadwrite.h:173
unsigned int nb_stream_indexes
Definition: avformat.h:1273
#define AV_LOG_QUIET
Print no output.
Definition: log.h:158
enum HWAccelID id
Definition: ffmpeg.h:70
int64_t cur_dts
Definition: avformat.h:1077
int w
width of pict, undefined when pict is not set
Definition: avcodec.h:3844
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:993
uint64_t frames_decoded
Definition: ffmpeg.h:384
int header_written
Definition: ffmpeg.h:564
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
AVFilterGraph * graph
Definition: ffmpeg.h:286
int av_buffersink_get_sample_rate(const AVFilterContext *ctx)
Public dictionary API.
char * logfile_prefix
Definition: ffmpeg.h:499
static uint8_t * subtitle_out
Definition: ffmpeg.c:145
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:212
static void filter(int16_t *output, ptrdiff_t out_stride, int16_t *low, ptrdiff_t low_stride, int16_t *high, ptrdiff_t high_stride, int len, int clip)
Definition: cfhd.c:153
static int main_return_code
Definition: ffmpeg.c:338
static int64_t start_time
Definition: ffplay.c:330
int copy_initial_nonkeyframes
Definition: ffmpeg.h:524
void register_exit(void(*cb)(int ret))
Register a program-specific cleanup routine.
Definition: cmdutils.c:131
static int init_output_bsfs(OutputStream *ost)
Definition: ffmpeg.c:3026
AVComponentDescriptor comp[4]
Parameters that describe how pixels are packed.
Definition: pixdesc.h:117
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2197
int64_t * dts_buffer
Definition: ffmpeg.h:387
int flags
can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, AVFMT_TS_NONSTRICT, AVFMT_TS_NEGATIVE
Definition: avformat.h:526
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
Opaque data information usually continuous.
Definition: avutil.h:203
AVDictionary * sws_dict
Definition: ffmpeg.h:508
int av_thread_message_queue_recv(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Receive a message from the queue.
int width
Video only.
Definition: avcodec.h:3966
float delta
static void sub2video_push_ref(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:222
AVOptions.
int subtitle_header_size
Definition: avcodec.h:3002
int flags
Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
Definition: avformat.h:661
enum AVCodecID av_codec_get_id(const struct AVCodecTag *const *tags, unsigned int tag)
Get the AVCodecID for the given codec tag tag.
miscellaneous OS support macros and functions.
timestamp utils, mostly useful for debugging/logging purposes
int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt, AVStream *ost, const AVStream *ist, enum AVTimebaseSource copy_tb)
Transfer internal timing information from one stream to another.
Definition: utils.c:5773
int stdin_interaction
Definition: ffmpeg_opt.c:108
FILE * logfile
Definition: ffmpeg.h:500
#define f(width, name)
Definition: cbs_vp9.c:255
AVDictionary * opts
Definition: ffmpeg.h:556
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
#define media_type_string
Definition: cmdutils.h:620
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1463
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: avcodec.h:1220
int av_thread_message_queue_send(AVThreadMessageQueue *mq, void *msg, unsigned flags)
Send a message on the queue.
#define ECHO(name, type, min, max)
Definition: af_aecho.c:186
#define FF_API_DEBUG_MV
Definition: version.h:58
static int need_output(void)
Definition: ffmpeg.c:3845
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:319
enum AVPixelFormat pix_fmt
A hardware pixel format which the codec can use.
Definition: avcodec.h:3402
int av_fifo_space(const AVFifoBuffer *f)
Return the amount of space in bytes in the AVFifoBuffer, that is the amount of data you can write int...
Definition: fifo.c:82
int nb_side_data
The number of elements in the AVStream.side_data array.
Definition: avformat.h:980
static double psnr(double d)
Definition: ffmpeg.c:1359
int do_benchmark
Definition: ffmpeg_opt.c:96
int audio_sync_method
Definition: ffmpeg_opt.c:92
#define u(width, name, range_min, range_max)
Definition: cbs_h2645.c:253
int shortest
Definition: ffmpeg.h:562
AVStream ** streams
A list of all streams in the file.
Definition: avformat.h:1419
int64_t duration
Definition: movenc.c:63
int avcodec_parameters_to_context(AVCodecContext *codec, const AVCodecParameters *par)
Fill the codec context based on the values from the supplied codec parameters.
Definition: utils.c:2161
int av_codec_get_tag2(const struct AVCodecTag *const *tags, enum AVCodecID id, unsigned int *tag)
Get the codec tag for the given codec id.
static AVFrame * frame
static void ifilter_parameters_from_codecpar(InputFilter *ifilter, AVCodecParameters *par)
Definition: ffmpeg.c:1850
void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, const AVStream *st)
Send a nice dump of a packet to the log.
Definition: dump.c:113
AVRational av_buffersink_get_frame_rate(const AVFilterContext *ctx)
const char * name
Definition: avcodec.h:5753
AVDictionaryEntry * av_dict_get(const AVDictionary *m, const char *key, const AVDictionaryEntry *prev, int flags)
Get a dictionary entry with matching key.
Definition: dict.c:40
static void finish(void)
Definition: movenc.c:345
#define AV_DISPOSITION_DEPENDENT
dependent audio stream (mix_type=0 in mpegts)
Definition: avformat.h:857
int nb_streams
Definition: ffmpeg.h:409
uint8_t * data
Definition: avcodec.h:1445
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
enum AVMediaType type
Definition: ffmpeg.h:240
static void set_tty_echo(int on)
Definition: ffmpeg.c:3900
AVDictionary * resample_opts
Definition: ffmpeg.h:510
void av_packet_move_ref(AVPacket *dst, AVPacket *src)
Move every field in src to dst and reset src.
Definition: avpacket.c:653
static void parse_forced_key_frames(char *kf, OutputStream *ost, AVCodecContext *avctx)
Definition: ffmpeg.c:3217
#define FFMIN3(a, b, c)
Definition: common.h:97
AVFilterContext * filter
Definition: ffmpeg.h:260
static int seek_to_start(InputFile *ifile, AVFormatContext *is)
Definition: ffmpeg.c:4198
int avformat_network_init(void)
Do global initialization of network libraries.
Definition: utils.c:5010
char av_get_picture_type_char(enum AVPictureType pict_type)
Return a single letter to describe the given picture type pict_type.
Definition: utils.c:88
int * formats
Definition: ffmpeg.h:277
#define ff_dlog(a,...)
int nb_input_files
Definition: ffmpeg.c:150
#define AVERROR_EOF
End of file.
Definition: error.h:55
static int read_key(void)
Definition: ffmpeg.c:425
static int reap_filters(int flush)
Get and encode new output from any of the filtergraphs, without causing activity. ...
Definition: ffmpeg.c:1428
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define lrintf(x)
Definition: libm_mips.h:70
static volatile int ffmpeg_exited
Definition: ffmpeg.c:337
int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size)
Generate an SDP for an RTP session.
Definition: sdp.c:841
static void do_video_stats(OutputStream *ost, int frame_size)
Definition: ffmpeg.c:1364
uint8_t * data
Definition: avcodec.h:1389
int interlaced_frame
The content of the picture is interlaced.
Definition: frame.h:373
void parse_loglevel(int argc, char **argv, const OptionDef *options)
Find the &#39;-loglevel&#39; option in the command line args and apply it.
Definition: cmdutils.c:506
int h
height of pict, undefined when pict is not set
Definition: avcodec.h:3845
static av_always_inline int64_t avio_tell(AVIOContext *s)
ftell() equivalent for AVIOContext.
Definition: avio.h:557
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
AVCodec * dec
Definition: ffmpeg.h:305
AVBufferRef * av_buffersink_get_hw_frames_ctx(const AVFilterContext *ctx)
enum AVDiscard discard
selects which program to discard and which to feed to the caller
Definition: avformat.h:1271
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:2548
void avio_write(AVIOContext *s, const unsigned char *buf, int size)
Definition: aviobuf.c:218
int top_field_first
Definition: ffmpeg.h:334
#define AVFMT_FLAG_BITEXACT
When muxing, try to avoid writing any random/volatile data to the output.
Definition: avformat.h:1499
int nb_output_streams
Definition: ffmpeg.c:153
int file_index
Definition: ffmpeg.h:296
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array.
Definition: mem.c:198
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2171
unsigned int * stream_index
Definition: avformat.h:1272
int av_buffersink_get_h(const AVFilterContext *ctx)
struct InputStream::sub2video sub2video
int av_buffersink_get_format(const AVFilterContext *ctx)
The codec supports this format via the hw_device_ctx interface.
Definition: avcodec.h:3370
int wrap_correction_done
Definition: ffmpeg.h:317
int64_t filter_in_rescale_delta_last
Definition: ffmpeg.h:319
uint64_t channel_layout
Audio only.
Definition: avcodec.h:4002
#define av_log(a,...)
static void sub2video_heartbeat(InputStream *ist, int64_t pts)
Definition: ffmpeg.c:274
#define AV_DISPOSITION_CAPTIONS
To specify text track kind (different from subtitles default).
Definition: avformat.h:854
#define AV_BUFFERSINK_FLAG_NO_REQUEST
Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
Definition: buffersink.h:60
struct AVOutputFormat * oformat
The output container format.
Definition: avformat.h:1370
int64_t next_dts
Definition: ffmpeg.h:312
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: avcodec.h:1477
attribute_deprecated int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src)
Copy packet side data.
Definition: avpacket.c:226
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
Print detailed information about the input or output format, such as duration, bitrate, streams, container, programs, metadata, side data, codec and time base.
Definition: dump.c:563
AVFifoBuffer * sub_queue
queue of AVSubtitle* before filter init
Definition: ffmpeg.h:349
Main libavdevice API header.
Callback for checking whether to abort blocking functions.
Definition: avio.h:58
int avcodec_close(AVCodecContext *avctx)
Close a given AVCodecContext and free all the data associated with it (but not the AVCodecContext its...
Definition: utils.c:1119
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:3070
static int transcode_subtitles(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2486
AVRational av_stream_get_codec_timebase(const AVStream *st)
Get the internal codec timebase from a stream.
Definition: utils.c:5835
libswresample public header
enum AVCodecID id
Definition: avcodec.h:3438
int(* hwaccel_retrieve_data)(AVCodecContext *s, AVFrame *frame)
Definition: ffmpeg.h:373
AVRational sample_aspect_ratio
Definition: ffmpeg.h:248
const AVCodecDefault * defaults
Private codec-specific defaults.
Definition: avcodec.h:3497
int rate_emu
Definition: ffmpeg.h:412
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:258
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
Definition: utils.c:2086
int width
Definition: frame.h:284
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
#define AV_DICT_MATCH_CASE
Only get an entry with exact-case key match.
Definition: dict.h:69
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1823
int sample_rate
Definition: ffmpeg.h:250
static void finish_output_stream(OutputStream *ost)
Definition: ffmpeg.c:1409
static void reset_eagain(void)
Definition: ffmpeg.c:4169
static AVBufferRef * hw_device_ctx
Definition: hw_decode.c:45
enum AVPixelFormat hwaccel_pix_fmt
Definition: ffmpeg.h:374
void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst)
Convert valid timing fields (timestamps / durations) in a packet from one timebase to another...
Definition: avpacket.c:700
int ffmpeg_parse_options(int argc, char **argv)
Definition: ffmpeg_opt.c:3253
FilterGraph ** filtergraphs
Definition: ffmpeg.c:157
const AVIOInterruptCB int_cb
Definition: ffmpeg.c:481
AVFilterContext * filter
Definition: ffmpeg.h:236
#define AVFMT_TS_DISCONT
Format allows timestamp discontinuities.
Definition: avformat.h:471
#define atomic_load(object)
Definition: stdatomic.h:93
int64_t start
Definition: ffmpeg.h:309
int loop
Definition: ffmpeg.h:398
int y
top left corner of pict, undefined when pict is not set
Definition: avcodec.h:3843
uint64_t nb_packets
Definition: ffmpeg.h:382
static BenchmarkTimeStamps current_time
Definition: ffmpeg.c:142
#define AVERROR(e)
Definition: error.h:43
int64_t last_mux_dts
Definition: ffmpeg.h:457
int video_sync_method
Definition: ffmpeg_opt.c:93
int format
Definition: ffmpeg.h:245
int av_opt_eval_flags(void *obj, const AVOption *o, const char *val, int *flags_out)
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:350
static int64_t decode_error_stat[2]
Definition: ffmpeg.c:138
#define VSYNC_VSCFR
Definition: ffmpeg.h:53
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
char * sdp_filename
Definition: ffmpeg_opt.c:85
#define av_ts2timestr(ts, tb)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:76
#define FALSE
Definition: windows2linux.h:37
int last_nb0_frames[3]
Definition: ffmpeg.h:472
Display matrix.
int methods
Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible setup methods which can be used...
Definition: avcodec.h:3407
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
char * url
input or output URL.
Definition: avformat.h:1447
int video_delay
Video only.
Definition: avcodec.h:3995
int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:740
const char * r
Definition: vf_curves.c:114
const char *const forced_keyframes_const_names[]
Definition: ffmpeg.c:114
int capabilities
Codec capabilities.
Definition: avcodec.h:3443
struct AVCodecParserContext * av_stream_get_parser(const AVStream *s)
Definition: utils.c:149
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void(*func)(void *, void *, int))
Feed data from an AVFifoBuffer to a user-supplied callback.
Definition: fifo.c:213
int av_buffersrc_close(AVFilterContext *ctx, int64_t pts, unsigned flags)
Close the buffer source after EOF.
Definition: buffersrc.c:263
unsigned int nb_programs
Definition: avformat.h:1530
void av_dict_free(AVDictionary **pm)
Free all the memory allocated for an AVDictionary struct and all keys and values. ...
Definition: dict.c:203
preferred ID for decoding MPEG audio layer 1, 2 or 3
Definition: avcodec.h:559
enum AVMediaType codec_type
General type of the encoded data.
Definition: avcodec.h:3896
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: avcodec.h:1428
const char * arg
Definition: jacosubdec.c:66
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1613
int av_opt_set_int(void *obj, const char *name, int64_t val, int search_flags)
Definition: opt.c:568
AVChapter ** chapters
Definition: avformat.h:1581
#define AV_LOG_SKIP_REPEATED
Skip repeated messages, this requires the user app to use av_log() instead of (f)printf as the 2 woul...
Definition: log.h:345
Round to nearest and halfway cases away from zero.
Definition: mathematics.h:84
AVRational time_base_in
The timebase used for the timestamps of the input packets.
Definition: avcodec.h:5743
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
enum AVPacketSideDataType type
Definition: avcodec.h:1391
int av_log_get_level(void)
Get the current log level.
Definition: log.c:380
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
const char * name
Name of the codec implementation.
Definition: avcodec.h:3431
static int check_recording_time(OutputStream *ost)
Definition: ffmpeg.c:898
void remove_avoptions(AVDictionary **a, AVDictionary *b)
Definition: ffmpeg.c:637
int eof
Definition: ffmpeg.h:256
int force_fps
Definition: ffmpeg.h:479
int hw_device_setup_for_encode(OutputStream *ost)
Definition: ffmpeg_hw.c:414
AVRational avg_frame_rate
Average framerate.
Definition: avformat.h:947
New fields can be added to the end with minor version bumps.
Definition: avformat.h:1268
#define FFMAX(a, b)
Definition: common.h:94
int attribute_align_arg av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Get a frame with filtered data from sink and put it in frame.
Definition: buffersink.c:119
void avcodec_parameters_free(AVCodecParameters **par)
Free an AVCodecParameters instance and everything associated with it and write NULL to the supplied p...
Definition: utils.c:2075
int qp_hist
Definition: ffmpeg_opt.c:107
size_t av_strlcpy(char *dst, const char *src, size_t size)
Copy the string src to dst, but no more than size - 1 bytes, and null-terminate dst.
Definition: avstring.c:83
#define fail()
Definition: checkasm.h:117
float frame_drop_threshold
Definition: ffmpeg_opt.c:94
#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE
Audio encoder supports receiving a different number of samples in each call.
Definition: avcodec.h:1040
int64_t error[4]
Definition: ffmpeg.h:551
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1451
const AVCodecDescriptor * avcodec_descriptor_get(enum AVCodecID id)
Definition: codec_desc.c:3199
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2240
int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b)
Compare two timestamps each in its own time base.
Definition: mathematics.c:147
uint32_t end_display_time
Definition: avcodec.h:3879
static int want_sdp
Definition: ffmpeg.c:140
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3882
OutputFilter * filter
Definition: ffmpeg.h:502
static void check_decode_result(InputStream *ist, int *got_output, int ret)
Definition: ffmpeg.c:2106
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:404
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: avcodec.h:724
AVRational frame_aspect_ratio
Definition: ffmpeg.h:484
#define AV_DISPOSITION_LYRICS
Definition: avformat.h:823
static int ifilter_send_eof(InputFilter *ifilter, int64_t pts)
Definition: ffmpeg.c:2213
const AVOption * av_opt_find(void *obj, const char *name, const char *unit, int opt_flags, int search_flags)
Look for an option in an object.
Definition: opt.c:1608
#define FFDIFFSIGN(x, y)
Comparator.
Definition: common.h:92
#define AV_DISPOSITION_FORCED
Track should be used during playback by default.
Definition: avformat.h:831
common internal API header
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
AVRational mux_timebase
Definition: ffmpeg.h:459
unsigned int nb_streams
Number of elements in AVFormatContext.streams.
Definition: avformat.h:1407
AVDictionary * opts
Definition: movenc.c:50
int block_align
Audio only.
Definition: avcodec.h:4017
static int nb_frames_drop
Definition: ffmpeg.c:137
A bitmap, pict will be set.
Definition: avcodec.h:3824
int linesize[4]
Definition: avcodec.h:3860
int nb_output_files
Definition: ffmpeg.c:155
int seekable
A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.
Definition: avio.h:260
int void avio_flush(AVIOContext *s)
Force flushing of buffered data.
Definition: aviobuf.c:238
int channels
number of audio channels, only used for audio.
Definition: frame.h:531
audio channel layout utility functions
int is_cfr
Definition: ffmpeg.h:478
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:309
static int transcode(void)
Definition: ffmpeg.c:4659
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
Definition: avcodec.h:895
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:464
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
int64_t av_rescale(int64_t a, int64_t b, int64_t c)
Rescale a 64-bit integer with rounding to nearest.
Definition: mathematics.c:129
#define NAN
Definition: mathematics.h:64
#define AV_TIME_BASE
Internal time base represented as integer.
Definition: avutil.h:254
av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options)
Allocate the stream private data and write the stream header to an output media file.
Definition: mux.c:508
#define FFMIN(a, b)
Definition: common.h:96
AVPacketSideData * coded_side_data
Additional data associated with the entire coded stream.
Definition: avcodec.h:3188
uint64_t * channel_layouts
Definition: ffmpeg.h:278
#define VSYNC_AUTO
Definition: ffmpeg.h:49
void av_log_set_callback(void(*callback)(void *, int, const char *, va_list))
Set the logging callback.
Definition: log.c:400
attribute_deprecated int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src)
Copy the settings of the source AVCodecContext into the destination AVCodecContext.
Definition: options.c:215
int saw_first_ts
Definition: ffmpeg.h:331
int abort_on_flags
Definition: ffmpeg_opt.c:105
This side data contains quality related information from the encoder.
Definition: avcodec.h:1244
#define AV_DICT_DONT_STRDUP_VAL
Take ownership of a value that&#39;s been allocated with av_malloc() or another memory allocation functio...
Definition: dict.h:76
#define FFSIGN(a)
Definition: common.h:73
struct OutputStream * ost
Definition: ffmpeg.h:261
int width
picture width / height.
Definition: avcodec.h:1706
PVOID HANDLE
uint8_t w
Definition: llviddspenc.c:38
char * apad
Definition: ffmpeg.h:511
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames...
Definition: avcodec.h:3213
int64_t nb_samples
Definition: ffmpeg.h:328
AVRational time_base_out
The timebase used for the timestamps of the output packets.
Definition: avcodec.h:5749
int hw_device_setup_for_decode(InputStream *ist)
Definition: ffmpeg_hw.c:298
double forced_keyframes_expr_const_values[FKF_NB]
Definition: ffmpeg.h:493
int av_packet_make_refcounted(AVPacket *pkt)
Ensure the data described by a given packet is reference counted.
Definition: avpacket.c:661
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:186
int64_t duration
Definition: ffmpeg.h:399
#define AV_FRAME_FLAG_CORRUPT
The frame data may be corrupted, e.g.
Definition: frame.h:452
const char * name
Definition: avformat.h:507
int width
Definition: ffmpeg.h:247
int32_t
#define av_err2str(errnum)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: error.h:119
AVFormatContext * ctx
Definition: movenc.c:48
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:874
int ifilter_parameters_from_frame(InputFilter *ifilter, const AVFrame *frame)
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:858
static int decode_audio(InputStream *ist, AVPacket *pkt, int *got_output, int *decode_failed)
Definition: ffmpeg.c:2289
int nb_filtergraphs
Definition: ffmpeg.c:158
static av_always_inline int pthread_join(pthread_t thread, void **value_ptr)
Definition: os2threads.h:90
int64_t last_ts
Definition: ffmpeg.h:405
#define TRUE
Definition: windows2linux.h:33
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define s(width, name)
Definition: cbs_vp9.c:257
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:349
int do_pkt_dump
Definition: ffmpeg_opt.c:99
int64_t max_frames
Definition: ffmpeg.h:468
#define AV_RL32
Definition: intreadwrite.h:146
static void term_exit_sigsafe(void)
Definition: ffmpeg.c:320
int audio_channels_mapped
Definition: ffmpeg.h:497
int n
Definition: avisynth_c.h:684
AVDictionary * metadata
Definition: avformat.h:938
uint8_t * av_stream_new_side_data(AVStream *stream, enum AVPacketSideDataType type, int size)
Allocate new information from stream.
Definition: utils.c:5544
int ticks_per_frame
For some codecs, the time base is closer to the field rate than the frame rate.
Definition: avcodec.h:1665
#define AVFILTER_CMD_FLAG_ONE
Stop once a filter understood the command (for target=all for example), fast filters are favored auto...
Definition: avfilter.h:691
static uint32_t codec_flags(enum AVCodecID codec_id)
Definition: cafenc.c:37
const char program_name[]
program name, defined by the program for show_version().
Definition: ffmpeg.c:109
#define AVERROR_EXIT
Immediate exit was requested; the called function should not be restarted.
Definition: error.h:56
#define is(width, name, range_min, range_max, subs,...)
Definition: cbs_h2645.c:269
uint8_t * data[4]
data+linesize for the bitmap of this subtitle.
Definition: avcodec.h:3859
static int got_eagain(void)
Definition: ffmpeg.c:4160
int inputs_done
Definition: ffmpeg.h:521
static void error(const char *err)
int vstats_version
Definition: ffmpeg_opt.c:113
AVFrame * av_frame_clone(const AVFrame *src)
Create a new frame that references the same data as src.
Definition: frame.c:540
#define AVERROR_EXPERIMENTAL
Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it...
Definition: error.h:72
#define FF_ARRAY_ELEMS(a)
#define AV_DISPOSITION_ATTACHED_PIC
The stream is stored in the file as an attached picture/"cover art" (e.g.
Definition: avformat.h:842
const char * avcodec_get_name(enum AVCodecID id)
Get the name of a codec.
Definition: utils.c:1185
void exit_program(int ret)
Wraps exit with a program-specific cleanup routine.
Definition: cmdutils.c:136
AVCodecContext * enc
Definition: muxing.c:55
#define av_log2
Definition: intmath.h:83
#define AV_DISPOSITION_VISUAL_IMPAIRED
stream for visual impaired audiences
Definition: avformat.h:833
int ret
Definition: ffmpeg.h:342
int audio_volume
Definition: ffmpeg_opt.c:91
int64_t sys_usec
Definition: ffmpeg.c:126
void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout)
Return a description of a channel layout.
Stream structure.
Definition: avformat.h:874
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: decode.c:1982
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AVFMT_NOTIMESTAMPS
Format does not need / have any timestamps.
Definition: avformat.h:469
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:299
InputFilter ** filters
Definition: ffmpeg.h:358
int fix_sub_duration
Definition: ffmpeg.h:339
static BenchmarkTimeStamps get_benchmark_time_stamps(void)
Definition: ffmpeg.c:4800
int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:677
#define VSYNC_DROP
Definition: ffmpeg.h:54
static int ifilter_has_all_input_formats(FilterGraph *fg)
Definition: ffmpeg.c:2125
int64_t recording_time
Definition: ffmpeg.h:408
int avformat_network_deinit(void)
Undo the initialization done by avformat_network_init.
Definition: utils.c:5022
Definition: ffmpeg.h:67
int frame_size
Number of samples per channel in an audio frame.
Definition: avcodec.h:2209
static av_always_inline int pthread_create(pthread_t *thread, const pthread_attr_t *attr, void *(*start_routine)(void *), void *arg)
Definition: os2threads.h:76
int(* init)(AVCodecContext *s)
Definition: ffmpeg.h:69
static int check_init_output_file(OutputFile *of, int file_index)
Definition: ffmpeg.c:2980
#define AV_DISPOSITION_DEFAULT
Definition: avformat.h:819
AVStream * st
Definition: ffmpeg.h:297
static int sub2video_get_blank_frame(InputStream *ist)
Definition: ffmpeg.c:176
sample_rate
static void set_encoder_id(OutputFile *of, OutputStream *ost)
Definition: ffmpeg.c:3176
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
AVRational av_buffersink_get_sample_aspect_ratio(const AVFilterContext *ctx)
int frame_size
Definition: mxfenc.c:2091
enum AVHWDeviceType hwaccel_device_type
Definition: ffmpeg.h:365
#define AV_DISPOSITION_DESCRIPTIONS
Definition: avformat.h:855
int ost_index
Definition: ffmpeg.h:557
struct InputStream * sync_ist
Definition: ffmpeg.h:451
#define AV_BPRINT_SIZE_AUTOMATIC
int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1656
enum AVMediaType codec_type
Definition: avcodec.h:1541
double ts_scale
Definition: ffmpeg.h:330
int unavailable
Definition: ffmpeg.h:513
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown...
Definition: frame.h:506
void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, int err)
Set the sending error code.
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:171
static int init_input_stream(int ist_index, char *error, int error_len)
Definition: ffmpeg.c:2902
enum AVCodecID codec_id
Definition: avcodec.h:1543
void av_expr_free(AVExpr *e)
Free a parsed expression previously created with av_expr_parse().
Definition: eval.c:334
int av_fifo_size(const AVFifoBuffer *f)
Return the amount of data in bytes in the AVFifoBuffer, that is the amount of data you can read from ...
Definition: fifo.c:77
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int av_opt_set_dict(void *obj, AVDictionary **options)
Set all the options from a given dictionary on an object.
Definition: opt.c:1603
static void sub2video_copy_rect(uint8_t *dst, int dst_linesize, int w, int h, AVSubtitleRect *r)
Definition: ffmpeg.c:191
float max_error_rate
Definition: ffmpeg_opt.c:110
int sample_rate
samples per second
Definition: avcodec.h:2189
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:257
uint64_t frames_encoded
Definition: ffmpeg.h:536
AVIOContext * pb
I/O context.
Definition: avformat.h:1393
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb)
Rescale a timestamp while preserving known durations.
Definition: mathematics.c:168
int av_fifo_realloc2(AVFifoBuffer *f, unsigned int new_size)
Resize an AVFifoBuffer.
Definition: fifo.c:87
AVFifoBuffer * muxing_queue
Definition: ffmpeg.h:545
int ist_index
Definition: ffmpeg.h:397
const struct AVCodecTag *const * codec_tag
List of supported codec_id-codec_tag pairs, ordered by "better choice first".
Definition: avformat.h:532
static int loop
Definition: ffplay.c:339
int debug
debug
Definition: avcodec.h:2614
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
static void print_sdp(void)
Definition: ffmpeg.c:2761
const char * graph_desc
Definition: ffmpeg.h:284
int guess_layout_max
Definition: ffmpeg.h:335
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
uint64_t av_buffersink_get_channel_layout(const AVFilterContext *ctx)
int64_t start_time
Definition: ffmpeg.h:406
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:1830
main external API structure.
Definition: avcodec.h:1533
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:598
int64_t user_usec
Definition: ffmpeg.c:125
#define AV_PIX_FMT_RGB32
Definition: pixfmt.h:352
uint8_t * data
The data buffer.
Definition: buffer.h:89
static void ffmpeg_cleanup(int ret)
Definition: ffmpeg.c:483
static int command(AVFilterContext *ctx, const char *cmd, const char *arg, char *res, int res_len, int flags)
Definition: vf_drawtext.c:863
int * sample_rates
Definition: ffmpeg.h:279
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1100
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:314
const char * attachment_filename
Definition: ffmpeg.h:523
static int check_output_constraints(InputStream *ist, OutputStream *ost)
Definition: ffmpeg.c:1982
int avfilter_graph_request_oldest(AVFilterGraph *graph)
Request a frame on the oldest sink link.
a very simple circular buffer FIFO implementation
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:387
AVRational time_base
Definition: ffmpeg.h:401
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: avcodec.h:767
void assert_avoptions(AVDictionary *m)
Definition: ffmpeg.c:646
AVCodecContext * enc_ctx
Definition: ffmpeg.h:465
void * buf
Definition: avisynth_c.h:690
AVFrame * decoded_frame
Definition: ffmpeg.h:306
int extradata_size
Definition: avcodec.h:1635
Perform non-blocking operation.
Definition: threadmessage.h:31
AVBufferRef * hw_frames_ctx
Definition: ffmpeg.h:254
int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags)
Set the given entry in *pm, overwriting an existing entry.
Definition: dict.c:70
Replacements for frequently missing libm functions.
static int transcode_from_filter(FilterGraph *graph, InputStream **best_ist)
Perform a step of transcoding for the specified filter graph.
Definition: ffmpeg.c:4538
int nb_coded_side_data
Definition: avcodec.h:3189
int channels
Definition: ffmpeg.h:251
int * audio_channels_map
Definition: ffmpeg.h:496
#define VSYNC_PASSTHROUGH
Definition: ffmpeg.h:50
Describe the class of an AVClass context structure.
Definition: log.h:67
int sample_rate
Sample rate of the audio data.
Definition: frame.h:399
int configure_filtergraph(FilterGraph *fg)
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:2344
OutputStream ** output_streams
Definition: ffmpeg.c:152
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:88
int index
Definition: gxfenc.c:89
enum AVMediaType av_buffersink_get_type(const AVFilterContext *ctx)
int av_find_nearest_q_idx(AVRational q, const AVRational *q_list)
Find the value in a list of rationals nearest a given reference rational.
Definition: rational.c:142
#define FF_DEBUG_DCT_COEFF
Definition: avcodec.h:2626
Rational number (pair of numerator and denominator).
Definition: rational.h:58
int file_index
Definition: ffmpeg.h:443
int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, unsigned nelem, unsigned elsize)
Allocate a new message queue.
Definition: threadmessage.c:40
#define ABORT_ON_FLAG_EMPTY_OUTPUT
Definition: ffmpeg.h:433
double rotate_override_value
Definition: ffmpeg.h:482
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: utils.c:2104
int64_t sync_opts
Definition: ffmpeg.h:452
char * vstats_filename
Definition: ffmpeg_opt.c:84
int64_t parse_time_or_die(const char *context, const char *timestr, int is_duration)
Parse a string specifying a time and return its corresponding value as a number of microseconds...
Definition: cmdutils.c:165
AVCodecContext * dec_ctx
Definition: ffmpeg.h:304
char * disposition
Definition: ffmpeg.h:526
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:123
cl_device_type type
int filtergraph_is_simple(FilterGraph *fg)
struct InputStream::@24 prev_sub
#define mid_pred
Definition: mathops.h:97
AVMediaType
Definition: avutil.h:199
discard useless packets like 0 size packets in avi
Definition: avcodec.h:798
int avio_open2(AVIOContext **s, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options)
Create and initialize a AVIOContext for accessing the resource indicated by url.
Definition: aviobuf.c:1177
int av_buffersink_get_w(const AVFilterContext *ctx)
int nb_streams_warn
Definition: ffmpeg.h:411
static void init_encoder_time_base(OutputStream *ost, AVRational default_time_base)
Definition: ffmpeg.c:3280
AVDictionary * decoder_opts
Definition: ffmpeg.h:332
int autorotate
Definition: ffmpeg.h:337
const char * name
Name of the codec described by this descriptor.
Definition: avcodec.h:716
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:567
#define snprintf
Definition: snprintf.h:34
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:491
int frame_bits_per_raw_sample
Definition: ffmpeg_opt.c:109
void avformat_free_context(AVFormatContext *s)
Free an AVFormatContext and all its streams.
Definition: utils.c:4403
int64_t ts_offset
Definition: ffmpeg.h:404
uint32_t DWORD
unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src)
Get the number of failed requests.
Definition: buffersrc.c:293
static int transcode_step(void)
Run a single step of transcoding.
Definition: ffmpeg.c:4584
char * filters_script
filtergraph script associated to the -filter_script option
Definition: ffmpeg.h:505
int decode_error_flags
decode error flags of the frame, set to a combination of FF_DECODE_ERROR_xxx flags if the decoder pro...
Definition: frame.h:522
int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes)
Return audio frame duration.
Definition: utils.c:1781
misc parsing utilities
int av_read_frame(AVFormatContext *s, AVPacket *pkt)
Return the next frame of a stream.
Definition: utils.c:1777
static AVRational duration_max(int64_t tmp, int64_t *duration, AVRational tmp_time_base, AVRational time_base)
Definition: ffmpeg.c:4179
size_t av_strlcat(char *dst, const char *src, size_t size)
Append the string src to the string dst, but to a total length of no more than size - 1 bytes...
Definition: avstring.c:93
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:266
This struct describes the properties of a single codec described by an AVCodecID. ...
Definition: avcodec.h:708
AVFrame * filtered_frame
Definition: ffmpeg.h:469
int source_index
Definition: ffmpeg.h:445
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:324
static volatile int received_nb_signals
Definition: ffmpeg.c:335
int(* hwaccel_get_buffer)(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.h:372
int copy_prior_start
Definition: ffmpeg.h:525
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
int global_quality
Global quality for codecs which cannot change it per frame.
Definition: avcodec.h:1599
static void close_all_output_streams(OutputStream *ost, OSTFinished this_stream, OSTFinished others)
Definition: ffmpeg.c:681
static int64_t pts
int64_t av_gettime_relative(void)
Get the current time in microseconds since some unspecified starting point.
Definition: time.c:56
const char * av_get_media_type_string(enum AVMediaType media_type)
Return a string describing the media_type enum, NULL if media_type is unknown.
Definition: utils.c:76
#define flags(name, subs,...)
Definition: cbs_av1.c:610
void av_thread_message_queue_free(AVThreadMessageQueue **mq)
Free a message queue.
Definition: threadmessage.c:91
int nb_filters
Definition: ffmpeg.h:359
static enum AVPixelFormat get_format(AVCodecContext *s, const enum AVPixelFormat *pix_fmts)
Definition: ffmpeg.c:2806
int64_t start_time
Position of the first frame of the component, in AV_TIME_BASE fractional seconds. ...
Definition: avformat.h:1456
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:240
uint8_t level
Definition: svq3.c:207
AVExpr * forced_keyframes_pexpr
Definition: ffmpeg.h:492
int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt)
Return number of bytes per sample.
Definition: samplefmt.c:106
int64_t real_usec
Definition: ffmpeg.c:124
int64_t dts
dts of the last packet read for this stream (in AV_TIME_BASE units)
Definition: ffmpeg.h:313
int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, int flags)
Seek to the keyframe at timestamp.
Definition: utils.c:2517
int forced_kf_count
Definition: ffmpeg.h:489
int64_t start
Definition: avformat.h:1311
int64_t duration
Decoding: duration of the stream, in stream time base.
Definition: avformat.h:923
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
OSTFinished finished
Definition: ffmpeg.h:512
char * forced_keyframes
Definition: ffmpeg.h:491
int sample_rate
Audio only.
Definition: avcodec.h:4010
uint64_t data_size
Definition: ffmpeg.h:380
int64_t bitrate
Definition: h264_levels.c:89
void avdevice_register_all(void)
Initialize libavdevice and register all the input and output devices.
Definition: alldevices.c:67
int64_t next_pts
synthetic pts for the next decode frame (in AV_TIME_BASE units)
Definition: ffmpeg.h:315
static AVStream * ost
Main libavformat public API header.
void print_error(const char *filename, int err)
Print an error message to stderr, indicating filename and a human readable description of the error c...
Definition: cmdutils.c:1081
struct FilterGraph * graph
Definition: ffmpeg.h:262
uint64_t limit_filesize
Definition: ffmpeg.h:560
const OptionDef options[]
Definition: ffmpeg_opt.c:3329
static void print_final_stats(int64_t total_size)
Definition: ffmpeg.c:1534
AVIOContext * progress_avio
Definition: ffmpeg.c:143
int main(int argc, char **argv)
Definition: ffmpeg.c:4848
int reinit_filters
Definition: ffmpeg.h:361
#define AVFMT_NOFILE
Demuxer will use avio_open, no opened file should be provided by the caller.
Definition: avformat.h:465
AVCodecParameters * ref_par
Definition: ffmpeg.h:466
#define VSYNC_CFR
Definition: ffmpeg.h:51
void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size)
Set the frame size for an audio buffer sink.
Definition: buffersink.c:175
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: avcodec.h:1032
int64_t start_time
Decoding: pts of the first frame of the stream in presentation order, in stream time base...
Definition: avformat.h:913
static double c[64]
#define AVFMT_NOSTREAMS
Format does not require any streams.
Definition: avformat.h:474
AVStream * st
Definition: muxing.c:54
static AVCodecContext * dec_ctx
int disposition
AV_DISPOSITION_* bit field.
Definition: avformat.h:927
uint32_t start_display_time
Definition: avcodec.h:3878
AVRational av_add_q(AVRational b, AVRational c)
Add two rationals.
Definition: rational.c:93
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Definition: buffer.c:93
int pts_wrap_bits
number of bits in pts (used for wrapping control)
Definition: avformat.h:1066
uint64_t samples_encoded
Definition: ffmpeg.h:537
AVRational time_base
time base in which the start/end timestamps are specified
Definition: avformat.h:1310
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:33
int thread_safe_callbacks
Set by the client if its custom get_buffer() callback can be called synchronously from another thread...
Definition: avcodec.h:2814
int64_t nb_frames
number of frames in this stream if known or 0
Definition: avformat.h:925
char * key
Definition: dict.h:86
uint32_t BOOL
static FILE * vstats_file
Definition: ffmpeg.c:112
int den
Denominator.
Definition: rational.h:60
void uninit_opts(void)
Uninitialize the cmdutils option system, in particular free the *_opts contexts and their contents...
Definition: cmdutils.c:90
AVFrame * last_frame
Definition: ffmpeg.h:470
int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags)
Send a command to one or more filter instances.
int attribute_align_arg av_buffersrc_add_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags)
Add a frame to the buffer source.
Definition: buffersrc.c:152
uint64_t channel_layout
Definition: ffmpeg.h:252
#define AV_PKT_FLAG_CORRUPT
The packet content is corrupted.
Definition: avcodec.h:1478
int copy_ts
Definition: ffmpeg_opt.c:100
static void do_video_out(OutputFile *of, OutputStream *ost, AVFrame *next_picture, double sync_ipts)
Definition: ffmpeg.c:1054
struct AVInputFormat * iformat
The input container format.
Definition: avformat.h:1363
void avformat_close_input(AVFormatContext **s)
Close an opened input AVFormatContext.
Definition: utils.c:4441
AVFormatContext * ctx
Definition: ffmpeg.h:394
int pict_type
Definition: ffmpeg.h:548
AVSubtitle subtitle
Definition: ffmpeg.h:343
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:862
int eof_reached
Definition: ffmpeg.h:395
Flag telling rescaling functions to pass INT64_MIN/MAX through unchanged, avoiding special cases for ...
Definition: mathematics.h:108
int forced_kf_index
Definition: ffmpeg.h:490
#define AVFMT_VARIABLE_FPS
Format allows variable fps.
Definition: avformat.h:472
char * avfilter
Definition: ffmpeg.h:503
int hwaccel_decode_init(AVCodecContext *avctx)
Definition: ffmpeg_hw.c:472
uint8_t * name
Definition: ffmpeg.h:239
char * value
Definition: dict.h:87
AVFifoBuffer * av_fifo_alloc(unsigned int size)
Initialize an AVFifoBuffer.
Definition: fifo.c:43
Keep a reference to the frame.
Definition: buffersrc.h:53
int top_field_first
If the content is interlaced, is top field displayed first.
Definition: frame.h:378
#define av_ts2str(ts)
Convenience macro, the return value should be used only directly in function arguments but never stan...
Definition: timestamp.h:54
float dts_delta_threshold
Definition: ffmpeg_opt.c:88
double av_expr_eval(AVExpr *e, const double *const_values, void *opaque)
Evaluate a previously parsed expression.
Definition: eval.c:734
int channels
number of audio channels
Definition: avcodec.h:2190
int top_field_first
Definition: ffmpeg.h:480
int av_buffersink_get_channels(const AVFilterContext *ctx)
OutputFilter ** outputs
Definition: ffmpeg.h:291
InputFile ** input_files
Definition: ffmpeg.c:149
static int get_buffer(AVCodecContext *s, AVFrame *frame, int flags)
Definition: ffmpeg.c:2892
void av_log_set_flags(int arg)
Definition: log.c:390
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:304
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
static int ifilter_send_frame(InputFilter *ifilter, AVFrame *frame)
Definition: ffmpeg.c:2136
AVFormatContext * ctx
Definition: ffmpeg.h:555
#define lrint
Definition: tablegen.h:53
#define AV_DISPOSITION_COMMENT
Definition: avformat.h:822
void show_usage(void)
Definition: ffmpeg_opt.c:3202
int channels
Audio only.
Definition: avcodec.h:4006
An instance of a filter.
Definition: avfilter.h:338
static void do_audio_out(OutputFile *of, OutputStream *ost, AVFrame *frame)
Definition: ffmpeg.c:911
#define LIBAVCODEC_IDENT
Definition: version.h:42
char * hwaccel_device
Definition: ffmpeg.h:366
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1444
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
AVDictionary * encoder_opts
Definition: ffmpeg.h:507
int64_t av_get_default_channel_layout(int nb_channels)
Return default channel layout for a given number of channels.
int av_write_trailer(AVFormatContext *s)
Write the stream trailer to an output media file and free the file private data.
Definition: mux.c:1247
const int program_birth_year
program birth year, defined by the program for show_banner()
Definition: ffmpeg.c:110
int repeat_pict
This field is used for proper frame duration computation in lavf.
Definition: avcodec.h:5066
int height
Definition: frame.h:284
FILE * out
Definition: movenc.c:54
InputFilter ** inputs
Definition: ffmpeg.h:289
#define av_freep(p)
enum AVFieldOrder field_order
Field order.
Definition: avcodec.h:2186
enum AVPixelFormat hwaccel_retrieved_pix_fmt
Definition: ffmpeg.h:375
const char * name
A comma separated list of short names for the format.
Definition: avformat.h:647
static int decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, AVPacket *pkt)
Definition: ffmpeg.c:2240
#define AV_DICT_IGNORE_SUFFIX
Return first entry in a dictionary whose first part corresponds to the search key, ignoring the suffix of the found key string.
Definition: dict.h:70
static int send_filter_eof(InputStream *ist)
Definition: ffmpeg.c:2563
OutputFile ** output_files
Definition: ffmpeg.c:154
#define AV_LOG_FATAL
Something went wrong and recovery is not possible.
Definition: log.h:170
void av_fifo_freep(AVFifoBuffer **f)
Free an AVFifoBuffer and reset pointer to NULL.
Definition: fifo.c:63
AVCodecParameters * codecpar
Codec parameters associated with this stream.
Definition: avformat.h:1021
#define av_malloc_array(a, b)
int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, enum AVRounding rnd)
Rescale a 64-bit integer by 2 rational numbers with specified rounding.
Definition: mathematics.c:134
enum AVHWDeviceType device_type
The device type associated with the configuration.
Definition: avcodec.h:3414
static void flush_encoders(void)
Definition: ffmpeg.c:1863
uint32_t codec_tag
Additional information about the codec (corresponds to the AVI FOURCC).
Definition: avcodec.h:3904
int copy_tb
Definition: ffmpeg_opt.c:102
int64_t min_pts
Definition: ffmpeg.h:321
int initialized
Definition: ffmpeg.h:519
static volatile int received_sigterm
Definition: ffmpeg.c:334
#define FFSWAP(type, a, b)
Definition: common.h:99
int discard
Definition: ffmpeg.h:298
static int get_input_packet(InputFile *f, AVPacket *pkt)
Definition: ffmpeg.c:4140
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:329
int stream_index
Definition: avcodec.h:1447
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avformat.h:903
enum HWAccelID hwaccel_id
Definition: ffmpeg.h:364
int depth
Number of bits in the component.
Definition: pixdesc.h:58
enum AVSubtitleType type
Definition: avcodec.h:3862
int64_t first_pts
Definition: ffmpeg.h:455
int nb_inputs
Definition: ffmpeg.h:290
enum AVDiscard discard
Selects which packets can be discarded at will and do not need to be demuxed.
Definition: avformat.h:929
#define DECODING_FOR_OST
Definition: ffmpeg.h:301
int index
Definition: ffmpeg.h:444
AVRational r_frame_rate
Real base framerate of the stream.
Definition: avformat.h:998
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
OSTFinished
Definition: ffmpeg.h:437
This structure stores compressed data.
Definition: avcodec.h:1422
int avio_closep(AVIOContext **s)
Close the resource accessed by the AVIOContext *s, free it and set the pointer pointing to it to NULL...
Definition: aviobuf.c:1214
AVCodecParameters * par_in
Parameters of the input stream.
Definition: avcodec.h:5731
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVSubtitle *sub)
Definition: encode.c:342
AVFifoBuffer * frame_queue
Definition: ffmpeg.h:242
int debug_ts
Definition: ffmpeg_opt.c:103
AVRational av_buffersink_get_time_base(const AVFilterContext *ctx)
static OutputStream * choose_output(void)
Select the output stream to process.
Definition: ffmpeg.c:3875
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:292
static void sigterm_handler(int sig)
Definition: ffmpeg.c:341
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1438
int64_t av_stream_get_end_pts(const AVStream *st)
Returns the pts of the last muxed packet + its duration.
Definition: utils.c:141
void * opaque
Private data of the user, can be used to carry app specific stuff.
Definition: avcodec.h:1575
for(j=16;j >0;--j)
#define FFMAX3(a, b, c)
Definition: common.h:95
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
#define tb
Definition: regdef.h:68
AVProgram ** programs
Definition: avformat.h:1531
#define AV_DISPOSITION_ORIGINAL
Definition: avformat.h:821
int avio_printf(AVIOContext *s, const char *fmt,...) av_printf_format(2
int attribute_align_arg av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame)
Add a frame to the buffer source.
Definition: buffersrc.c:144
InputStream ** input_streams
Definition: ffmpeg.c:147
static unsigned dup_warning
Definition: ffmpeg.c:136
const HWAccel hwaccels[]
Definition: ffmpeg_opt.c:69
Definition: ffmpeg.h:429
static void close_output_stream(OutputStream *ost)
Definition: ffmpeg.c:825
uint8_t * subtitle_header
Header containing style information for text subtitles.
Definition: avcodec.h:3001
static uint8_t tmp[11]
Definition: aes_ctr.c:26