FFmpeg  4.4.6
setpts.c
/*
 * Copyright (c) 2010 Stefano Sabatini
 * Copyright (c) 2008 Victor Paesa
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * video presentation timestamp (PTS) modification filter
 */

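/*
 * For orientation: setpts (video) and asetpts (audio) rewrite each frame's
 * PTS as the result of a user-supplied expression. A few typical expressions
 * from the filter documentation:
 *
 *     setpts=PTS-STARTPTS     shift timestamps so the stream starts at zero
 *     setpts=0.5*PTS          halve every timestamp (video plays twice as fast)
 *     asetpts=N/SR/TB         recompute audio PTS from the running sample count
 *
 * The constants available in such expressions are listed in var_names[] below.
 */
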
#include <inttypes.h>

#include "libavutil/eval.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"
#include "video.h"

static const char *const var_names[] = {
    "FRAME_RATE",           ///< defined only for constant frame-rate video
    "INTERLACED",           ///< tell if the current frame is interlaced
    "N",                    ///< frame / sample number (starting at zero)
    "NB_CONSUMED_SAMPLES",  ///< number of samples consumed by the filter (only audio)
    "NB_SAMPLES",           ///< number of samples in the current frame (only audio)
    "POS",                  ///< original position in the file of the frame
    "PREV_INPTS",           ///< previous input PTS
    "PREV_INT",             ///< previous input time in seconds
    "PREV_OUTPTS",          ///< previous output PTS
    "PREV_OUTT",            ///< previous output time in seconds
    "PTS",                  ///< original PTS in the file of the frame
    "SAMPLE_RATE",          ///< sample rate (only audio)
    "STARTPTS",             ///< PTS at start of movie
    "STARTT",               ///< time at start of movie
    "T",                    ///< original time in the file of the frame
    "TB",                   ///< timebase
    "RTCTIME",              ///< wallclock (RTC) time in microseconds
    "RTCSTART",             ///< wallclock (RTC) time at the start of the movie in microseconds
    "S",                    ///< number of samples in the current frame (alias for NB_SAMPLES)
    "SR",                   ///< audio sample rate (alias for SAMPLE_RATE)
    "FR",                   ///< frame rate (alias for FRAME_RATE), defined only for constant frame-rate video
    NULL
};

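/*
 * Illustrative sketch (hypothetical values, not used by the filter itself)
 * of how a constant table such as var_names[] is consumed by libavutil's
 * expression API: names are bound at parse time, and a parallel array of
 * doubles supplies their values at evaluation time. init() and eval_pts()
 * below do exactly this with setpts->expr and setpts->var_values[].
 *
 *     AVExpr *e = NULL;
 *     double values[VAR_VARS_NB] = { 0 };
 *
 *     if (av_expr_parse(&e, "PTS-STARTPTS", var_names,
 *                       NULL, NULL, NULL, NULL, 0, NULL) >= 0) {
 *         values[VAR_PTS]      = 1000;  // hypothetical input timestamp
 *         values[VAR_STARTPTS] = 400;   // hypothetical stream start
 *         double out = av_expr_eval(e, values, NULL);  // out == 600
 *         av_expr_free(e);
 *     }
 */
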
enum var_name {
    VAR_FRAME_RATE,
    VAR_INTERLACED,
    VAR_N,
    VAR_NB_CONSUMED_SAMPLES,
    VAR_NB_SAMPLES,
    VAR_POS,
    VAR_PREV_INPTS,
    VAR_PREV_INT,
    VAR_PREV_OUTPTS,
    VAR_PREV_OUTT,
    VAR_PTS,
    VAR_SAMPLE_RATE,
    VAR_STARTPTS,
    VAR_STARTT,
    VAR_T,
    VAR_TB,
    VAR_RTCTIME,
    VAR_RTCSTART,
    VAR_S,
    VAR_SR,
    VAR_FR,
    VAR_VARS_NB
};

typedef struct SetPTSContext {
    const AVClass *class;
    char *expr_str;
    AVExpr *expr;
    double var_values[VAR_VARS_NB];
    enum AVMediaType type;
} SetPTSContext;

static av_cold int init(AVFilterContext *ctx)
{
    SetPTSContext *setpts = ctx->priv;
    int ret;

    if ((ret = av_expr_parse(&setpts->expr, setpts->expr_str,
                             var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n", setpts->expr_str);
        return ret;
    }

    setpts->var_values[VAR_N]           = 0.0;
    setpts->var_values[VAR_S]           = 0.0;
    setpts->var_values[VAR_PREV_INPTS]  = NAN;
    setpts->var_values[VAR_PREV_INT]    = NAN;
    setpts->var_values[VAR_PREV_OUTPTS] = NAN;
    setpts->var_values[VAR_PREV_OUTT]   = NAN;
    setpts->var_values[VAR_STARTPTS]    = NAN;
    setpts->var_values[VAR_STARTT]      = NAN;
    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SetPTSContext *setpts = ctx->priv;

    setpts->type = inlink->type;
    setpts->var_values[VAR_TB] = av_q2d(inlink->time_base);
    setpts->var_values[VAR_RTCSTART] = av_gettime();

    setpts->var_values[VAR_SR] =
    setpts->var_values[VAR_SAMPLE_RATE] =
        setpts->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;

    setpts->var_values[VAR_FRAME_RATE] =
    setpts->var_values[VAR_FR]         = inlink->frame_rate.num &&
                                         inlink->frame_rate.den ?
                                            av_q2d(inlink->frame_rate) : NAN;

    av_log(inlink->src, AV_LOG_VERBOSE, "TB:%f FRAME_RATE:%f SAMPLE_RATE:%f\n",
           setpts->var_values[VAR_TB],
           setpts->var_values[VAR_FRAME_RATE],
           setpts->var_values[VAR_SAMPLE_RATE]);
    return 0;
}

#define BUF_SIZE 64

static inline char *double2int64str(char *buf, double v)
{
    if (isnan(v)) snprintf(buf, BUF_SIZE, "nan");
    else          snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)v);
    return buf;
}

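/*
 * eval_pts() works in doubles: TS2D() and TS2T() (helpers from libavfilter's
 * internal.h) map an int64_t timestamp to a plain double and to seconds
 * respectively, turning AV_NOPTS_VALUE into NAN, while D2TS() performs the
 * reverse mapping when the evaluated result is written back to the frame in
 * filter_frame().
 */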
static double eval_pts(SetPTSContext *setpts, AVFilterLink *inlink, AVFrame *frame, int64_t pts)
{
    if (isnan(setpts->var_values[VAR_STARTPTS])) {
        setpts->var_values[VAR_STARTPTS] = TS2D(pts);
        setpts->var_values[VAR_STARTT  ] = TS2T(pts, inlink->time_base);
    }
    setpts->var_values[VAR_PTS       ] = TS2D(pts);
    setpts->var_values[VAR_T         ] = TS2T(pts, inlink->time_base);
    setpts->var_values[VAR_POS       ] = !frame || frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
    setpts->var_values[VAR_RTCTIME   ] = av_gettime();

    if (frame) {
        if (inlink->type == AVMEDIA_TYPE_VIDEO) {
            setpts->var_values[VAR_INTERLACED] = frame->interlaced_frame;
        } else if (inlink->type == AVMEDIA_TYPE_AUDIO) {
            setpts->var_values[VAR_S] = frame->nb_samples;
            setpts->var_values[VAR_NB_SAMPLES] = frame->nb_samples;
        }
    }

    return av_expr_eval(setpts->expr, setpts->var_values, NULL);
}
#define d2istr(v) double2int64str((char[BUF_SIZE]){0}, v)

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    SetPTSContext *setpts = inlink->dst->priv;
    int64_t in_pts = frame->pts;
    double d;

    d = eval_pts(setpts, inlink, frame, frame->pts);
    frame->pts = D2TS(d);

    av_log(inlink->dst, AV_LOG_TRACE,
            "N:%"PRId64" PTS:%s T:%f POS:%s",
            (int64_t)setpts->var_values[VAR_N],
            d2istr(setpts->var_values[VAR_PTS]),
            setpts->var_values[VAR_T],
            d2istr(setpts->var_values[VAR_POS]));
    switch (inlink->type) {
    case AVMEDIA_TYPE_VIDEO:
        av_log(inlink->dst, AV_LOG_TRACE, " INTERLACED:%"PRId64,
                (int64_t)setpts->var_values[VAR_INTERLACED]);
        break;
    case AVMEDIA_TYPE_AUDIO:
        av_log(inlink->dst, AV_LOG_TRACE, " NB_SAMPLES:%"PRId64" NB_CONSUMED_SAMPLES:%"PRId64,
                (int64_t)setpts->var_values[VAR_NB_SAMPLES],
                (int64_t)setpts->var_values[VAR_NB_CONSUMED_SAMPLES]);
        break;
    }
    av_log(inlink->dst, AV_LOG_TRACE, " -> PTS:%s T:%f\n", d2istr(d), TS2T(d, inlink->time_base));

    if (inlink->type == AVMEDIA_TYPE_VIDEO) {
        setpts->var_values[VAR_N] += 1.0;
    } else {
        setpts->var_values[VAR_N] += frame->nb_samples;
    }

    setpts->var_values[VAR_PREV_INPTS ] = TS2D(in_pts);
    setpts->var_values[VAR_PREV_INT   ] = TS2T(in_pts, inlink->time_base);
    setpts->var_values[VAR_PREV_OUTPTS] = TS2D(frame->pts);
    setpts->var_values[VAR_PREV_OUTT]   = TS2T(frame->pts, inlink->time_base);
    if (setpts->type == AVMEDIA_TYPE_AUDIO) {
        setpts->var_values[VAR_NB_CONSUMED_SAMPLES] += frame->nb_samples;
    }
    return ff_filter_frame(inlink->dst->outputs[0], frame);
}

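/*
 * activate() drives the filter with libavfilter's activate-based scheduling:
 * forward any status change on the output back to the input, filter one
 * frame if the input FIFO has one, otherwise either propagate an
 * acknowledged input status downstream (running its timestamp through the
 * same expression) or report that a frame is wanted upstream.
 */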
static int activate(AVFilterContext *ctx)
{
    SetPTSContext *setpts = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *in;
    int status;
    int64_t pts;
    int ret;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    ret = ff_inlink_consume_frame(inlink, &in);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return filter_frame(inlink, in);

    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        double d = eval_pts(setpts, inlink, NULL, pts);

        av_log(ctx, AV_LOG_TRACE, "N:EOF PTS:%s T:%f POS:%s -> PTS:%s T:%f\n",
               d2istr(setpts->var_values[VAR_PTS]),
               setpts->var_values[VAR_T],
               d2istr(setpts->var_values[VAR_POS]),
               d2istr(d), TS2T(d, inlink->time_base));
        ff_outlink_set_status(outlink, status, D2TS(d));
        return 0;
    }

    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    SetPTSContext *setpts = ctx->priv;
    av_expr_free(setpts->expr);
    setpts->expr = NULL;
}

#define OFFSET(x) offsetof(SetPTSContext, x)
#define V AV_OPT_FLAG_VIDEO_PARAM
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM

#if CONFIG_SETPTS_FILTER
static const AVOption setpts_options[] = {
    { "expr", "Expression determining the frame timestamp", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "PTS" }, .flags = V|F },
    { NULL }
};
AVFILTER_DEFINE_CLASS(setpts);

static const AVFilterPad avfilter_vf_setpts_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad avfilter_vf_setpts_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_setpts = {
    .name        = "setpts",
    .description = NULL_IF_CONFIG_SMALL("Set PTS for the output video frame."),
    .init        = init,
    .activate    = activate,
    .uninit      = uninit,

    .priv_size   = sizeof(SetPTSContext),
    .priv_class  = &setpts_class,

    .inputs      = avfilter_vf_setpts_inputs,
    .outputs     = avfilter_vf_setpts_outputs,
};
#endif /* CONFIG_SETPTS_FILTER */

#if CONFIG_ASETPTS_FILTER

static const AVOption asetpts_options[] = {
    { "expr", "Expression determining the frame timestamp", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "PTS" }, .flags = A|F },
    { NULL }
};
AVFILTER_DEFINE_CLASS(asetpts);

static const AVFilterPad asetpts_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad asetpts_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_asetpts = {
    .name        = "asetpts",
    .description = NULL_IF_CONFIG_SMALL("Set PTS for the output audio frame."),
    .init        = init,
    .activate    = activate,
    .uninit      = uninit,
    .priv_size   = sizeof(SetPTSContext),
    .priv_class  = &asetpts_class,
    .inputs      = asetpts_inputs,
    .outputs     = asetpts_outputs,
};
#endif /* CONFIG_ASETPTS_FILTER */