FFmpeg 4.0
f_select.c
1 /*
2  * Copyright (c) 2011 Stefano Sabatini
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * filter for selecting which frames pass through the filterchain
24  */
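/*
 * For reference, typical expressions passed to this filter through the
 * "expr"/"e" option defined below look like:
 *
 *     select='eq(pict_type\,I)'     keep only I-frames
 *     select='gt(scene\,0.4)'       keep frames scored as likely scene changes
 *     select='not(mod(n\,10))'      keep one frame out of every ten
 *
 * The available variable names are exactly the entries of var_names[] below.
 */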
25 
26 #include "libavutil/avstring.h"
27 #include "libavutil/eval.h"
28 #include "libavutil/fifo.h"
29 #include "libavutil/internal.h"
30 #include "libavutil/opt.h"
31 #include "libavutil/pixelutils.h"
32 #include "avfilter.h"
33 #include "audio.h"
34 #include "formats.h"
35 #include "internal.h"
36 #include "video.h"
37 
38 static const char *const var_names[] = {
39  "TB", ///< timebase
40 
41  "pts", ///< original pts in the file of the frame
42  "start_pts", ///< first PTS in the stream, expressed in TB units
43  "prev_pts", ///< previous frame PTS
44  "prev_selected_pts", ///< previous selected frame PTS
45 
46  "t", ///< timestamp expressed in seconds
47  "start_t", ///< first PTS in the stream, expressed in seconds
48  "prev_t", ///< previous frame time
49  "prev_selected_t", ///< previously selected time
50 
51  "pict_type", ///< the type of picture in the movie
52  "I",
53  "P",
54  "B",
55  "S",
56  "SI",
57  "SP",
58  "BI",
59  "PICT_TYPE_I",
60  "PICT_TYPE_P",
61  "PICT_TYPE_B",
62  "PICT_TYPE_S",
63  "PICT_TYPE_SI",
64  "PICT_TYPE_SP",
65  "PICT_TYPE_BI",
66 
67  "interlace_type", ///< the frame interlace type
68  "PROGRESSIVE",
69  "TOPFIRST",
70  "BOTTOMFIRST",
71 
72  "consumed_samples_n",///< number of samples consumed by the filter (only audio)
73  "samples_n", ///< number of samples in the current frame (only audio)
74  "sample_rate", ///< sample rate (only audio)
75 
76  "n", ///< frame number (starting from zero)
77  "selected_n", ///< selected frame number (starting from zero)
78  "prev_selected_n", ///< number of the last selected frame
79 
80  "key", ///< tell if the frame is a key frame
81  "pos", ///< original position in the file of the frame
82 
83  "scene",
84 
85  "concatdec_select", ///< frame is within the interval set by the concat demuxer
86 
87  NULL
88 };
89 
90 enum var_name {
91  VAR_TB,
92 
93  VAR_PTS,
94  VAR_START_PTS,
95  VAR_PREV_PTS,
96  VAR_PREV_SELECTED_PTS,
97 
98  VAR_T,
99  VAR_START_T,
100  VAR_PREV_T,
101  VAR_PREV_SELECTED_T,
102 
103  VAR_PICT_TYPE,
104  VAR_I,
105  VAR_P,
106  VAR_B,
107  VAR_S,
108  VAR_SI,
109  VAR_SP,
110  VAR_BI,
111  VAR_PICT_TYPE_I,
112  VAR_PICT_TYPE_P,
113  VAR_PICT_TYPE_B,
114  VAR_PICT_TYPE_S,
115  VAR_PICT_TYPE_SI,
116  VAR_PICT_TYPE_SP,
117  VAR_PICT_TYPE_BI,
118 
119  VAR_INTERLACE_TYPE,
120  VAR_INTERLACE_TYPE_P,
121  VAR_INTERLACE_TYPE_T,
122  VAR_INTERLACE_TYPE_B,
123 
124  VAR_CONSUMED_SAMPLES_N,
125  VAR_SAMPLES_N,
126  VAR_SAMPLE_RATE,
127 
128  VAR_N,
129  VAR_SELECTED_N,
130  VAR_PREV_SELECTED_N,
131 
132  VAR_KEY,
133  VAR_POS,
134 
135  VAR_SCENE,
136 
137  VAR_CONCATDEC_SELECT,
138 
139  VAR_VARS_NB
140 };
141 
142 typedef struct SelectContext {
143  const AVClass *class;
144  char *expr_str;
145  AVExpr *expr;
146  double var_values[VAR_VARS_NB];
147  int do_scene_detect; ///< 1 if the expression requires scene detection variables, 0 otherwise
148  av_pixelutils_sad_fn sad; ///< Sum of the absolute difference function (scene detect only)
149  double prev_mafd; ///< previous MAFD (scene detect only)
150  AVFrame *prev_picref; ///< previous frame (scene detect only)
151  double select;
152  int select_out; ///< mark the selected output pad index
153  int nb_outputs;
154 } SelectContext;
155 
156 #define OFFSET(x) offsetof(SelectContext, x)
157 #define DEFINE_OPTIONS(filt_name, FLAGS) \
158 static const AVOption filt_name##_options[] = { \
159  { "expr", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
160  { "e", "set an expression to use for selecting frames", OFFSET(expr_str), AV_OPT_TYPE_STRING, { .str = "1" }, .flags=FLAGS }, \
161  { "outputs", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
162  { "n", "set the number of outputs", OFFSET(nb_outputs), AV_OPT_TYPE_INT, {.i64 = 1}, 1, INT_MAX, .flags=FLAGS }, \
163  { NULL } \
164 }
165 
166 static int request_frame(AVFilterLink *outlink);
167 
168 static av_cold int init(AVFilterContext *ctx)
169 {
170  SelectContext *select = ctx->priv;
171  int i, ret;
172 
173  if ((ret = av_expr_parse(&select->expr, select->expr_str,
174  var_names, NULL, NULL, NULL, NULL, 0, ctx)) < 0) {
175  av_log(ctx, AV_LOG_ERROR, "Error while parsing expression '%s'\n",
176  select->expr_str);
177  return ret;
178  }
179  select->do_scene_detect = !!strstr(select->expr_str, "scene");
180 
181  for (i = 0; i < select->nb_outputs; i++) {
182  AVFilterPad pad = { 0 };
183 
184  pad.name = av_asprintf("output%d", i);
185  if (!pad.name)
186  return AVERROR(ENOMEM);
187  pad.type = ctx->filter->inputs[0].type;
188  pad.request_frame = request_frame;
189  if ((ret = ff_insert_outpad(ctx, i, &pad)) < 0) {
190  av_freep(&pad.name);
191  return ret;
192  }
193  }
194 
195  return 0;
196 }
197 
198 #define INTERLACE_TYPE_P 0
199 #define INTERLACE_TYPE_T 1
200 #define INTERLACE_TYPE_B 2
201 
202 static int config_input(AVFilterLink *inlink)
203 {
204  SelectContext *select = inlink->dst->priv;
205 
206  select->var_values[VAR_N] = 0.0;
207  select->var_values[VAR_SELECTED_N] = 0.0;
208 
209  select->var_values[VAR_TB] = av_q2d(inlink->time_base);
210 
211  select->var_values[VAR_PREV_PTS] = NAN;
212  select->var_values[VAR_PREV_SELECTED_PTS] = NAN;
213  select->var_values[VAR_PREV_SELECTED_T] = NAN;
214  select->var_values[VAR_PREV_T] = NAN;
215  select->var_values[VAR_START_PTS] = NAN;
216  select->var_values[VAR_START_T] = NAN;
217 
218  select->var_values[VAR_I] = AV_PICTURE_TYPE_I;
219  select->var_values[VAR_P] = AV_PICTURE_TYPE_P;
220  select->var_values[VAR_B] = AV_PICTURE_TYPE_B;
221  select->var_values[VAR_SI] = AV_PICTURE_TYPE_SI;
222  select->var_values[VAR_SP] = AV_PICTURE_TYPE_SP;
223  select->var_values[VAR_BI] = AV_PICTURE_TYPE_BI;
224  select->var_values[VAR_PICT_TYPE_I] = AV_PICTURE_TYPE_I;
225  select->var_values[VAR_PICT_TYPE_P] = AV_PICTURE_TYPE_P;
226  select->var_values[VAR_PICT_TYPE_B] = AV_PICTURE_TYPE_B;
227  select->var_values[VAR_PICT_TYPE_SI] = AV_PICTURE_TYPE_SI;
228  select->var_values[VAR_PICT_TYPE_SP] = AV_PICTURE_TYPE_SP;
229  select->var_values[VAR_PICT_TYPE_BI] = AV_PICTURE_TYPE_BI;
230 
231  select->var_values[VAR_INTERLACE_TYPE_P] = INTERLACE_TYPE_P;
232  select->var_values[VAR_INTERLACE_TYPE_T] = INTERLACE_TYPE_T;
233  select->var_values[VAR_INTERLACE_TYPE_B] = INTERLACE_TYPE_B;
234 
235  select->var_values[VAR_PICT_TYPE] = NAN;
236  select->var_values[VAR_INTERLACE_TYPE] = NAN;
237  select->var_values[VAR_SCENE] = NAN;
238  select->var_values[VAR_CONSUMED_SAMPLES_N] = NAN;
239  select->var_values[VAR_SAMPLES_N] = NAN;
240 
241  select->var_values[VAR_SAMPLE_RATE] =
242  inlink->type == AVMEDIA_TYPE_AUDIO ? inlink->sample_rate : NAN;
243 
244  if (select->do_scene_detect) {
245  select->sad = av_pixelutils_get_sad_fn(3, 3, 2, select); // 8x8 both sources aligned
246  if (!select->sad)
247  return AVERROR(EINVAL);
248  }
249  return 0;
250 }
251 
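/*
 * Scene change score, as computed by the code below: sum 8x8 SAD blocks over
 * the packed RGB plane to get the mean absolute frame difference (MAFD)
 * against the previous frame, take the smaller of the MAFD and its change
 * since the previous frame, and clip that value divided by 100 into [0,1].
 * Values close to 1 indicate a likely scene change.
 */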
252 static double get_scene_score(AVFilterContext *ctx, AVFrame *frame)
253 {
254  double ret = 0;
255  SelectContext *select = ctx->priv;
256  AVFrame *prev_picref = select->prev_picref;
257 
258  if (prev_picref &&
259  frame->height == prev_picref->height &&
260  frame->width == prev_picref->width) {
261  int x, y, nb_sad = 0;
262  int64_t sad = 0;
263  double mafd, diff;
264  uint8_t *p1 = frame->data[0];
265  uint8_t *p2 = prev_picref->data[0];
266  const int p1_linesize = frame->linesize[0];
267  const int p2_linesize = prev_picref->linesize[0];
268 
269  for (y = 0; y < frame->height - 7; y += 8) {
270  for (x = 0; x < frame->width*3 - 7; x += 8) {
271  sad += select->sad(p1 + x, p1_linesize, p2 + x, p2_linesize);
272  nb_sad += 8 * 8;
273  }
274  p1 += 8 * p1_linesize;
275  p2 += 8 * p2_linesize;
276  }
277  emms_c();
278  mafd = nb_sad ? (double)sad / nb_sad : 0;
279  diff = fabs(mafd - select->prev_mafd);
280  ret = av_clipf(FFMIN(mafd, diff) / 100., 0, 1);
281  select->prev_mafd = mafd;
282  av_frame_free(&prev_picref);
283  }
284  select->prev_picref = av_frame_clone(frame);
285  return ret;
286 }
287 
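/*
 * Value of the concatdec_select variable, derived from the metadata that the
 * concat demuxer attaches to frames: -1 (select) while the frame timestamp
 * lies inside [lavf.concatdec.start_time, start_time + duration), 0 once it
 * falls outside that interval, and NAN when no such metadata is present.
 */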
288 static double get_concatdec_select(AVFrame *frame, int64_t pts)
289 {
290  AVDictionary *metadata = frame->metadata;
291  AVDictionaryEntry *start_time_entry = av_dict_get(metadata, "lavf.concatdec.start_time", NULL, 0);
292  AVDictionaryEntry *duration_entry = av_dict_get(metadata, "lavf.concatdec.duration", NULL, 0);
293  if (start_time_entry) {
294  int64_t start_time = strtoll(start_time_entry->value, NULL, 10);
295  if (pts >= start_time) {
296  if (duration_entry) {
297  int64_t duration = strtoll(duration_entry->value, NULL, 10);
298  if (pts < start_time + duration)
299  return -1;
300  else
301  return 0;
302  }
303  return -1;
304  }
305  return 0;
306  }
307  return NAN;
308 }
309 
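/* Helpers to convert between the doubles used by the expression evaluator and
 * integer timestamps, mapping NAN to AV_NOPTS_VALUE and back. */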
310 #define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
311 #define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
312 
313 static void select_frame(AVFilterContext *ctx, AVFrame *frame)
314 {
315  SelectContext *select = ctx->priv;
316  AVFilterLink *inlink = ctx->inputs[0];
317  double res;
318 
319  if (isnan(select->var_values[VAR_START_PTS]))
320  select->var_values[VAR_START_PTS] = TS2D(frame->pts);
321  if (isnan(select->var_values[VAR_START_T]))
322  select->var_values[VAR_START_T] = TS2D(frame->pts) * av_q2d(inlink->time_base);
323 
324  select->var_values[VAR_N ] = inlink->frame_count_out;
325  select->var_values[VAR_PTS] = TS2D(frame->pts);
326  select->var_values[VAR_T ] = TS2D(frame->pts) * av_q2d(inlink->time_base);
327  select->var_values[VAR_POS] = frame->pkt_pos == -1 ? NAN : frame->pkt_pos;
328  select->var_values[VAR_KEY] = frame->key_frame;
329  select->var_values[VAR_CONCATDEC_SELECT] = get_concatdec_select(frame, av_rescale_q(frame->pts, inlink->time_base, AV_TIME_BASE_Q));
330 
331  switch (inlink->type) {
332  case AVMEDIA_TYPE_AUDIO:
333  select->var_values[VAR_SAMPLES_N] = frame->nb_samples;
334  break;
335 
336  case AVMEDIA_TYPE_VIDEO:
337  select->var_values[VAR_INTERLACE_TYPE] =
338  !frame->interlaced_frame ? INTERLACE_TYPE_P :
339  frame->top_field_first ? INTERLACE_TYPE_T : INTERLACE_TYPE_B;
340  select->var_values[VAR_PICT_TYPE] = frame->pict_type;
341  if (select->do_scene_detect) {
342  char buf[32];
343  select->var_values[VAR_SCENE] = get_scene_score(ctx, frame);
344  // TODO: document metadata
345  snprintf(buf, sizeof(buf), "%f", select->var_values[VAR_SCENE]);
346  av_dict_set(&frame->metadata, "lavfi.scene_score", buf, 0);
347  }
348  break;
349  }
350 
351  select->select = res = av_expr_eval(select->expr, select->var_values, NULL);
352  av_log(inlink->dst, AV_LOG_DEBUG,
353  "n:%f pts:%f t:%f key:%d",
354  select->var_values[VAR_N],
355  select->var_values[VAR_PTS],
356  select->var_values[VAR_T],
357  frame->key_frame);
358 
359  switch (inlink->type) {
360  case AVMEDIA_TYPE_VIDEO:
361  av_log(inlink->dst, AV_LOG_DEBUG, " interlace_type:%c pict_type:%c scene:%f",
362  (!frame->interlaced_frame) ? 'P' :
363  frame->top_field_first ? 'T' : 'B',
364  av_get_picture_type_char(frame->pict_type),
365  select->var_values[VAR_SCENE]);
366  break;
367  case AVMEDIA_TYPE_AUDIO:
368  av_log(inlink->dst, AV_LOG_DEBUG, " samples_n:%d consumed_samples_n:%f",
369  frame->nb_samples,
370  select->var_values[VAR_CONSUMED_SAMPLES_N]);
371  break;
372  }
373 
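/* Map the expression result onto an output pad: 0 drops the frame, NaN or a
 * negative value selects the first output, and a positive value N selects
 * output ceil(N)-1, capped at the last declared output. */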
374  if (res == 0) {
375  select->select_out = -1; /* drop */
376  } else if (isnan(res) || res < 0) {
377  select->select_out = 0; /* first output */
378  } else {
379  select->select_out = FFMIN(ceilf(res)-1, select->nb_outputs-1); /* other outputs */
380  }
381 
382  av_log(inlink->dst, AV_LOG_DEBUG, " -> select:%f select_out:%d\n", res, select->select_out);
383 
384  if (res) {
385  select->var_values[VAR_PREV_SELECTED_N] = select->var_values[VAR_N];
386  select->var_values[VAR_PREV_SELECTED_PTS] = select->var_values[VAR_PTS];
387  select->var_values[VAR_PREV_SELECTED_T] = select->var_values[VAR_T];
388  select->var_values[VAR_SELECTED_N] += 1.0;
389  if (inlink->type == AVMEDIA_TYPE_AUDIO)
390  select->var_values[VAR_CONSUMED_SAMPLES_N] += frame->nb_samples;
391  }
392 
393  select->var_values[VAR_PREV_PTS] = select->var_values[VAR_PTS];
394  select->var_values[VAR_PREV_T] = select->var_values[VAR_T];
395 }
396 
397 static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
398 {
399  AVFilterContext *ctx = inlink->dst;
400  SelectContext *select = ctx->priv;
401 
402  select_frame(ctx, frame);
403  if (select->select)
404  return ff_filter_frame(ctx->outputs[select->select_out], frame);
405 
406  av_frame_free(&frame);
407  return 0;
408 }
409 
410 static int request_frame(AVFilterLink *outlink)
411 {
412  AVFilterLink *inlink = outlink->src->inputs[0];
413  int ret = ff_request_frame(inlink);
414  return ret;
415 }
416 
417 static av_cold void uninit(AVFilterContext *ctx)
418 {
419  SelectContext *select = ctx->priv;
420  int i;
421 
422  av_expr_free(select->expr);
423  select->expr = NULL;
424 
425  for (i = 0; i < ctx->nb_outputs; i++)
426  av_freep(&ctx->output_pads[i].name);
427 
428  if (select->do_scene_detect) {
429  av_frame_free(&select->prev_picref);
430  }
431 }
432 
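/* Scene detection works on packed 24-bit RGB: get_scene_score() walks
 * frame->width*3 bytes per row, so when the expression uses the scene
 * variable the input formats are restricted accordingly. */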
433 static int query_formats(AVFilterContext *ctx)
434 {
435  SelectContext *select = ctx->priv;
436 
437  if (!select->do_scene_detect) {
438  return ff_default_query_formats(ctx);
439  } else {
440  int ret;
441  static const enum AVPixelFormat pix_fmts[] = {
442  AV_PIX_FMT_RGB24, AV_PIX_FMT_BGR24,
443  AV_PIX_FMT_NONE
444  };
445  AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
446 
447  if (!fmts_list)
448  return AVERROR(ENOMEM);
449  ret = ff_set_common_formats(ctx, fmts_list);
450  if (ret < 0)
451  return ret;
452  }
453  return 0;
454 }
455 
456 #if CONFIG_ASELECT_FILTER
457 
458 DEFINE_OPTIONS(aselect, AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM);
459 AVFILTER_DEFINE_CLASS(aselect);
460 
461 static av_cold int aselect_init(AVFilterContext *ctx)
462 {
463  SelectContext *select = ctx->priv;
464  int ret;
465 
466  if ((ret = init(ctx)) < 0)
467  return ret;
468 
469  if (select->do_scene_detect) {
470  av_log(ctx, AV_LOG_ERROR, "Scene detection is ignored in aselect filter\n");
471  return AVERROR(EINVAL);
472  }
473 
474  return 0;
475 }
476 
477 static const AVFilterPad avfilter_af_aselect_inputs[] = {
478  {
479  .name = "default",
480  .type = AVMEDIA_TYPE_AUDIO,
481  .config_props = config_input,
482  .filter_frame = filter_frame,
483  },
484  { NULL }
485 };
486 
487 AVFilter ff_af_aselect = {
488  .name = "aselect",
489  .description = NULL_IF_CONFIG_SMALL("Select audio frames to pass in output."),
490  .init = aselect_init,
491  .uninit = uninit,
492  .priv_size = sizeof(SelectContext),
493  .inputs = avfilter_af_aselect_inputs,
494  .priv_class = &aselect_class,
495  .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
496 };
497 #endif /* CONFIG_ASELECT_FILTER */
498 
499 #if CONFIG_SELECT_FILTER
500 
501 DEFINE_OPTIONS(select, AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM);
502 AVFILTER_DEFINE_CLASS(select);
503 
504 static av_cold int select_init(AVFilterContext *ctx)
505 {
506  int ret;
507 
508  if ((ret = init(ctx)) < 0)
509  return ret;
510 
511  return 0;
512 }
513 
514 static const AVFilterPad avfilter_vf_select_inputs[] = {
515  {
516  .name = "default",
517  .type = AVMEDIA_TYPE_VIDEO,
518  .config_props = config_input,
519  .filter_frame = filter_frame,
520  },
521  { NULL }
522 };
523 
524 AVFilter ff_vf_select = {
525  .name = "select",
526  .description = NULL_IF_CONFIG_SMALL("Select video frames to pass in output."),
527  .init = select_init,
528  .uninit = uninit,
529  .query_formats = query_formats,
530  .priv_size = sizeof(SelectContext),
531  .priv_class = &select_class,
532  .inputs = avfilter_vf_select_inputs,
533  .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
534 };
535 #endif /* CONFIG_SELECT_FILTER */