FFmpeg  4.4.6
vf_blackdetect.c
/*
 * Copyright (c) 2012 Stefano Sabatini
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Video black detector, loosely based on blackframe with extended
 * syntax and features
 */
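
/*
 * Typical command-line usage (the input name is just an example):
 *
 *   ffmpeg -i input.mp4 -vf blackdetect=d=0.5:pix_th=0.10 -an -f null -
 *
 * Detected intervals are reported as black_start/black_end/black_duration
 * log lines and attached to frames as the metadata keys "lavfi.black_start"
 * and "lavfi.black_end" (see filter_frame() below).
 */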

#include <float.h>
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/timestamp.h"
#include "avfilter.h"
#include "internal.h"

typedef struct BlackDetectContext {
    const AVClass *class;
    double  black_min_duration_time; ///< minimum duration of detected black, in seconds
    int64_t black_min_duration;      ///< minimum duration of detected black, expressed in timebase units
    int64_t black_start;             ///< pts start time of the first black picture
    int64_t black_end;               ///< pts end time of the last black picture
    int64_t last_picref_pts;         ///< pts of the last input picture
    int black_started;

    double       picture_black_ratio_th;
    double       pixel_black_th;
    unsigned int pixel_black_th_i;

    unsigned int nb_black_pixels;    ///< number of black pixels counted so far
    AVRational   time_base;
    int          depth;
    int          nb_threads;
    unsigned int *counter;
} BlackDetectContext;

#define OFFSET(x) offsetof(BlackDetectContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption blackdetect_options[] = {
    { "d",                  "set minimum detected black duration in seconds", OFFSET(black_min_duration_time), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 0, DBL_MAX, FLAGS },
    { "black_min_duration", "set minimum detected black duration in seconds", OFFSET(black_min_duration_time), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 0, DBL_MAX, FLAGS },
    { "picture_black_ratio_th", "set the picture black ratio threshold", OFFSET(picture_black_ratio_th), AV_OPT_TYPE_DOUBLE, {.dbl=.98}, 0, 1, FLAGS },
    { "pic_th",                 "set the picture black ratio threshold", OFFSET(picture_black_ratio_th), AV_OPT_TYPE_DOUBLE, {.dbl=.98}, 0, 1, FLAGS },
    { "pixel_black_th", "set the pixel black threshold", OFFSET(pixel_black_th), AV_OPT_TYPE_DOUBLE, {.dbl=.10}, 0, 1, FLAGS },
    { "pix_th",         "set the pixel black threshold", OFFSET(pixel_black_th), AV_OPT_TYPE_DOUBLE, {.dbl=.10}, 0, 1, FLAGS },
    { NULL }
};
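
/* Each option is exposed under a long and a short name ("black_min_duration"
 * vs. "d", "picture_black_ratio_th" vs. "pic_th", "pixel_black_th" vs.
 * "pix_th"); both entries of a pair point at the same context field. */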

AVFILTER_DEFINE_CLASS(blackdetect);

#define YUVJ_FORMATS \
    AV_PIX_FMT_YUVJ411P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P

static const enum AVPixelFormat yuvj_formats[] = {
    YUVJ_FORMATS, AV_PIX_FMT_NONE
};

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
        AV_PIX_FMT_NV12, AV_PIX_FMT_NV21,
        YUVJ_FORMATS,
        AV_PIX_FMT_YUV444P9, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV422P16,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV420P16,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
        AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
        AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12,
        AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
        AV_PIX_FMT_NONE
    };

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}
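
/* Only the first plane (luma, or gray) is ever inspected, so any planar or
 * semi-planar format whose first plane carries 8-16 bit luma samples can be
 * accepted above. */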

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    BlackDetectContext *s = ctx->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    const int depth = desc->comp[0].depth;
    const int max = (1 << depth) - 1;
    const int factor = (1 << (depth - 8));

    s->depth = depth;
    s->nb_threads = ff_filter_get_nb_threads(ctx);
    s->time_base = inlink->time_base;
    s->black_min_duration = s->black_min_duration_time / av_q2d(s->time_base);
    s->counter = av_calloc(s->nb_threads, sizeof(*s->counter));
    if (!s->counter)
        return AVERROR(ENOMEM);

    s->pixel_black_th_i = ff_fmt_is_in(inlink->format, yuvj_formats) ?
        // luminance_minimum_value + pixel_black_th * luminance_range_size
        s->pixel_black_th * max :
        16 * factor + s->pixel_black_th * (235 - 16) * factor;
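    /* For example, with the default pixel_black_th = 0.10:
     *   8-bit limited range:      16 + 0.10 * (235 - 16)     = ~37
     *   10-bit limited range:     64 + 0.10 * (235 - 16) * 4 = ~151
     *   8-bit full range (yuvj*): 0.10 * 255                 = ~25
     * Luma samples at or below this value are counted as black. */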

    av_log(s, AV_LOG_VERBOSE,
           "black_min_duration:%s pixel_black_th:%f pixel_black_th_i:%d picture_black_ratio_th:%f\n",
           av_ts2timestr(s->black_min_duration, &s->time_base),
           s->pixel_black_th, s->pixel_black_th_i,
           s->picture_black_ratio_th);
    return 0;
}

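/* Log the black interval that just ended, but only if it lasted at least
 * black_min_duration. */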
static void check_black_end(AVFilterContext *ctx)
{
    BlackDetectContext *s = ctx->priv;

    if ((s->black_end - s->black_start) >= s->black_min_duration) {
        av_log(s, AV_LOG_INFO,
               "black_start:%s black_end:%s black_duration:%s\n",
               av_ts2timestr(s->black_start, &s->time_base),
               av_ts2timestr(s->black_end,   &s->time_base),
               av_ts2timestr(s->black_end - s->black_start, &s->time_base));
    }
}
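
/* Slice-threaded worker: each job scans an equal band of rows of the first
 * (luma) plane and counts the samples at or below pixel_black_th_i; the
 * per-job results land in s->counter[jobnr] and are summed in filter_frame(). */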

static int black_counter(AVFilterContext *ctx, void *arg,
                         int jobnr, int nb_jobs)
{
    BlackDetectContext *s = ctx->priv;
    const unsigned int threshold = s->pixel_black_th_i;
    unsigned int *counterp = &s->counter[jobnr];
    AVFrame *in = arg;
    const int linesize = in->linesize[0];
    const int w = in->width;
    const int h = in->height;
    const int start = (h * jobnr) / nb_jobs;
    const int end = (h * (jobnr+1)) / nb_jobs;
    const int size = end - start;
    unsigned int counter = 0;

    if (s->depth == 8) {
        const uint8_t *p = in->data[0] + start * linesize;

        for (int i = 0; i < size; i++) {
            for (int x = 0; x < w; x++)
                counter += p[x] <= threshold;
            p += linesize;
        }
    } else {
        const uint16_t *p = (const uint16_t *)(in->data[0] + start * linesize);

        for (int i = 0; i < size; i++) {
            for (int x = 0; x < w; x++)
                counter += p[x] <= threshold;
            p += linesize / 2;
        }
    }

    *counterp = counter;

    return 0;
}
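
/* Per-frame driver: run black_counter() over horizontal slices, derive the
 * frame's black-pixel ratio, and track black runs: a run opens on the first
 * frame whose ratio reaches picture_black_ratio_th and closes on the first
 * frame below it.  Interval bounds are exported as the frame metadata keys
 * "lavfi.black_start"/"lavfi.black_end", and sufficiently long runs are also
 * logged via check_black_end(). */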

static int filter_frame(AVFilterLink *inlink, AVFrame *picref)
{
    AVFilterContext *ctx = inlink->dst;
    BlackDetectContext *s = ctx->priv;
    double picture_black_ratio = 0;

    ctx->internal->execute(ctx, black_counter, picref, NULL,
                           FFMIN(inlink->h, s->nb_threads));

    for (int i = 0; i < s->nb_threads; i++)
        s->nb_black_pixels += s->counter[i];

    picture_black_ratio = (double)s->nb_black_pixels / (inlink->w * inlink->h);

    av_log(ctx, AV_LOG_DEBUG,
           "frame:%"PRId64" picture_black_ratio:%f pts:%s t:%s type:%c\n",
           inlink->frame_count_out, picture_black_ratio,
           av_ts2str(picref->pts), av_ts2timestr(picref->pts, &s->time_base),
           av_get_picture_type_char(picref->pict_type));

    if (picture_black_ratio >= s->picture_black_ratio_th) {
        if (!s->black_started) {
            /* black starts here */
            s->black_started = 1;
            s->black_start = picref->pts;
            av_dict_set(&picref->metadata, "lavfi.black_start",
                av_ts2timestr(s->black_start, &s->time_base), 0);
        }
    } else if (s->black_started) {
        /* black ends here */
        s->black_started = 0;
        s->black_end = picref->pts;
        check_black_end(ctx);
        av_dict_set(&picref->metadata, "lavfi.black_end",
            av_ts2timestr(s->black_end, &s->time_base), 0);
    }

    s->last_picref_pts = picref->pts;
    s->nb_black_pixels = 0;
    return ff_filter_frame(inlink->dst->outputs[0], picref);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    BlackDetectContext *s = ctx->priv;

    av_freep(&s->counter);

    if (s->black_started) {
        // FIXME: black_end should be set to last_picref_pts + last_picref_duration
        s->black_end = s->last_picref_pts;
        check_black_end(ctx);
    }
}

static const AVFilterPad blackdetect_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .config_props  = config_input,
        .filter_frame  = filter_frame,
    },
    { NULL }
};

static const AVFilterPad blackdetect_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_blackdetect = {
    .name          = "blackdetect",
    .description   = NULL_IF_CONFIG_SMALL("Detect video intervals that are (almost) black."),
    .priv_size     = sizeof(BlackDetectContext),
    .query_formats = query_formats,
    .inputs        = blackdetect_inputs,
    .outputs       = blackdetect_outputs,
    .uninit        = uninit,
    .priv_class    = &blackdetect_class,
    .flags         = AVFILTER_FLAG_SLICE_THREADS,
};