FFmpeg 4.4.6
avf_showwaves.c
1 /*
2  * Copyright (c) 2012 Stefano Sabatini
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * audio to video multimedia filter
24  */
25 
26 #include "libavutil/avassert.h"
27 #include "libavutil/avstring.h"
28 #include "libavutil/channel_layout.h"
29 #include "libavutil/opt.h"
30 #include "libavutil/parseutils.h"
31 #include "avfilter.h"
32 #include "filters.h"
33 #include "formats.h"
34 #include "audio.h"
35 #include "video.h"
36 #include "internal.h"
37 
38 enum ShowWavesMode {
39  MODE_POINT,
40  MODE_LINE,
41  MODE_P2P,
42  MODE_CENTERED_LINE,
43  MODE_NB,
44 };
45 
46 enum ShowWavesScale {
47  SCALE_LIN,
48  SCALE_LOG,
49  SCALE_SQRT,
50  SCALE_CBRT,
51  SCALE_NB,
52 };
53 
54 enum ShowWavesDrawMode {
55  DRAW_SCALE,
56  DRAW_FULL,
57  DRAW_NB,
58 };
59 
60 enum ShowWavesFilterMode {
61  FILTER_AVERAGE,
62  FILTER_PEAK,
63  FILTER_NB,
64 };
65 
66 struct frame_node {
67  AVFrame *frame;
68  struct frame_node *next;
69 };
70 
71 typedef struct ShowWavesContext {
72  const AVClass *class;
73  int w, h;
74  AVRational rate;
75  char *colors;
76  int buf_idx;
77  int16_t *buf_idy; /* y coordinate of previous sample for each channel */
78  AVFrame *outpicref;
79  int n;
80  int pixstep;
81  int sample_count_mod;
82  int mode; ///< ShowWavesMode
83  int scale; ///< ShowWavesScale
84  int draw_mode; ///< ShowWavesDrawMode
85  int split_channels;
86  int filter_mode;
87  uint8_t *fg;
88 
89  int (*get_h)(int16_t sample, int height);
90  void (*draw_sample)(uint8_t *buf, int height, int linesize,
91  int16_t *prev_y, const uint8_t color[4], int h);
92 
93  /* single picture */
94  int single_pic;
95  struct frame_node *audio_frames;
96  struct frame_node *last_frame;
97  int64_t total_samples;
98  int64_t *sum; /* abs sum of the samples per channel */
99 } ShowWavesContext;
100 
101 #define OFFSET(x) offsetof(ShowWavesContext, x)
102 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
103 
104 static const AVOption showwaves_options[] = {
105  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
106  { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
107  { "mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"},
108  { "point", "draw a point for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT}, .flags=FLAGS, .unit="mode"},
109  { "line", "draw a line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE}, .flags=FLAGS, .unit="mode"},
110  { "p2p", "draw a line between samples", 0, AV_OPT_TYPE_CONST, {.i64=MODE_P2P}, .flags=FLAGS, .unit="mode"},
111  { "cline", "draw a centered line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_CENTERED_LINE}, .flags=FLAGS, .unit="mode"},
112  { "n", "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
113  { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
114  { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
115  { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
116  { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
117  { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
118  { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN}, .flags=FLAGS, .unit="scale"},
119  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG}, .flags=FLAGS, .unit="scale"},
120  { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
121  { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
122  { "draw", "set draw mode", OFFSET(draw_mode), AV_OPT_TYPE_INT, {.i64 = DRAW_SCALE}, 0, DRAW_NB-1, FLAGS, .unit="draw" },
123  { "scale", "scale pixel values for each drawn sample", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_SCALE}, .flags=FLAGS, .unit="draw"},
124  { "full", "draw every pixel for sample directly", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_FULL}, .flags=FLAGS, .unit="draw"},
125  { NULL }
126 };
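/*
 * Illustrative usage sketch (the option names and accepted values come from
 * the AVOption table above; the concrete command lines are assumed examples,
 * not taken from this file):
 *
 *   ffmpeg -i in.wav -filter_complex "showwaves=s=1280x240:mode=p2p:rate=30" out.mp4
 *   ffmpeg -i in.wav -filter_complex "showwaves=split_channels=1:mode=cline:scale=log:draw=full" out.mp4
 */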
127 
128 AVFILTER_DEFINE_CLASS(showwaves);
129 
130 static av_cold void uninit(AVFilterContext *ctx)
131 {
132  ShowWavesContext *showwaves = ctx->priv;
133 
134  av_frame_free(&showwaves->outpicref);
135  av_freep(&showwaves->buf_idy);
136  av_freep(&showwaves->fg);
137 
138  if (showwaves->single_pic) {
139  struct frame_node *node = showwaves->audio_frames;
140  while (node) {
141  struct frame_node *tmp = node;
142 
143  node = node->next;
144  av_frame_free(&tmp->frame);
145  av_freep(&tmp);
146  }
147  av_freep(&showwaves->sum);
148  showwaves->last_frame = NULL;
149  }
150 }
151 
152 static int query_formats(AVFilterContext *ctx)
153 {
154  AVFilterFormats *formats = NULL;
155  AVFilterChannelLayouts *layouts = NULL;
156  AVFilterLink *inlink = ctx->inputs[0];
157  AVFilterLink *outlink = ctx->outputs[0];
158  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
159  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
160  int ret;
161 
162  /* set input audio formats */
163  formats = ff_make_format_list(sample_fmts);
164  if ((ret = ff_formats_ref(formats, &inlink->outcfg.formats)) < 0)
165  return ret;
166 
167  layouts = ff_all_channel_layouts();
168  if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0)
169  return ret;
170 
171  formats = ff_all_samplerates();
172  if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0)
173  return ret;
174 
175  /* set output video format */
176  formats = ff_make_format_list(pix_fmts);
177  if ((ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
178  return ret;
179 
180  return 0;
181 }
182 
183 static int get_lin_h(int16_t sample, int height)
184 {
185  return height/2 - av_rescale(sample, height/2, INT16_MAX);
186 }
187 
188 static int get_lin_h2(int16_t sample, int height)
189 {
190  return av_rescale(FFABS(sample), height, INT16_MAX);
191 }
192 
193 static int get_log_h(int16_t sample, int height)
194 {
195  return height/2 - FFSIGN(sample) * (log10(1 + FFABS(sample)) * (height/2) / log10(1 + INT16_MAX));
196 }
197 
198 static int get_log_h2(int16_t sample, int height)
199 {
200  return log10(1 + FFABS(sample)) * height / log10(1 + INT16_MAX);
201 }
202 
203 static int get_sqrt_h(int16_t sample, int height)
204 {
205  return height/2 - FFSIGN(sample) * (sqrt(FFABS(sample)) * (height/2) / sqrt(INT16_MAX));
206 }
207 
208 static int get_sqrt_h2(int16_t sample, int height)
209 {
210  return sqrt(FFABS(sample)) * height / sqrt(INT16_MAX);
211 }
212 
213 static int get_cbrt_h(int16_t sample, int height)
214 {
215  return height/2 - FFSIGN(sample) * (cbrt(FFABS(sample)) * (height/2) / cbrt(INT16_MAX));
216 }
217 
218 static int get_cbrt_h2(int16_t sample, int height)
219 {
220  return cbrt(FFABS(sample)) * height / cbrt(INT16_MAX);
221 }
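/*
 * Worked example for the mappings above (assuming height = 240, so the center
 * line sits at y = 120): a silent sample maps to y = 120 on every scale; a
 * full-scale positive sample (32767) maps to y = 0 (top row); a full-scale
 * negative sample maps toward the bottom row and is skipped or clipped by the
 * drawing helpers. A quarter-scale sample (8191) lands at roughly y = 90 with
 * get_lin_h() but at roughly y = 16 with get_log_h(), which is why the
 * log/sqrt/cbrt scales make quiet material much more visible. The *_h2
 * variants return a bar length in the range 0..height instead of a y
 * coordinate; the centered-line drawers then center that bar at row
 * (height - h) / 2.
 */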
222 
223 static void draw_sample_point_rgba_scale(uint8_t *buf, int height, int linesize,
224  int16_t *prev_y,
225  const uint8_t color[4], int h)
226 {
227  if (h >= 0 && h < height) {
228  buf[h * linesize + 0] += color[0];
229  buf[h * linesize + 1] += color[1];
230  buf[h * linesize + 2] += color[2];
231  buf[h * linesize + 3] += color[3];
232  }
233 }
234 
235 static void draw_sample_point_rgba_full(uint8_t *buf, int height, int linesize,
236  int16_t *prev_y,
237  const uint8_t color[4], int h)
238 {
239  if (h >= 0 && h < height) {
240  buf[h * linesize + 0] = color[0];
241  buf[h * linesize + 1] = color[1];
242  buf[h * linesize + 2] = color[2];
243  buf[h * linesize + 3] = color[3];
244  }
245 }
246 
247 static void draw_sample_line_rgba_scale(uint8_t *buf, int height, int linesize,
248  int16_t *prev_y,
249  const uint8_t color[4], int h)
250 {
251  int k;
252  int start = height/2;
253  int end = av_clip(h, 0, height-1);
254  if (start > end)
255  FFSWAP(int16_t, start, end);
256  for (k = start; k < end; k++) {
257  buf[k * linesize + 0] += color[0];
258  buf[k * linesize + 1] += color[1];
259  buf[k * linesize + 2] += color[2];
260  buf[k * linesize + 3] += color[3];
261  }
262 }
263 
264 static void draw_sample_line_rgba_full(uint8_t *buf, int height, int linesize,
265  int16_t *prev_y,
266  const uint8_t color[4], int h)
267 {
268  int k;
269  int start = height/2;
270  int end = av_clip(h, 0, height-1);
271  if (start > end)
272  FFSWAP(int16_t, start, end);
273  for (k = start; k < end; k++) {
274  buf[k * linesize + 0] = color[0];
275  buf[k * linesize + 1] = color[1];
276  buf[k * linesize + 2] = color[2];
277  buf[k * linesize + 3] = color[3];
278  }
279 }
280 
281 static void draw_sample_p2p_rgba_scale(uint8_t *buf, int height, int linesize,
282  int16_t *prev_y,
283  const uint8_t color[4], int h)
284 {
285  int k;
286  if (h >= 0 && h < height) {
287  buf[h * linesize + 0] += color[0];
288  buf[h * linesize + 1] += color[1];
289  buf[h * linesize + 2] += color[2];
290  buf[h * linesize + 3] += color[3];
291  if (*prev_y && h != *prev_y) {
292  int start = *prev_y;
293  int end = av_clip(h, 0, height-1);
294  if (start > end)
295  FFSWAP(int16_t, start, end);
296  for (k = start + 1; k < end; k++) {
297  buf[k * linesize + 0] += color[0];
298  buf[k * linesize + 1] += color[1];
299  buf[k * linesize + 2] += color[2];
300  buf[k * linesize + 3] += color[3];
301  }
302  }
303  }
304  *prev_y = h;
305 }
306 
307 static void draw_sample_p2p_rgba_full(uint8_t *buf, int height, int linesize,
308  int16_t *prev_y,
309  const uint8_t color[4], int h)
310 {
311  int k;
312  if (h >= 0 && h < height) {
313  buf[h * linesize + 0] = color[0];
314  buf[h * linesize + 1] = color[1];
315  buf[h * linesize + 2] = color[2];
316  buf[h * linesize + 3] = color[3];
317  if (*prev_y && h != *prev_y) {
318  int start = *prev_y;
319  int end = av_clip(h, 0, height-1);
320  if (start > end)
321  FFSWAP(int16_t, start, end);
322  for (k = start + 1; k < end; k++) {
323  buf[k * linesize + 0] = color[0];
324  buf[k * linesize + 1] = color[1];
325  buf[k * linesize + 2] = color[2];
326  buf[k * linesize + 3] = color[3];
327  }
328  }
329  }
330  *prev_y = h;
331 }
332 
333 static void draw_sample_cline_rgba_scale(uint8_t *buf, int height, int linesize,
334  int16_t *prev_y,
335  const uint8_t color[4], int h)
336 {
337  int k;
338  const int start = (height - h) / 2;
339  const int end = start + h;
340  for (k = start; k < end; k++) {
341  buf[k * linesize + 0] += color[0];
342  buf[k * linesize + 1] += color[1];
343  buf[k * linesize + 2] += color[2];
344  buf[k * linesize + 3] += color[3];
345  }
346 }
347 static void draw_sample_cline_rgba_full(uint8_t *buf, int height, int linesize,
348  int16_t *prev_y,
349  const uint8_t color[4], int h)
350 {
351  int k;
352  const int start = (height - h) / 2;
353  const int end = start + h;
354  for (k = start; k < end; k++) {
355  buf[k * linesize + 0] = color[0];
356  buf[k * linesize + 1] = color[1];
357  buf[k * linesize + 2] = color[2];
358  buf[k * linesize + 3] = color[3];
359  }
360 }
361 
362 static void draw_sample_point_gray(uint8_t *buf, int height, int linesize,
363  int16_t *prev_y,
364  const uint8_t color[4], int h)
365 {
366  if (h >= 0 && h < height)
367  buf[h * linesize] += color[0];
368 }
369 
370 static void draw_sample_line_gray(uint8_t *buf, int height, int linesize,
371  int16_t *prev_y,
372  const uint8_t color[4], int h)
373 {
374  int k;
375  int start = height/2;
376  int end = av_clip(h, 0, height-1);
377  if (start > end)
378  FFSWAP(int16_t, start, end);
379  for (k = start; k < end; k++)
380  buf[k * linesize] += color[0];
381 }
382 
383 static void draw_sample_p2p_gray(uint8_t *buf, int height, int linesize,
384  int16_t *prev_y,
385  const uint8_t color[4], int h)
386 {
387  int k;
388  if (h >= 0 && h < height) {
389  buf[h * linesize] += color[0];
390  if (*prev_y && h != *prev_y) {
391  int start = *prev_y;
392  int end = av_clip(h, 0, height-1);
393  if (start > end)
394  FFSWAP(int16_t, start, end);
395  for (k = start + 1; k < end; k++)
396  buf[k * linesize] += color[0];
397  }
398  }
399  *prev_y = h;
400 }
401 
402 static void draw_sample_cline_gray(uint8_t *buf, int height, int linesize,
403  int16_t *prev_y,
404  const uint8_t color[4], int h)
405 {
406  int k;
407  const int start = (height - h) / 2;
408  const int end = start + h;
409  for (k = start; k < end; k++)
410  buf[k * linesize] += color[0];
411 }
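/*
 * Note on the _scale vs _full variants above (numbers are an assumed
 * example): with draw=scale and split_channels=0, config_output() pre-divides
 * each channel's color by (nb_channels * n) -- e.g. 2 channels and n = 3 give
 * a per-sample contribution of 255 / 6 = 42 -- so the "+=" accumulation over
 * all samples that share a column saturates near full brightness instead of
 * overflowing. With draw=full the color is written with "=" at full intensity
 * for every drawn sample. The p2p variants additionally connect the current
 * sample to the previous y coordinate stored in *prev_y.
 */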
412 
413 static int config_output(AVFilterLink *outlink)
414 {
415  AVFilterContext *ctx = outlink->src;
416  AVFilterLink *inlink = ctx->inputs[0];
417  ShowWavesContext *showwaves = ctx->priv;
418  int nb_channels = inlink->channels;
419  char *colors, *saveptr = NULL;
420  uint8_t x;
421  int ch;
422 
423  if (showwaves->single_pic)
424  showwaves->n = 1;
425 
426  if (!showwaves->n)
427  showwaves->n = FFMAX(1, av_rescale_q(inlink->sample_rate, av_make_q(1, showwaves->w), showwaves->rate));
428 
429  showwaves->buf_idx = 0;
430  if (!(showwaves->buf_idy = av_mallocz_array(nb_channels, sizeof(*showwaves->buf_idy)))) {
431  av_log(ctx, AV_LOG_ERROR, "Could not allocate showwaves buffer\n");
432  return AVERROR(ENOMEM);
433  }
434  outlink->w = showwaves->w;
435  outlink->h = showwaves->h;
436  outlink->sample_aspect_ratio = (AVRational){1,1};
437 
438  outlink->frame_rate = av_div_q((AVRational){inlink->sample_rate,showwaves->n},
439  (AVRational){showwaves->w,1});
440 
441  av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%d\n",
442  showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), showwaves->n);
443 
444  switch (outlink->format) {
445  case AV_PIX_FMT_GRAY8:
446  switch (showwaves->mode) {
447  case MODE_POINT: showwaves->draw_sample = draw_sample_point_gray; break;
448  case MODE_LINE: showwaves->draw_sample = draw_sample_line_gray; break;
449  case MODE_P2P: showwaves->draw_sample = draw_sample_p2p_gray; break;
450  case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline_gray; break;
451  default:
452  return AVERROR_BUG;
453  }
454  showwaves->pixstep = 1;
455  break;
456  case AV_PIX_FMT_RGBA:
457  switch (showwaves->mode) {
458  case MODE_POINT: showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_point_rgba_scale : draw_sample_point_rgba_full; break;
459  case MODE_LINE: showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_line_rgba_scale : draw_sample_line_rgba_full; break;
460  case MODE_P2P: showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_p2p_rgba_scale : draw_sample_p2p_rgba_full; break;
461  case MODE_CENTERED_LINE: showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_cline_rgba_scale : draw_sample_cline_rgba_full; break;
462  default:
463  return AVERROR_BUG;
464  }
465  showwaves->pixstep = 4;
466  break;
467  }
468 
469  switch (showwaves->scale) {
470  case SCALE_LIN:
471  switch (showwaves->mode) {
472  case MODE_POINT:
473  case MODE_LINE:
474  case MODE_P2P: showwaves->get_h = get_lin_h; break;
475  case MODE_CENTERED_LINE: showwaves->get_h = get_lin_h2; break;
476  default:
477  return AVERROR_BUG;
478  }
479  break;
480  case SCALE_LOG:
481  switch (showwaves->mode) {
482  case MODE_POINT:
483  case MODE_LINE:
484  case MODE_P2P: showwaves->get_h = get_log_h; break;
485  case MODE_CENTERED_LINE: showwaves->get_h = get_log_h2; break;
486  default:
487  return AVERROR_BUG;
488  }
489  break;
490  case SCALE_SQRT:
491  switch (showwaves->mode) {
492  case MODE_POINT:
493  case MODE_LINE:
494  case MODE_P2P: showwaves->get_h = get_sqrt_h; break;
495  case MODE_CENTERED_LINE: showwaves->get_h = get_sqrt_h2; break;
496  default:
497  return AVERROR_BUG;
498  }
499  break;
500  case SCALE_CBRT:
501  switch (showwaves->mode) {
502  case MODE_POINT:
503  case MODE_LINE:
504  case MODE_P2P: showwaves->get_h = get_cbrt_h; break;
505  case MODE_CENTERED_LINE: showwaves->get_h = get_cbrt_h2; break;
506  default:
507  return AVERROR_BUG;
508  }
509  break;
510  }
511 
512  showwaves->fg = av_malloc_array(nb_channels, 4 * sizeof(*showwaves->fg));
513  if (!showwaves->fg)
514  return AVERROR(ENOMEM);
515 
516  colors = av_strdup(showwaves->colors);
517  if (!colors)
518  return AVERROR(ENOMEM);
519 
520  if (showwaves->draw_mode == DRAW_SCALE) {
521  /* multiplication factor, pre-computed to avoid in-loop divisions */
522  x = 255 / ((showwaves->split_channels ? 1 : nb_channels) * showwaves->n);
523  } else {
524  x = 255;
525  }
526  if (outlink->format == AV_PIX_FMT_RGBA) {
527  uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
528 
529  for (ch = 0; ch < nb_channels; ch++) {
530  char *color;
531 
532  color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
533  if (color)
534  av_parse_color(fg, color, -1, ctx);
535  showwaves->fg[4*ch + 0] = fg[0] * x / 255.;
536  showwaves->fg[4*ch + 1] = fg[1] * x / 255.;
537  showwaves->fg[4*ch + 2] = fg[2] * x / 255.;
538  showwaves->fg[4*ch + 3] = fg[3] * x / 255.;
539  }
540  } else {
541  for (ch = 0; ch < nb_channels; ch++)
542  showwaves->fg[4 * ch + 0] = x;
543  }
544  av_free(colors);
545 
546  return 0;
547 }
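/*
 * Worked example for the arithmetic above (assumed input: 44100 Hz audio,
 * default 600x240 size, default rate 25): n = av_rescale_q(44100, 1/600,
 * 25/1) = 44100 / 15000, rounded to 3 samples per output column, and the
 * resulting frame rate is (44100 / 3) / 600 = 24.5 frames per second --
 * close to, but not exactly, the requested rate, since n must be an integer.
 */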
548 
549 inline static int push_frame(AVFilterLink *outlink)
550 {
551  AVFilterContext *ctx = outlink->src;
552  AVFilterLink *inlink = ctx->inputs[0];
553  ShowWavesContext *showwaves = outlink->src->priv;
554  int nb_channels = inlink->channels;
555  int ret, i;
556 
557  ret = ff_filter_frame(outlink, showwaves->outpicref);
558  showwaves->outpicref = NULL;
559  showwaves->buf_idx = 0;
560  for (i = 0; i < nb_channels; i++)
561  showwaves->buf_idy[i] = 0;
562  return ret;
563 }
564 
565 static int push_single_pic(AVFilterLink *outlink)
566 {
567  AVFilterContext *ctx = outlink->src;
568  AVFilterLink *inlink = ctx->inputs[0];
569  ShowWavesContext *showwaves = ctx->priv;
570  int64_t n = 0, column_max_samples = showwaves->total_samples / outlink->w;
571  int64_t remaining_samples = showwaves->total_samples - (column_max_samples * outlink->w);
572  int64_t last_column_samples = column_max_samples + remaining_samples;
573  AVFrame *out = showwaves->outpicref;
574  struct frame_node *node;
575  const int nb_channels = inlink->channels;
576  const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;
577  const int linesize = out->linesize[0];
578  const int pixstep = showwaves->pixstep;
579  int col = 0;
580  int64_t *sum = showwaves->sum;
581 
582  if (column_max_samples == 0) {
583  av_log(ctx, AV_LOG_ERROR, "Too few samples\n");
584  return AVERROR(EINVAL);
585  }
586 
587  av_log(ctx, AV_LOG_DEBUG, "Create frame averaging %"PRId64" samples per column\n", column_max_samples);
588 
589  memset(sum, 0, nb_channels * sizeof(*sum));
590 
591  for (node = showwaves->audio_frames; node; node = node->next) {
592  int i;
593  const AVFrame *frame = node->frame;
594  const int16_t *p = (const int16_t *)frame->data[0];
595 
596  for (i = 0; i < frame->nb_samples; i++) {
597  int64_t max_samples = col == outlink->w - 1 ? last_column_samples: column_max_samples;
598  int ch;
599 
600  switch (showwaves->filter_mode) {
601  case FILTER_AVERAGE:
602  for (ch = 0; ch < nb_channels; ch++)
603  sum[ch] += abs(p[ch + i*nb_channels]) << 1;
604  break;
605  case FILTER_PEAK:
606  for (ch = 0; ch < nb_channels; ch++)
607  sum[ch] = FFMAX(sum[ch], abs(p[ch + i*nb_channels]));
608  break;
609  }
610 
611  n++;
612  if (n == max_samples) {
613  for (ch = 0; ch < nb_channels; ch++) {
614  int16_t sample = sum[ch] / (showwaves->filter_mode == FILTER_AVERAGE ? max_samples : 1);
615  uint8_t *buf = out->data[0] + col * pixstep;
616  int h;
617 
618  if (showwaves->split_channels)
619  buf += ch*ch_height*linesize;
620  av_assert0(col < outlink->w);
621  h = showwaves->get_h(sample, ch_height);
622  showwaves->draw_sample(buf, ch_height, linesize, &showwaves->buf_idy[ch], &showwaves->fg[ch * 4], h);
623  sum[ch] = 0;
624  }
625  col++;
626  n = 0;
627  }
628  }
629  }
630 
631  return push_frame(outlink);
632 }
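/*
 * Worked example for the column bookkeeping above (assumed input: 5 seconds
 * of 44100 Hz audio, i.e. total_samples = 220500, with the default width of
 * 600): column_max_samples = 220500 / 600 = 367, the remaining 300 samples
 * are folded into the last column (last_column_samples = 667), and with
 * filter=average each column height reflects the mean of abs(sample) << 1
 * over the samples that fall into that column.
 */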
633 
634 
635 static int request_frame(AVFilterLink *outlink)
636 {
637  ShowWavesContext *showwaves = outlink->src->priv;
638  AVFilterLink *inlink = outlink->src->inputs[0];
639  int ret;
640 
641  ret = ff_request_frame(inlink);
642  if (ret == AVERROR_EOF && showwaves->outpicref) {
643  if (showwaves->single_pic)
644  push_single_pic(outlink);
645  else
646  push_frame(outlink);
647  }
648 
649  return ret;
650 }
651 
652 static int alloc_out_frame(ShowWavesContext *showwaves, const int16_t *p,
653  const AVFilterLink *inlink, AVFilterLink *outlink,
654  const AVFrame *in)
655 {
656  if (!showwaves->outpicref) {
657  int j;
658  AVFrame *out = showwaves->outpicref =
659  ff_get_video_buffer(outlink, outlink->w, outlink->h);
660  if (!out)
661  return AVERROR(ENOMEM);
662  out->width = outlink->w;
663  out->height = outlink->h;
664  out->pts = in->pts + av_rescale_q((p - (int16_t *)in->data[0]) / inlink->channels,
665  av_make_q(1, inlink->sample_rate),
666  outlink->time_base);
667  for (j = 0; j < outlink->h; j++)
668  memset(out->data[0] + j*out->linesize[0], 0, outlink->w * showwaves->pixstep);
669  }
670  return 0;
671 }
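/*
 * Timestamp note (assumed numbers): the pts of a new output picture is the
 * input frame's pts advanced by the per-channel index of the first sample
 * drawn into it, rescaled from the 1/sample_rate time base to the output
 * link's time base. For example, at 44100 Hz an offset of 44100 samples adds
 * exactly one second (25 ticks if the output time base happened to be 1/25).
 */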
672 
673 static av_cold int init(AVFilterContext *ctx)
674 {
675  ShowWavesContext *showwaves = ctx->priv;
676 
677  if (!strcmp(ctx->filter->name, "showwavespic")) {
678  showwaves->single_pic = 1;
679  showwaves->mode = MODE_CENTERED_LINE;
680  }
681 
682  return 0;
683 }
684 
685 #if CONFIG_SHOWWAVES_FILTER
686 
687 static int showwaves_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
688 {
689  AVFilterContext *ctx = inlink->dst;
690  AVFilterLink *outlink = ctx->outputs[0];
691  ShowWavesContext *showwaves = ctx->priv;
692  const int nb_samples = insamples->nb_samples;
693  AVFrame *outpicref = showwaves->outpicref;
694  int16_t *p = (int16_t *)insamples->data[0];
695  int nb_channels = inlink->channels;
696  int i, j, ret = 0;
697  const int pixstep = showwaves->pixstep;
698  const int n = showwaves->n;
699  const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;
700 
701  /* draw data in the buffer */
702  for (i = 0; i < nb_samples; i++) {
703 
704  ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
705  if (ret < 0)
706  goto end;
707  outpicref = showwaves->outpicref;
708 
709  for (j = 0; j < nb_channels; j++) {
710  uint8_t *buf = outpicref->data[0] + showwaves->buf_idx * pixstep;
711  const int linesize = outpicref->linesize[0];
712  int h;
713 
714  if (showwaves->split_channels)
715  buf += j*ch_height*linesize;
716  h = showwaves->get_h(*p++, ch_height);
717  showwaves->draw_sample(buf, ch_height, linesize,
718  &showwaves->buf_idy[j], &showwaves->fg[j * 4], h);
719  }
720 
721  showwaves->sample_count_mod++;
722  if (showwaves->sample_count_mod == n) {
723  showwaves->sample_count_mod = 0;
724  showwaves->buf_idx++;
725  }
726  if (showwaves->buf_idx == showwaves->w ||
727  (ff_outlink_get_status(inlink) && i == nb_samples - 1))
728  if ((ret = push_frame(outlink)) < 0)
729  break;
730  outpicref = showwaves->outpicref;
731  }
732 
733 end:
734  av_frame_free(&insamples);
735  return ret;
736 }
737 
738 static int activate(AVFilterContext *ctx)
739 {
740  AVFilterLink *inlink = ctx->inputs[0];
741  AVFilterLink *outlink = ctx->outputs[0];
742  ShowWavesContext *showwaves = ctx->priv;
743  AVFrame *in;
744  const int nb_samples = showwaves->n * outlink->w;
745  int ret;
746 
747  FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
748 
749  ret = ff_inlink_consume_samples(inlink, nb_samples, nb_samples, &in);
750  if (ret < 0)
751  return ret;
752  if (ret > 0)
753  return showwaves_filter_frame(inlink, in);
754 
755  FF_FILTER_FORWARD_STATUS(inlink, outlink);
756  FF_FILTER_FORWARD_WANTED(outlink, inlink);
757 
758  return FFERROR_NOT_READY;
759 }
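/*
 * Scheduling note: activate() asks the input FIFO for exactly n * w samples
 * (min and max are equal), so in the steady state each successful consume
 * fills one complete video frame -- buf_idx reaches w inside
 * showwaves_filter_frame() and push_frame() is called once per chunk. Near
 * EOF a shorter chunk may be returned, and the partially filled picture is
 * pushed when the input status is set on the last sample.
 */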
760 
761 static const AVFilterPad showwaves_inputs[] = {
762  {
763  .name = "default",
764  .type = AVMEDIA_TYPE_AUDIO,
765  },
766  { NULL }
767 };
768 
769 static const AVFilterPad showwaves_outputs[] = {
770  {
771  .name = "default",
772  .type = AVMEDIA_TYPE_VIDEO,
773  .config_props = config_output,
774  },
775  { NULL }
776 };
777 
778 AVFilter ff_avf_showwaves = {
779  .name = "showwaves",
780  .description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
781  .init = init,
782  .uninit = uninit,
783  .query_formats = query_formats,
784  .priv_size = sizeof(ShowWavesContext),
785  .inputs = showwaves_inputs,
786  .activate = activate,
787  .outputs = showwaves_outputs,
788  .priv_class = &showwaves_class,
789 };
790 
791 #endif // CONFIG_SHOWWAVES_FILTER
792 
793 #if CONFIG_SHOWWAVESPIC_FILTER
794 
795 #define OFFSET(x) offsetof(ShowWavesContext, x)
796 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
797 
798 static const AVOption showwavespic_options[] = {
799  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
800  { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
801  { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
802  { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
803  { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
804  { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN}, .flags=FLAGS, .unit="scale"},
805  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG}, .flags=FLAGS, .unit="scale"},
806  { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
807  { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
808  { "draw", "set draw mode", OFFSET(draw_mode), AV_OPT_TYPE_INT, {.i64 = DRAW_SCALE}, 0, DRAW_NB-1, FLAGS, .unit="draw" },
809  { "scale", "scale pixel values for each drawn sample", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_SCALE}, .flags=FLAGS, .unit="draw"},
810  { "full", "draw every pixel for sample directly", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_FULL}, .flags=FLAGS, .unit="draw"},
811  { "filter", "set filter mode", OFFSET(filter_mode), AV_OPT_TYPE_INT, {.i64 = FILTER_AVERAGE}, 0, FILTER_NB-1, FLAGS, .unit="filter" },
812  { "average", "use average samples", 0, AV_OPT_TYPE_CONST, {.i64=FILTER_AVERAGE}, .flags=FLAGS, .unit="filter"},
813  { "peak", "use peak samples", 0, AV_OPT_TYPE_CONST, {.i64=FILTER_PEAK}, .flags=FLAGS, .unit="filter"},
814  { NULL }
815 };
816 
817 AVFILTER_DEFINE_CLASS(showwavespic);
818 
819 static int showwavespic_config_input(AVFilterLink *inlink)
820 {
821  AVFilterContext *ctx = inlink->dst;
822  ShowWavesContext *showwaves = ctx->priv;
823 
824  if (showwaves->single_pic) {
825  showwaves->sum = av_mallocz_array(inlink->channels, sizeof(*showwaves->sum));
826  if (!showwaves->sum)
827  return AVERROR(ENOMEM);
828  }
829 
830  return 0;
831 }
832 
833 static int showwavespic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
834 {
835  AVFilterContext *ctx = inlink->dst;
836  AVFilterLink *outlink = ctx->outputs[0];
837  ShowWavesContext *showwaves = ctx->priv;
838  int16_t *p = (int16_t *)insamples->data[0];
839  int ret = 0;
840 
841  if (showwaves->single_pic) {
842  struct frame_node *f;
843 
844  ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
845  if (ret < 0)
846  goto end;
847 
848  /* queue the audio frame */
849  f = av_malloc(sizeof(*f));
850  if (!f) {
851  ret = AVERROR(ENOMEM);
852  goto end;
853  }
854  f->frame = insamples;
855  f->next = NULL;
856  if (!showwaves->last_frame) {
857  showwaves->audio_frames =
858  showwaves->last_frame = f;
859  } else {
860  showwaves->last_frame->next = f;
861  showwaves->last_frame = f;
862  }
863  showwaves->total_samples += insamples->nb_samples;
864 
865  return 0;
866  }
867 
868 end:
869  av_frame_free(&insamples);
870  return ret;
871 }
872 
873 static const AVFilterPad showwavespic_inputs[] = {
874  {
875  .name = "default",
876  .type = AVMEDIA_TYPE_AUDIO,
877  .config_props = showwavespic_config_input,
878  .filter_frame = showwavespic_filter_frame,
879  },
880  { NULL }
881 };
882 
883 static const AVFilterPad showwavespic_outputs[] = {
884  {
885  .name = "default",
886  .type = AVMEDIA_TYPE_VIDEO,
887  .config_props = config_output,
888  .request_frame = request_frame,
889  },
890  { NULL }
891 };
892 
893 AVFilter ff_avf_showwavespic = {
894  .name = "showwavespic",
895  .description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output single picture."),
896  .init = init,
897  .uninit = uninit,
898  .query_formats = query_formats,
899  .priv_size = sizeof(ShowWavesContext),
900  .inputs = showwavespic_inputs,
901  .outputs = showwavespic_outputs,
902  .priv_class = &showwavespic_class,
903 };
904 
905 #endif // CONFIG_SHOWWAVESPIC_FILTER
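
The standalone sketch below is an illustrative addition, not part of avf_showwaves.c: it re-implements the four amplitude-to-height mappings above in plain C99 (no FFmpeg headers, with av_rescale() approximated by llround()) so the scales can be compared numerically. Build with, e.g., cc -std=c99 -o scale_demo scale_demo.c -lm; the file name and the sample values printed are arbitrary.

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SIGN(a) ((a) > 0 ? 1 : -1)  /* same convention as FFmpeg's FFSIGN() */

/* Linear mapping; av_rescale()'s round-to-nearest is approximated with llround(). */
static int lin_h(int16_t s, int height)
{
    return height / 2 - (int)llround((double)s * (height / 2) / INT16_MAX);
}

static int log_h(int16_t s, int height)
{
    return height / 2 - SIGN(s) * (log10(1 + abs(s)) * (height / 2) / log10(1 + INT16_MAX));
}

static int sqrt_h(int16_t s, int height)
{
    return height / 2 - SIGN(s) * (sqrt(abs(s)) * (height / 2) / sqrt(INT16_MAX));
}

static int cbrt_h(int16_t s, int height)
{
    return height / 2 - SIGN(s) * (cbrt(abs(s)) * (height / 2) / cbrt(INT16_MAX));
}

int main(void)
{
    static const int16_t samples[] = { 0, 1024, 8191, 16384, 32767, -32767 };
    const int height = 240;  /* default showwaves height */

    printf("%8s %6s %6s %6s %6s\n", "sample", "lin", "log", "sqrt", "cbrt");
    for (size_t i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("%8d %6d %6d %6d %6d\n", samples[i],
               lin_h(samples[i], height), log_h(samples[i], height),
               sqrt_h(samples[i], height), cbrt_h(samples[i], height));
    return 0;
}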