af_anequalizer.c
1 /*
2  * Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
3  * Copyright (c) 2015 Paul B Mahol
4  *
5  * This file is part of FFmpeg.
6  *
7  * FFmpeg is free software; you can redistribute it and/or
8  * modify it under the terms of the GNU Lesser General Public
9  * License as published by the Free Software Foundation; either
10  * version 2.1 of the License, or (at your option) any later version.
11  *
12  * FFmpeg is distributed in the hope that it will be useful,
13  * but WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15  * Lesser General Public License for more details.
16  *
17  * You should have received a copy of the GNU Lesser General Public
18  * License along with FFmpeg; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
20  */
21 
22 #include "libavutil/intreadwrite.h"
23 #include "libavutil/avstring.h"
24 #include "libavutil/ffmath.h"
25 #include "libavutil/opt.h"
26 #include "libavutil/parseutils.h"
27 #include "avfilter.h"
28 #include "internal.h"
29 #include "audio.h"
30 
31 #define FILTER_ORDER 4
32 
33 enum FilterType {
34  BUTTERWORTH,
35  CHEBYSHEV1,
36  CHEBYSHEV2,
37  NB_TYPES
38 };
39 
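/*
 * Each FoSection describes one fourth-order IIR section,
 *
 *   H(z) = (b0 + b1*z^-1 + b2*z^-2 + b3*z^-3 + b4*z^-4) /
 *          (a0 + a1*z^-1 + a2*z^-2 + a3*z^-3 + a4*z^-4),
 *
 * whose coefficients are produced by the *_fo_section() design helpers below.
 */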
40 typedef struct FoSection {
41  double a0, a1, a2, a3, a4;
42  double b0, b1, b2, b3, b4;
43 
44  double num[4];
45  double denum[4];
46 } FoSection;
47 
48 typedef struct EqualizatorFilter {
49  int ignore;
50  int channel;
51  int type;
52 
53  double freq;
54  double gain;
55  double width;
56 
57
58  FoSection section[2];
59 } EqualizatorFilter;
60 typedef struct AudioNEqualizerContext {
61  const AVClass *class;
62  char *args;
63  char *colors;
64  int draw_curves;
65  int w, h;
66 
67  double mag;
68  int fscale;
69  unsigned nb_filters;
70  unsigned nb_allocated;
71  EqualizatorFilter *filters;
72  AVFrame *video;
73 } AudioNEqualizerContext;
74
75 #define OFFSET(x) offsetof(AudioNEqualizerContext, x)
76 #define A AV_OPT_FLAG_AUDIO_PARAM
77 #define V AV_OPT_FLAG_VIDEO_PARAM
78 #define F AV_OPT_FLAG_FILTERING_PARAM
79 
80 static const AVOption anequalizer_options[] = {
81  { "params", NULL, OFFSET(args), AV_OPT_TYPE_STRING, {.str=""}, 0, 0, A|F },
82  { "curves", "draw frequency response curves", OFFSET(draw_curves), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, V|F },
83  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, V|F },
84  { "mgain", "set max gain", OFFSET(mag), AV_OPT_TYPE_DOUBLE, {.dbl=60}, -900, 900, V|F },
85  { "fscale", "set frequency scale", OFFSET(fscale), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, V|F, "fscale" },
86  { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, V|F, "fscale" },
87  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, V|F, "fscale" },
88  { "colors", "set channels curves colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, V|F },
89  { NULL }
90 };
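/*
 * Illustrative example (not part of the original source): "params" is a
 * '|'-separated list of per-channel filter descriptions of the form
 * "c<channel> f=<frequency> w=<width> g=<gain> t=<type>", as parsed by
 * config_input() below. For instance
 *
 *   anequalizer=c0 f=200 w=100 g=-10 t=1|c1 f=200 w=100 g=-10 t=1
 *
 * cuts 10 dB around 200 Hz on the first two channels; t=0 selects
 * BUTTERWORTH, t=1 CHEBYSHEV1 and t=2 CHEBYSHEV2.
 */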
91 
92 AVFILTER_DEFINE_CLASS(anequalizer);
93 
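/*
 * For every column of the output video, draw_curves() evaluates the combined
 * magnitude response of a channel's active filters at z = e^(-jw), with w
 * mapped linearly or logarithmically over 0..pi according to the fscale
 * option, converts it to dB and maps the -mgain..+mgain range onto the
 * frame height.
 */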
94 static void draw_curves(AVFilterContext *ctx, AVFilterLink *inlink, AVFrame *out)
95 {
96  AudioNEqualizerContext *s = ctx->priv;
97  char *colors, *color, *saveptr = NULL;
98  int ch, i, n;
99 
100  colors = av_strdup(s->colors);
101  if (!colors)
102  return;
103 
104  memset(out->data[0], 0, s->h * out->linesize[0]);
105 
106  for (ch = 0; ch < inlink->channels; ch++) {
107  uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
108  int prev_v = -1;
109  double f;
110 
111  color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
112  if (color)
113  av_parse_color(fg, color, -1, ctx);
114 
115  for (f = 0; f < s->w; f++) {
116  double zr, zi, zr2, zi2;
117  double Hr, Hi;
118  double Hmag = 1;
119  double w;
120  int v, y, x;
121 
122  w = M_PI * (s->fscale ? pow(s->w - 1, f / s->w) : f) / (s->w - 1);
123  zr = cos(w);
124  zr2 = zr * zr;
125  zi = -sin(w);
126  zi2 = zi * zi;
127 
128  for (n = 0; n < s->nb_filters; n++) {
129  if (s->filters[n].channel != ch ||
130  s->filters[n].ignore)
131  continue;
132 
133  for (i = 0; i < FILTER_ORDER / 2; i++) {
134  FoSection *S = &s->filters[n].section[i];
135 
136  /* H *= (((((S->b4 * z + S->b3) * z + S->b2) * z + S->b1) * z + S->b0) /
137  ((((S->a4 * z + S->a3) * z + S->a2) * z + S->a1) * z + S->a0)); */
138 
139  Hr = S->b4*(1-8*zr2*zi2) + S->b2*(zr2-zi2) + zr*(S->b1+S->b3*(zr2-3*zi2))+ S->b0;
140  Hi = zi*(S->b3*(3*zr2-zi2) + S->b1 + 2*zr*(2*S->b4*(zr2-zi2) + S->b2));
141  Hmag *= hypot(Hr, Hi);
142  Hr = S->a4*(1-8*zr2*zi2) + S->a2*(zr2-zi2) + zr*(S->a1+S->a3*(zr2-3*zi2))+ S->a0;
143  Hi = zi*(S->a3*(3*zr2-zi2) + S->a1 + 2*zr*(2*S->a4*(zr2-zi2) + S->a2));
144  Hmag /= hypot(Hr, Hi);
145  }
146  }
147 
148  v = av_clip((1. + -20 * log10(Hmag) / s->mag) * s->h / 2, 0, s->h - 1);
149  x = lrint(f);
150  if (prev_v == -1)
151  prev_v = v;
152  if (v <= prev_v) {
153  for (y = v; y <= prev_v; y++)
154  AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
155  } else {
156  for (y = prev_v; y <= v; y++)
157  AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
158  }
159 
160  prev_v = v;
161  }
162  }
163 
164  av_free(colors);
165 }
166 
167 static int config_video(AVFilterLink *outlink)
168 {
169  AVFilterContext *ctx = outlink->src;
170  AudioNEqualizerContext *s = ctx->priv;
171  AVFilterLink *inlink = ctx->inputs[0];
172  AVFrame *out;
173 
174  outlink->w = s->w;
175  outlink->h = s->h;
176 
177  av_frame_free(&s->video);
178  s->video = out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
179  if (!out)
180  return AVERROR(ENOMEM);
181  outlink->sample_aspect_ratio = (AVRational){1,1};
182 
183  draw_curves(ctx, inlink, out);
184 
185  return 0;
186 }
187 
188 static av_cold int init(AVFilterContext *ctx)
189 {
190  AudioNEqualizerContext *s = ctx->priv;
191  AVFilterPad pad, vpad;
192  int ret;
193 
194  pad = (AVFilterPad){
195  .name = "out0",
196  .type = AVMEDIA_TYPE_AUDIO,
197  };
198 
199  ret = ff_insert_outpad(ctx, 0, &pad);
200  if (ret < 0)
201  return ret;
202 
203  if (s->draw_curves) {
204  vpad = (AVFilterPad){
205  .name = "out1",
206  .type = AVMEDIA_TYPE_VIDEO,
207  .config_props = config_video,
208  };
209  ret = ff_insert_outpad(ctx, 1, &vpad);
210  if (ret < 0)
211  return ret;
212  }
213 
214  return 0;
215 }
216 
217 static int query_formats(AVFilterContext *ctx)
218 {
219  AVFilterLink *inlink = ctx->inputs[0];
220  AVFilterLink *outlink = ctx->outputs[0];
221  AudioNEqualizerContext *s = ctx->priv;
222  AVFilterFormats *formats;
223  AVFilterChannelLayouts *layouts;
224  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
225  static const enum AVSampleFormat sample_fmts[] = {
226  AV_SAMPLE_FMT_DBLP,
227  AV_SAMPLE_FMT_NONE
228  };
229  int ret;
230 
231  if (s->draw_curves) {
232  AVFilterLink *videolink = ctx->outputs[1];
233  formats = ff_make_format_list(pix_fmts);
234  if ((ret = ff_formats_ref(formats, &videolink->incfg.formats)) < 0)
235  return ret;
236  }
237 
238  formats = ff_make_format_list(sample_fmts);
239  if ((ret = ff_formats_ref(formats, &inlink->outcfg.formats)) < 0 ||
240  (ret = ff_formats_ref(formats, &outlink->incfg.formats)) < 0)
241  return ret;
242 
243  layouts = ff_all_channel_counts();
244  if ((ret = ff_channel_layouts_ref(layouts, &inlink->outcfg.channel_layouts)) < 0 ||
245  (ret = ff_channel_layouts_ref(layouts, &outlink->incfg.channel_layouts)) < 0)
246  return ret;
247 
248  formats = ff_all_samplerates();
249  if ((ret = ff_formats_ref(formats, &inlink->outcfg.samplerates)) < 0 ||
250  (ret = ff_formats_ref(formats, &outlink->incfg.samplerates)) < 0)
251  return ret;
252 
253  return 0;
254 }
255 
256 static av_cold void uninit(AVFilterContext *ctx)
257 {
258  AudioNEqualizerContext *s = ctx->priv;
259 
260  av_frame_free(&s->video);
261  av_freep(&s->filters);
262  s->nb_filters = 0;
263  s->nb_allocated = 0;
264 }
265 
266 static void butterworth_fo_section(FoSection *S, double beta,
267  double si, double g, double g0,
268  double D, double c0)
269 {
270  if (c0 == 1 || c0 == -1) {
271  S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
272  S->b1 = 2*c0*(g*g*beta*beta - g0*g0)/D;
273  S->b2 = (g*g*beta*beta - 2*g0*g*beta*si + g0*g0)/D;
274  S->b3 = 0;
275  S->b4 = 0;
276 
277  S->a0 = 1;
278  S->a1 = 2*c0*(beta*beta - 1)/D;
279  S->a2 = (beta*beta - 2*beta*si + 1)/D;
280  S->a3 = 0;
281  S->a4 = 0;
282  } else {
283  S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
284  S->b1 = -4*c0*(g0*g0 + g*g0*si*beta)/D;
285  S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - g*g*beta*beta)/D;
286  S->b3 = -4*c0*(g0*g0 - g*g0*si*beta)/D;
287  S->b4 = (g*g*beta*beta - 2*g*g0*si*beta + g0*g0)/D;
288 
289  S->a0 = 1;
290  S->a1 = -4*c0*(1 + si*beta)/D;
291  S->a2 = 2*(1 + 2*c0*c0 - beta*beta)/D;
292  S->a3 = -4*c0*(1 - si*beta)/D;
293  S->a4 = (beta*beta - 2*si*beta + 1)/D;
294  }
295 }
296 
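/*
 * The three *_bp_filter() routines design a band-pass peaking filter of
 * order N centred on w0 (rad/sample) with bandwidth wb. G is the peak gain,
 * Gb the gain at the band edges and G0 the reference gain outside the band,
 * all supplied in dB and converted to linear scale with ff_exp10(). Each of
 * the L = N/2 prototype sections is mapped to one digital FoSection.
 */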
297 static void butterworth_bp_filter(EqualizatorFilter *f,
298  int N, double w0, double wb,
299  double G, double Gb, double G0)
300 {
301  double g, c0, g0, beta;
302  double epsilon;
303  int r = N % 2;
304  int L = (N - r) / 2;
305  int i;
306 
307  if (G == 0 && G0 == 0) {
308  f->section[0].a0 = 1;
309  f->section[0].b0 = 1;
310  f->section[1].a0 = 1;
311  f->section[1].b0 = 1;
312  return;
313  }
314 
315  G = ff_exp10(G/20);
316  Gb = ff_exp10(Gb/20);
317  G0 = ff_exp10(G0/20);
318 
319  epsilon = sqrt((G * G - Gb * Gb) / (Gb * Gb - G0 * G0));
320  g = pow(G, 1.0 / N);
321  g0 = pow(G0, 1.0 / N);
322  beta = pow(epsilon, -1.0 / N) * tan(wb/2);
323  c0 = cos(w0);
324 
325  for (i = 1; i <= L; i++) {
326  double ui = (2.0 * i - 1) / N;
327  double si = sin(M_PI * ui / 2.0);
328  double Di = beta * beta + 2 * si * beta + 1;
329 
330  butterworth_fo_section(&f->section[i - 1], beta, si, g, g0, Di, c0);
331  }
332 }
333 
334 static void chebyshev1_fo_section(FoSection *S, double a,
335  double c, double tetta_b,
336  double g0, double si, double b,
337  double D, double c0)
338 {
339  if (c0 == 1 || c0 == -1) {
340  S->b0 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) + 2*g0*b*si*tetta_b*tetta_b + g0*g0)/D;
341  S->b1 = 2*c0*(tetta_b*tetta_b*(b*b+g0*g0*c*c) - g0*g0)/D;
342  S->b2 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) - 2*g0*b*si*tetta_b + g0*g0)/D;
343  S->b3 = 0;
344  S->b4 = 0;
345 
346  S->a0 = 1;
347  S->a1 = 2*c0*(tetta_b*tetta_b*(a*a+c*c) - 1)/D;
348  S->a2 = (tetta_b*tetta_b*(a*a+c*c) - 2*a*si*tetta_b + 1)/D;
349  S->a3 = 0;
350  S->a4 = 0;
351  } else {
352  S->b0 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b + 2*g0*b*si*tetta_b + g0*g0)/D;
353  S->b1 = -4*c0*(g0*g0 + g0*b*si*tetta_b)/D;
354  S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - (b*b + g0*g0*c*c)*tetta_b*tetta_b)/D;
355  S->b3 = -4*c0*(g0*g0 - g0*b*si*tetta_b)/D;
356  S->b4 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b - 2*g0*b*si*tetta_b + g0*g0)/D;
357 
358  S->a0 = 1;
359  S->a1 = -4*c0*(1 + a*si*tetta_b)/D;
360  S->a2 = 2*(1 + 2*c0*c0 - (a*a + c*c)*tetta_b*tetta_b)/D;
361  S->a3 = -4*c0*(1 - a*si*tetta_b)/D;
362  S->a4 = ((a*a + c*c)*tetta_b*tetta_b - 2*a*si*tetta_b + 1)/D;
363  }
364 }
365 
366 static void chebyshev1_bp_filter(EqualizatorFilter *f,
367  int N, double w0, double wb,
368  double G, double Gb, double G0)
369 {
370  double a, b, c0, g0, alfa, beta, tetta_b;
371  double epsilon;
372  int r = N % 2;
373  int L = (N - r) / 2;
374  int i;
375 
376  if (G == 0 && G0 == 0) {
377  f->section[0].a0 = 1;
378  f->section[0].b0 = 1;
379  f->section[1].a0 = 1;
380  f->section[1].b0 = 1;
381  return;
382  }
383 
384  G = ff_exp10(G/20);
385  Gb = ff_exp10(Gb/20);
386  G0 = ff_exp10(G0/20);
387 
388  epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
389  g0 = pow(G0,1.0/N);
390  alfa = pow(1.0/epsilon + sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
391  beta = pow(G/epsilon + Gb * sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
392  a = 0.5 * (alfa - 1.0/alfa);
393  b = 0.5 * (beta - g0*g0*(1/beta));
394  tetta_b = tan(wb/2);
395  c0 = cos(w0);
396 
397  for (i = 1; i <= L; i++) {
398  double ui = (2.0*i-1.0)/N;
399  double ci = cos(M_PI*ui/2.0);
400  double si = sin(M_PI*ui/2.0);
401  double Di = (a*a + ci*ci)*tetta_b*tetta_b + 2.0*a*si*tetta_b + 1;
402 
403  chebyshev1_fo_section(&f->section[i - 1], a, ci, tetta_b, g0, si, b, Di, c0);
404  }
405 }
406 
407 static void chebyshev2_fo_section(FoSection *S, double a,
408  double c, double tetta_b,
409  double g, double si, double b,
410  double D, double c0)
411 {
412  if (c0 == 1 || c0 == -1) {
413  S->b0 = (g*g*tetta_b*tetta_b + 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
414  S->b1 = 2*c0*(g*g*tetta_b*tetta_b - b*b - g*g*c*c)/D;
415  S->b2 = (g*g*tetta_b*tetta_b - 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
416  S->b3 = 0;
417  S->b4 = 0;
418 
419  S->a0 = 1;
420  S->a1 = 2*c0*(tetta_b*tetta_b - a*a - c*c)/D;
421  S->a2 = (tetta_b*tetta_b - 2*tetta_b*a*si + a*a + c*c)/D;
422  S->a3 = 0;
423  S->a4 = 0;
424  } else {
425  S->b0 = (g*g*tetta_b*tetta_b + 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;
426  S->b1 = -4*c0*(b*b + g*g*c*c + g*b*si*tetta_b)/D;
427  S->b2 = 2*((b*b + g*g*c*c)*(1 + 2*c0*c0) - g*g*tetta_b*tetta_b)/D;
428  S->b3 = -4*c0*(b*b + g*g*c*c - g*b*si*tetta_b)/D;
429  S->b4 = (g*g*tetta_b*tetta_b - 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;
430 
431  S->a0 = 1;
432  S->a1 = -4*c0*(a*a + c*c + a*si*tetta_b)/D;
433  S->a2 = 2*((a*a + c*c)*(1 + 2*c0*c0) - tetta_b*tetta_b)/D;
434  S->a3 = -4*c0*(a*a + c*c - a*si*tetta_b)/D;
435  S->a4 = (tetta_b*tetta_b - 2*a*si*tetta_b + a*a + c*c)/D;
436  }
437 }
438 
439 static void chebyshev2_bp_filter(EqualizatorFilter *f,
440  int N, double w0, double wb,
441  double G, double Gb, double G0)
442 {
443  double a, b, c0, tetta_b;
444  double epsilon, g, eu, ew;
445  int r = N % 2;
446  int L = (N - r) / 2;
447  int i;
448 
449  if (G == 0 && G0 == 0) {
450  f->section[0].a0 = 1;
451  f->section[0].b0 = 1;
452  f->section[1].a0 = 1;
453  f->section[1].b0 = 1;
454  return;
455  }
456 
457  G = ff_exp10(G/20);
458  Gb = ff_exp10(Gb/20);
459  G0 = ff_exp10(G0/20);
460 
461  epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
462  g = pow(G, 1.0 / N);
463  eu = pow(epsilon + sqrt(1 + epsilon*epsilon), 1.0/N);
464  ew = pow(G0*epsilon + Gb*sqrt(1 + epsilon*epsilon), 1.0/N);
465  a = (eu - 1.0/eu)/2.0;
466  b = (ew - g*g/ew)/2.0;
467  tetta_b = tan(wb/2);
468  c0 = cos(w0);
469 
470  for (i = 1; i <= L; i++) {
471  double ui = (2.0 * i - 1.0)/N;
472  double ci = cos(M_PI * ui / 2.0);
473  double si = sin(M_PI * ui / 2.0);
474  double Di = tetta_b*tetta_b + 2*a*si*tetta_b + a*a + ci*ci;
475 
476  chebyshev2_fo_section(&f->section[i - 1], a, ci, tetta_b, g, si, b, Di, c0);
477  }
478 }
479 
480 static double butterworth_compute_bw_gain_db(double gain)
481 {
482  double bw_gain = 0;
483 
484  if (gain <= -6)
485  bw_gain = gain + 3;
486  else if(gain > -6 && gain < 6)
487  bw_gain = gain * 0.5;
488  else if(gain >= 6)
489  bw_gain = gain - 3;
490 
491  return bw_gain;
492 }
493 
494 static double chebyshev1_compute_bw_gain_db(double gain)
495 {
496  double bw_gain = 0;
497 
498  if (gain <= -6)
499  bw_gain = gain + 1;
500  else if(gain > -6 && gain < 6)
501  bw_gain = gain * 0.9;
502  else if(gain >= 6)
503  bw_gain = gain - 1;
504 
505  return bw_gain;
506 }
507 
508 static double chebyshev2_compute_bw_gain_db(double gain)
509 {
510  double bw_gain = 0;
511 
512  if (gain <= -6)
513  bw_gain = -3;
514  else if(gain > -6 && gain < 6)
515  bw_gain = gain * 0.3;
516  else if(gain >= 6)
517  bw_gain = 3;
518 
519  return bw_gain;
520 }
521 
522 static inline double hz_2_rad(double x, double fs)
523 {
524  return 2 * M_PI * x / fs;
525 }
526 
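/*
 * equalizer() (re)designs the sections of one EqualizatorFilter: frequency
 * and width in Hz are converted to rad/sample with hz_2_rad(), the band-edge
 * gain is derived from the peak gain by the per-type helpers above, and the
 * matching *_bp_filter() design is run with a 0 dB reference gain.
 */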
527 static void equalizer(EqualizatorFilter *f, double sample_rate)
528 {
529  double w0 = hz_2_rad(f->freq, sample_rate);
530  double wb = hz_2_rad(f->width, sample_rate);
531  double bw_gain;
532 
533  switch (f->type) {
534  case BUTTERWORTH:
535  bw_gain = butterworth_compute_bw_gain_db(f->gain);
536  butterworth_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
537  break;
538  case CHEBYSHEV1:
539  bw_gain = chebyshev1_compute_bw_gain_db(f->gain);
540  chebyshev1_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
541  break;
542  case CHEBYSHEV2:
543  bw_gain = chebyshev2_compute_bw_gain_db(f->gain);
544  chebyshev2_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
545  break;
546  }
547 
548 }
549 
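/*
 * add_filter() runs the design for the filter just parsed into
 * filters[nb_filters] and doubles the filters array when it is nearly full.
 */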
550 static int add_filter(AudioNEqualizerContext *s, AVFilterLink *inlink)
551 {
552  equalizer(&s->filters[s->nb_filters], inlink->sample_rate);
553  if (s->nb_filters >= s->nb_allocated - 1) {
554  EqualizatorFilter *filters;
555
556  filters = av_calloc(s->nb_allocated, 2 * sizeof(*s->filters));
557  if (!filters)
558  return AVERROR(ENOMEM);
559  memcpy(filters, s->filters, sizeof(*s->filters) * s->nb_allocated);
560  av_free(s->filters);
561  s->filters = filters;
562  s->nb_allocated *= 2;
563  }
564  s->nb_filters++;
565 
566  return 0;
567 }
568 
569 static int config_input(AVFilterLink *inlink)
570 {
571  AVFilterContext *ctx = inlink->dst;
572  AudioNEqualizerContext *s = ctx->priv;
573  char *args = av_strdup(s->args);
574  char *saveptr = NULL;
575  int ret = 0;
576 
577  if (!args)
578  return AVERROR(ENOMEM);
579 
580  s->nb_allocated = 32 * inlink->channels;
581  s->filters = av_calloc(inlink->channels, 32 * sizeof(*s->filters));
582  if (!s->filters) {
583  s->nb_allocated = 0;
584  av_free(args);
585  return AVERROR(ENOMEM);
586  }
587 
588  while (1) {
589  char *arg = av_strtok(s->nb_filters == 0 ? args : NULL, "|", &saveptr);
590 
591  if (!arg)
592  break;
593 
594  s->filters[s->nb_filters].type = 0;
595  if (sscanf(arg, "c%d f=%lf w=%lf g=%lf t=%d", &s->filters[s->nb_filters].channel,
596  &s->filters[s->nb_filters].freq,
597  &s->filters[s->nb_filters].width,
598  &s->filters[s->nb_filters].gain,
599  &s->filters[s->nb_filters].type) != 5 &&
600  sscanf(arg, "c%d f=%lf w=%lf g=%lf", &s->filters[s->nb_filters].channel,
601  &s->filters[s->nb_filters].freq,
602  &s->filters[s->nb_filters].width,
603  &s->filters[s->nb_filters].gain) != 4 ) {
604  av_free(args);
605  return AVERROR(EINVAL);
606  }
607 
608  if (s->filters[s->nb_filters].freq < 0 ||
609  s->filters[s->nb_filters].freq > inlink->sample_rate / 2.0)
610  s->filters[s->nb_filters].ignore = 1;
611 
612  if (s->filters[s->nb_filters].channel < 0 ||
613  s->filters[s->nb_filters].channel >= inlink->channels)
614  s->filters[s->nb_filters].ignore = 1;
615 
616  s->filters[s->nb_filters].type = av_clip(s->filters[s->nb_filters].type, 0, NB_TYPES - 1);
617  ret = add_filter(s, inlink);
618  if (ret < 0)
619  break;
620  }
621 
622  av_free(args);
623 
624  return ret;
625 }
626 
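/*
 * Illustrative example (not part of the original source): the "change"
 * command takes "<filter index>|f=<freq>|w=<width>|g=<gain>", e.g.
 *
 *   change 1|f=200|w=50|g=1
 *
 * retunes the second parsed filter at runtime and, when curves are drawn,
 * redraws the response frame. How the command is delivered (sendcmd, zmq,
 * ...) is up to the caller.
 */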
627 static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
628  char *res, int res_len, int flags)
629 {
630  AudioNEqualizerContext *s = ctx->priv;
631  AVFilterLink *inlink = ctx->inputs[0];
632  int ret = AVERROR(ENOSYS);
633 
634  if (!strcmp(cmd, "change")) {
635  double freq, width, gain;
636  int filter;
637 
638  if (sscanf(args, "%d|f=%lf|w=%lf|g=%lf", &filter, &freq, &width, &gain) != 4)
639  return AVERROR(EINVAL);
640 
641  if (filter < 0 || filter >= s->nb_filters)
642  return AVERROR(EINVAL);
643 
644  if (freq < 0 || freq > inlink->sample_rate / 2.0)
645  return AVERROR(EINVAL);
646 
647  s->filters[filter].freq = freq;
648  s->filters[filter].width = width;
649  s->filters[filter].gain = gain;
650  equalizer(&s->filters[filter], inlink->sample_rate);
651  if (s->draw_curves)
652  draw_curves(ctx, inlink, s->video);
653 
654  ret = 0;
655  }
656 
657  return ret;
658 }
659 
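/*
 * Direct form I update of one fourth-order section (a0 is always 1 in the
 * designs above):
 *
 *   y[n] = b0*x[n] + b1*x[n-1] + ... + b4*x[n-4]
 *                  - a1*y[n-1] - ... - a4*y[n-4]
 *
 * num[] holds the past inputs and denum[] the past outputs.
 */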
660 static inline double section_process(FoSection *S, double in)
661 {
662  double out;
663 
664  out = S->b0 * in;
665  out+= S->b1 * S->num[0] - S->denum[0] * S->a1;
666  out+= S->b2 * S->num[1] - S->denum[1] * S->a2;
667  out+= S->b3 * S->num[2] - S->denum[2] * S->a3;
668  out+= S->b4 * S->num[3] - S->denum[3] * S->a4;
669 
670  S->num[3] = S->num[2];
671  S->num[2] = S->num[1];
672  S->num[1] = S->num[0];
673  S->num[0] = in;
674 
675  S->denum[3] = S->denum[2];
676  S->denum[2] = S->denum[1];
677  S->denum[1] = S->denum[0];
678  S->denum[0] = out;
679 
680  return out;
681 }
682 
683 static double process_sample(FoSection *s1, double in)
684 {
685  double p0 = in, p1;
686  int i;
687 
688  for (i = 0; i < FILTER_ORDER / 2; i++) {
689  p1 = section_process(&s1[i], p0);
690  p0 = p1;
691  }
692 
693  return p1;
694 }
695 
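/*
 * Slice-threaded worker: job jobnr handles the channel range [start, end)
 * and applies every enabled filter assigned to one of those channels to the
 * whole frame, in place.
 */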
696 static int filter_channels(AVFilterContext *ctx, void *arg,
697  int jobnr, int nb_jobs)
698 {
699  AudioNEqualizerContext *s = ctx->priv;
700  AVFrame *buf = arg;
701  const int start = (buf->channels * jobnr) / nb_jobs;
702  const int end = (buf->channels * (jobnr+1)) / nb_jobs;
703 
704  for (int i = 0; i < s->nb_filters; i++) {
705  EqualizatorFilter *f = &s->filters[i];
706  double *bptr;
707 
708  if (f->gain == 0. || f->ignore)
709  continue;
710  if (f->channel < start ||
711  f->channel >= end)
712  continue;
713 
714  bptr = (double *)buf->extended_data[f->channel];
715  for (int n = 0; n < buf->nb_samples; n++) {
716  double sample = bptr[n];
717 
718  sample = process_sample(f->section, sample);
719  bptr[n] = sample;
720  }
721  }
722 
723  return 0;
724 }
725 
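/*
 * Audio is filtered in place unless the filter is timeline-disabled; when
 * curves are requested, a clone of the persistent response frame is sent on
 * the second (video) output with a pts derived from the end of the current
 * audio frame.
 */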
726 static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
727 {
728  AVFilterContext *ctx = inlink->dst;
729  AudioNEqualizerContext *s = ctx->priv;
730  AVFilterLink *outlink = ctx->outputs[0];
731 
732  if (!ctx->is_disabled)
733  ctx->internal->execute(ctx, filter_channels, buf, NULL, FFMIN(inlink->channels,
734  ff_filter_get_nb_threads(ctx)));
735
736  if (s->draw_curves) {
737  AVFrame *clone;
738 
739  const int64_t pts = buf->pts +
740  av_rescale_q(buf->nb_samples, (AVRational){ 1, inlink->sample_rate },
741  outlink->time_base);
742  int ret;
743 
744  s->video->pts = pts;
745  clone = av_frame_clone(s->video);
746  if (!clone)
747  return AVERROR(ENOMEM);
748  ret = ff_filter_frame(ctx->outputs[1], clone);
749  if (ret < 0)
750  return ret;
751  }
752 
753  return ff_filter_frame(outlink, buf);
754 }
755 
756 static const AVFilterPad inputs[] = {
757  {
758  .name = "default",
759  .type = AVMEDIA_TYPE_AUDIO,
760  .config_props = config_input,
761  .filter_frame = filter_frame,
762  .needs_writable = 1,
763  },
764  { NULL }
765 };
766 
767 AVFilter ff_af_anequalizer = {
768  .name = "anequalizer",
769  .description = NULL_IF_CONFIG_SMALL("Apply high-order audio parametric multi band equalizer."),
770  .priv_size = sizeof(AudioNEqualizerContext),
771  .priv_class = &anequalizer_class,
772  .init = init,
773  .uninit = uninit,
774  .query_formats = query_formats,
775  .inputs = inputs,
776  .outputs = NULL,
777  .process_command = process_command,
778  .flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS |
779  AVFILTER_FLAG_SLICE_THREADS |
780  AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
781 };