FFmpeg 4.4.6
vf_vaguedenoiser.c
/*
 * Copyright (c) 2003 LeFunGus, lefungus@altern.org
 *
 * This file is part of FFmpeg
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <float.h>

#include "libavutil/imgutils.h"
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/pixdesc.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"

#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

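/* Filter private context: user options plus per-plane geometry and the
 * scratch buffers used by the forward/inverse wavelet transform. */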
typedef struct VagueDenoiserContext {
    const AVClass *class;

    float threshold;
    float percent;
    int method;
    int type;
    int nsteps;
    int planes;

    int depth;
    int bpc;
    int peak;
    int nb_planes;
    int planeheight[4];
    int planewidth[4];

    float *block;
    float *in;
    float *out;
    float *tmp;

    int hlowsize[4][32];
    int hhighsize[4][32];
    int vlowsize[4][32];
    int vhighsize[4][32];

    void (*thresholding)(float *block, const int width, const int height,
                         const int stride, const float threshold,
                         const float percent);
} VagueDenoiserContext;

#define OFFSET(x) offsetof(VagueDenoiserContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption vaguedenoiser_options[] = {
    { "threshold", "set filtering strength",        OFFSET(threshold), AV_OPT_TYPE_FLOAT, {.dbl=2.},  0, DBL_MAX, FLAGS },
    { "method",    "set filtering method",          OFFSET(method),    AV_OPT_TYPE_INT,   {.i64=2 },  0, 2,       FLAGS, "method" },
        { "hard",    "hard thresholding",    0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "method" },
        { "soft",    "soft thresholding",    0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "method" },
        { "garrote", "garrote thresholding", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "method" },
    { "nsteps",    "set number of steps",           OFFSET(nsteps),    AV_OPT_TYPE_INT,   {.i64=6 },  1, 32,      FLAGS },
    { "percent",   "set percent of full denoising", OFFSET(percent),   AV_OPT_TYPE_FLOAT, {.dbl=85},  0, 100,     FLAGS },
    { "planes",    "set planes to filter",          OFFSET(planes),    AV_OPT_TYPE_INT,   {.i64=15 }, 0, 15,      FLAGS },
    { "type",      "set threshold type",            OFFSET(type),      AV_OPT_TYPE_INT,   {.i64=0 },  0, 1,       FLAGS, "type" },
        { "universal", "universal (VisuShrink)", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "type" },
        { "bayes",     "bayes (BayesShrink)",    0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "type" },
    { NULL }
};
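/*
 * Illustrative invocation (file names are placeholders):
 *   ffmpeg -i input.mkv -vf vaguedenoiser=threshold=3:method=soft:nsteps=5 output.mkv
 */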

AVFILTER_DEFINE_CLASS(vaguedenoiser);

#define NPAD 10

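/* Symmetric analysis/synthesis filter pairs (9-tap low-pass, 7-tap high-pass
 * analysis and their synthesis counterparts) used by transform_step() and
 * invert_step(). */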
static const float analysis_low[9] = {
    0.037828455506995f, -0.023849465019380f, -0.110624404418423f, 0.377402855612654f,
    0.852698679009403f, 0.377402855612654f, -0.110624404418423f, -0.023849465019380f, 0.037828455506995f
};

static const float analysis_high[7] = {
    -0.064538882628938f, 0.040689417609558f, 0.418092273222212f, -0.788485616405664f,
    0.418092273222212f, 0.040689417609558f, -0.064538882628938f
};

static const float synthesis_low[7] = {
    -0.064538882628938f, -0.040689417609558f, 0.418092273222212f, 0.788485616405664f,
    0.418092273222212f, -0.040689417609558f, -0.064538882628938f
};

static const float synthesis_high[9] = {
    -0.037828455506995f, -0.023849465019380f, 0.110624404418423f, 0.377402855612654f,
    -0.852698679009403f, 0.377402855612654f, 0.110624404418423f, -0.023849465019380f, -0.037828455506995f
};

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY9, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16,
        AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV440P, AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_YUVJ411P,
        AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
        AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
        AV_PIX_FMT_YUV440P10,
        AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV444P12,
        AV_PIX_FMT_YUV440P12,
        AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
        AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
        AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
        AV_PIX_FMT_YUVA444P9, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_YUVA444P16,
        AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA422P16,
        AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA420P16,
        AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRP9, AV_PIX_FMT_GBRP10,
        AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14, AV_PIX_FMT_GBRP16,
        AV_PIX_FMT_GBRAP, AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

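/* Derive per-plane dimensions, allocate the work buffers, scale the user
 * threshold to the input bit depth, clamp nsteps to the plane size and
 * precompute the per-level subband sizes. */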
static int config_input(AVFilterLink *inlink)
{
    VagueDenoiserContext *s = inlink->dst->priv;
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
    int p, i, nsteps_width, nsteps_height, nsteps_max;

    s->depth = desc->comp[0].depth;
    s->bpc = (s->depth + 7) / 8;
    s->nb_planes = desc->nb_components;

    s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
    s->planeheight[0] = s->planeheight[3] = inlink->h;
    s->planewidth[1]  = s->planewidth[2]  = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
    s->planewidth[0]  = s->planewidth[3]  = inlink->w;

    s->block = av_malloc_array(inlink->w * inlink->h, sizeof(*s->block));
    s->in    = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->in));
    s->out   = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->out));
    s->tmp   = av_malloc_array(32 + FFMAX(inlink->w, inlink->h), sizeof(*s->tmp));

    if (!s->block || !s->in || !s->out || !s->tmp)
        return AVERROR(ENOMEM);

    s->threshold *= 1 << (s->depth - 8);
    s->peak = (1 << s->depth) - 1;

    nsteps_width  = ((s->planes & 2 || s->planes & 4) && s->nb_planes > 1) ? s->planewidth[1]  : s->planewidth[0];
    nsteps_height = ((s->planes & 2 || s->planes & 4) && s->nb_planes > 1) ? s->planeheight[1] : s->planeheight[0];

    for (nsteps_max = 1; nsteps_max < 15; nsteps_max++) {
        if (pow(2, nsteps_max) >= nsteps_width || pow(2, nsteps_max) >= nsteps_height)
            break;
    }

    s->nsteps = FFMIN(s->nsteps, nsteps_max - 2);

    for (p = 0; p < 4; p++) {
        s->hlowsize[p][0]  = (s->planewidth[p] + 1) >> 1;
        s->hhighsize[p][0] = s->planewidth[p] >> 1;
        s->vlowsize[p][0]  = (s->planeheight[p] + 1) >> 1;
        s->vhighsize[p][0] = s->planeheight[p] >> 1;

        for (i = 1; i < s->nsteps; i++) {
            s->hlowsize[p][i]  = (s->hlowsize[p][i - 1] + 1) >> 1;
            s->hhighsize[p][i] = s->hlowsize[p][i - 1] >> 1;
            s->vlowsize[p][i]  = (s->vlowsize[p][i - 1] + 1) >> 1;
            s->vhighsize[p][i] = s->vlowsize[p][i - 1] >> 1;
        }
    }

    return 0;
}

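/* 1-D copy helpers between an image plane (arbitrary stride) and the
 * contiguous, NPAD-padded line buffers s->in / s->out. */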
static inline void copy(const float *p1, float *p2, const int length)
{
    memcpy(p2, p1, length * sizeof(float));
}

static inline void copyv(const float *p1, const int stride1, float *p2, const int length)
{
    int i;

    for (i = 0; i < length; i++) {
        p2[i] = *p1;
        p1 += stride1;
    }
}

static inline void copyh(const float *p1, float *p2, const int stride2, const int length)
{
    int i;

    for (i = 0; i < length; i++) {
        *p2 = p1[i];
        p2 += stride2;
    }
}

// Do symmetric extension of data using prescribed symmetries
// Original values are in output[npad] through output[npad+size-1]
// New values will be placed in output[0] through output[npad] and in output[npad+size] through output[2*npad+size-1] (note: end values may not be filled in)
// extension at left bdry is ... 3 2 1 0 | 0 1 2 3 ...
// same for right boundary
// if right_ext=1 then ... 3 2 1 0 | 1 2 3
static void symmetric_extension(float *output, const int size, const int left_ext, const int right_ext)
{
    int first = NPAD;
    int last = NPAD - 1 + size;
    const int originalLast = last;
    int i, nextend, idx;

    if (left_ext == 2)
        output[--first] = output[NPAD];
    if (right_ext == 2)
        output[++last] = output[originalLast];

    // extend left end
    nextend = first;
    for (i = 0; i < nextend; i++)
        output[--first] = output[NPAD + 1 + i];

    idx = NPAD + NPAD - 1 + size;

    // extend right end
    nextend = idx - last;
    for (i = 0; i < nextend; i++)
        output[++last] = output[originalLast - 1 - i];
}

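/* One forward decomposition level along one dimension: symmetrically extend
 * the padded input line, then convolve with the 9-tap low-pass and 7-tap
 * high-pass analysis filters; the decimated low band goes to output[NPAD..],
 * the high band directly after it. */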
static void transform_step(float *input, float *output, const int size, const int low_size, VagueDenoiserContext *s)
{
    int i;

    symmetric_extension(input, size, 1, 1);

    for (i = NPAD; i < NPAD + low_size; i++) {
        const float a = input[2 * i - 14] * analysis_low[0];
        const float b = input[2 * i - 13] * analysis_low[1];
        const float c = input[2 * i - 12] * analysis_low[2];
        const float d = input[2 * i - 11] * analysis_low[3];
        const float e = input[2 * i - 10] * analysis_low[4];
        const float f = input[2 * i - 9] * analysis_low[3];
        const float g = input[2 * i - 8] * analysis_low[2];
        const float h = input[2 * i - 7] * analysis_low[1];
        const float k = input[2 * i - 6] * analysis_low[0];

        output[i] = a + b + c + d + e + f + g + h + k;
    }

    for (i = NPAD; i < NPAD + low_size; i++) {
        const float a = input[2 * i - 12] * analysis_high[0];
        const float b = input[2 * i - 11] * analysis_high[1];
        const float c = input[2 * i - 10] * analysis_high[2];
        const float d = input[2 * i - 9] * analysis_high[3];
        const float e = input[2 * i - 8] * analysis_high[2];
        const float f = input[2 * i - 7] * analysis_high[1];
        const float g = input[2 * i - 6] * analysis_high[0];

        output[i + low_size] = a + b + c + d + e + f + g;
    }
}

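/* One reconstruction level: symmetrically extend the low and high subbands,
 * then upsample and filter them with the synthesis pair, accumulating the
 * results into output. */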
static void invert_step(const float *input, float *output, float *temp, const int size, VagueDenoiserContext *s)
{
    const int low_size = (size + 1) >> 1;
    const int high_size = size >> 1;
    int left_ext = 1, right_ext, i;
    int findex;

    memcpy(temp + NPAD, input + NPAD, low_size * sizeof(float));

    right_ext = (size % 2 == 0) ? 2 : 1;
    symmetric_extension(temp, low_size, left_ext, right_ext);

    memset(output, 0, (NPAD + NPAD + size) * sizeof(float));
    findex = (size + 2) >> 1;

    for (i = 9; i < findex + 11; i++) {
        const float a = temp[i] * synthesis_low[0];
        const float b = temp[i] * synthesis_low[1];
        const float c = temp[i] * synthesis_low[2];
        const float d = temp[i] * synthesis_low[3];

        output[2 * i - 13] += a;
        output[2 * i - 12] += b;
        output[2 * i - 11] += c;
        output[2 * i - 10] += d;
        output[2 * i - 9] += c;
        output[2 * i - 8] += b;
        output[2 * i - 7] += a;
    }

    memcpy(temp + NPAD, input + NPAD + low_size, high_size * sizeof(float));

    left_ext = 2;
    right_ext = (size % 2 == 0) ? 1 : 2;
    symmetric_extension(temp, high_size, left_ext, right_ext);

    for (i = 8; i < findex + 11; i++) {
        const float a = temp[i] * synthesis_high[0];
        const float b = temp[i] * synthesis_high[1];
        const float c = temp[i] * synthesis_high[2];
        const float d = temp[i] * synthesis_high[3];
        const float e = temp[i] * synthesis_high[4];

        output[2 * i - 13] += a;
        output[2 * i - 12] += b;
        output[2 * i - 11] += c;
        output[2 * i - 10] += d;
        output[2 * i - 9] += e;
        output[2 * i - 8] += d;
        output[2 * i - 7] += c;
        output[2 * i - 6] += b;
        output[2 * i - 5] += a;
    }
}

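/* Hard shrinkage: coefficients with magnitude at or below the threshold are
 * attenuated by (1 - percent/100); larger coefficients pass unchanged. */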
static void hard_thresholding(float *block, const int width, const int height,
                              const int stride, const float threshold,
                              const float percent)
{
    const float frac = 1.f - percent * 0.01f;
    int y, x;

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            if (FFABS(block[x]) <= threshold)
                block[x] *= frac;
        }
        block += stride;
    }
}

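/* Soft shrinkage: small coefficients are attenuated as in hard thresholding,
 * larger ones are shrunk toward zero by threshold * percent/100. */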
static void soft_thresholding(float *block, const int width, const int height, const int stride,
                              const float threshold, const float percent)
{
    const float frac = 1.f - percent * 0.01f;
    const float shift = threshold * 0.01f * percent;
    int y, x;

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            const float temp = FFABS(block[x]);
            if (temp <= threshold)
                block[x] *= frac;
            else
                block[x] = (block[x] < 0.f ? -1.f : (block[x] > 0.f ? 1.f : 0.f)) * (temp - shift);
        }
        block += stride;
    }
}

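/* Non-negative garrote shrinkage: large coefficients are scaled by
 * (x^2 - threshold^2 * percent/100) / x^2. */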
static void qian_thresholding(float *block, const int width, const int height,
                              const int stride, const float threshold,
                              const float percent)
{
    const float percent01 = percent * 0.01f;
    const float tr2 = threshold * threshold * percent01;
    const float frac = 1.f - percent01;
    int y, x;

    for (y = 0; y < height; y++) {
        for (x = 0; x < width; x++) {
            const float temp = FFABS(block[x]);
            if (temp <= threshold) {
                block[x] *= frac;
            } else {
                const float tp2 = temp * temp;
                block[x] *= (tp2 - tr2) / tp2;
            }
        }
        block += stride;
    }
}

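/* BayesShrink-style estimate of a per-subband threshold derived from the
 * mean squared coefficient value of the subband. */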
static float bayes_threshold(float *block, const int width, const int height,
                             const int stride, const float threshold)
{
    float mean = 0.f;

    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            mean += block[x] * block[x];
        }
        block += stride;
    }

    mean /= width * height;

    return threshold * threshold / (FFMAX(sqrtf(mean - threshold), FLT_EPSILON));
}

static void filter(VagueDenoiserContext *s, AVFrame *in, AVFrame *out)
{
    int p, y, x, i, j;

    for (p = 0; p < s->nb_planes; p++) {
        const int height = s->planeheight[p];
        const int width = s->planewidth[p];
        const uint8_t *srcp8 = in->data[p];
        const uint16_t *srcp16 = (const uint16_t *)in->data[p];
        uint8_t *dstp8 = out->data[p];
        uint16_t *dstp16 = (uint16_t *)out->data[p];
        float *output = s->block;
        int h_low_size0 = width;
        int v_low_size0 = height;
        int nsteps_transform = s->nsteps;
        int nsteps_invert = s->nsteps;
        const float *input = s->block;

        if (!((1 << p) & s->planes)) {
            av_image_copy_plane(out->data[p], out->linesize[p], in->data[p], in->linesize[p],
                                s->planewidth[p] * s->bpc, s->planeheight[p]);
            continue;
        }

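        /* Promote the plane's 8- or 16-bit samples to floats in s->block. */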
        if (s->depth <= 8) {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++)
                    output[x] = srcp8[x];
                srcp8 += in->linesize[p];
                output += width;
            }
        } else {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++)
                    output[x] = srcp16[x];
                srcp16 += in->linesize[p] / 2;
                output += width;
            }
        }

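        /* Forward transform: at each step filter the rows, then the columns,
         * of the current low-pass region, halving its size each time. */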
        while (nsteps_transform--) {
            int low_size = (h_low_size0 + 1) >> 1;
            float *input = s->block;
            for (j = 0; j < v_low_size0; j++) {
                copy(input, s->in + NPAD, h_low_size0);
                transform_step(s->in, s->out, h_low_size0, low_size, s);
                copy(s->out + NPAD, input, h_low_size0);
                input += width;
            }

            low_size = (v_low_size0 + 1) >> 1;
            input = s->block;
            for (j = 0; j < h_low_size0; j++) {
                copyv(input, width, s->in + NPAD, v_low_size0);
                transform_step(s->in, s->out, v_low_size0, low_size, s);
                copyh(s->out + NPAD, input, width, v_low_size0);
                input++;
            }

            h_low_size0 = (h_low_size0 + 1) >> 1;
            v_low_size0 = (v_low_size0 + 1) >> 1;
        }

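        /* Shrink the coefficients: one global pass with the universal
         * threshold, or per-subband BayesShrink thresholds. */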
        if (s->type == 0) {
            s->thresholding(s->block, width, height, width, s->threshold, s->percent);
        } else {
            for (int n = 0; n < s->nsteps; n++) {
                float threshold;
                float *block;

                if (n == s->nsteps - 1) {
                    threshold = bayes_threshold(s->block, s->hlowsize[p][n], s->vlowsize[p][n], width, s->threshold);
                    s->thresholding(s->block, s->hlowsize[p][n], s->vlowsize[p][n], width, threshold, s->percent);
                }
                block = s->block + s->hlowsize[p][n];
                threshold = bayes_threshold(block, s->hhighsize[p][n], s->vlowsize[p][n], width, s->threshold);
                s->thresholding(block, s->hhighsize[p][n], s->vlowsize[p][n], width, threshold, s->percent);
                block = s->block + s->vlowsize[p][n] * width;
                threshold = bayes_threshold(block, s->hlowsize[p][n], s->vhighsize[p][n], width, s->threshold);
                s->thresholding(block, s->hlowsize[p][n], s->vhighsize[p][n], width, threshold, s->percent);
                block = s->block + s->hlowsize[p][n] + s->vlowsize[p][n] * width;
                threshold = bayes_threshold(block, s->hhighsize[p][n], s->vhighsize[p][n], width, s->threshold);
                s->thresholding(block, s->hhighsize[p][n], s->vhighsize[p][n], width, threshold, s->percent);
            }
        }

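        /* Inverse transform from the coarsest level back to full resolution:
         * columns first, then rows. */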
        while (nsteps_invert--) {
            const int idx = s->vlowsize[p][nsteps_invert] + s->vhighsize[p][nsteps_invert];
            const int idx2 = s->hlowsize[p][nsteps_invert] + s->hhighsize[p][nsteps_invert];
            float * idx3 = s->block;
            for (i = 0; i < idx2; i++) {
                copyv(idx3, width, s->in + NPAD, idx);
                invert_step(s->in, s->out, s->tmp, idx, s);
                copyh(s->out + NPAD, idx3, width, idx);
                idx3++;
            }

            idx3 = s->block;
            for (i = 0; i < idx; i++) {
                copy(idx3, s->in + NPAD, idx2);
                invert_step(s->in, s->out, s->tmp, idx2, s);
                copy(s->out + NPAD, idx3, idx2);
                idx3 += width;
            }
        }

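        /* Round, clip and store the filtered samples back into the output plane. */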
        if (s->depth <= 8) {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++)
                    dstp8[x] = av_clip_uint8(input[x] + 0.5f);
                input += width;
                dstp8 += out->linesize[p];
            }
        } else {
            for (y = 0; y < height; y++) {
                for (x = 0; x < width; x++)
                    dstp16[x] = av_clip(input[x] + 0.5f, 0, s->peak);
                input += width;
                dstp16 += out->linesize[p] / 2;
            }
        }
    }
}

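/* Filter in place when the input frame is writable; otherwise allocate an
 * output buffer and copy the frame properties across. */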
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    VagueDenoiserContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int direct = av_frame_is_writable(in);

    if (direct) {
        out = in;
    } else {
        out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        av_frame_copy_props(out, in);
    }

    filter(s, in, out);

    if (!direct)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}

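/* Select the thresholding kernel according to the "method" option. */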
static av_cold int init(AVFilterContext *ctx)
{
    VagueDenoiserContext *s = ctx->priv;

    switch (s->method) {
    case 0:
        s->thresholding = hard_thresholding;
        break;
    case 1:
        s->thresholding = soft_thresholding;
        break;
    case 2:
        s->thresholding = qian_thresholding;
        break;
    }

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    VagueDenoiserContext *s = ctx->priv;

    av_freep(&s->block);
    av_freep(&s->in);
    av_freep(&s->out);
    av_freep(&s->tmp);
}

static const AVFilterPad vaguedenoiser_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad vaguedenoiser_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO
    },
    { NULL }
};

AVFilter ff_vf_vaguedenoiser = {
    .name          = "vaguedenoiser",
    .description   = NULL_IF_CONFIG_SMALL("Apply a Wavelet based Denoiser."),
    .priv_size     = sizeof(VagueDenoiserContext),
    .priv_class    = &vaguedenoiser_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = vaguedenoiser_inputs,
    .outputs       = vaguedenoiser_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};