FFmpeg 4.4.6
vf_pp7.c
/*
 * Copyright (c) 2005 Michael Niedermayer <michaelni@gmx.at>
 * Copyright (c) 2014 Arwa Arif <arwaarif1994@gmail.com>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with FFmpeg; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

/**
 * @file
 * Postprocessing filter - 7
 *
 * Originally written by Michael Niedermayer for the MPlayer
 * project, and ported by Arwa Arif for FFmpeg.
 */

#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/mem_internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "internal.h"
#include "qp_table.h"
#include "vf_pp7.h"

enum mode {
    MODE_HARD,
    MODE_SOFT,
    MODE_MEDIUM
};

#define OFFSET(x) offsetof(PP7Context, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
static const AVOption pp7_options[] = {
    { "qp", "force a constant quantizer parameter", OFFSET(qp), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 64, FLAGS },
    { "mode", "set thresholding mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64 = MODE_MEDIUM}, 0, 2, FLAGS, "mode" },
    { "hard",   "hard thresholding",   0, AV_OPT_TYPE_CONST, {.i64 = MODE_HARD},   INT_MIN, INT_MAX, FLAGS, "mode" },
    { "soft",   "soft thresholding",   0, AV_OPT_TYPE_CONST, {.i64 = MODE_SOFT},   INT_MIN, INT_MAX, FLAGS, "mode" },
    { "medium", "medium thresholding", 0, AV_OPT_TYPE_CONST, {.i64 = MODE_MEDIUM}, INT_MIN, INT_MAX, FLAGS, "mode" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(pp7);

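/*
 * Illustrative invocation based on the options declared above (file names
 * are placeholders):
 *     ffmpeg -i in.mp4 -vf "pp7=qp=8:mode=soft" out.mp4
 * With qp=0 (the default) the per-frame QP table exported by the decoder is
 * used instead of a constant quantizer.
 */

/*
 * 8x8 ordered-dither matrix (values 0..63); it is added to the filtered
 * value before the final ">> 6" downscale in filter() below.
 */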
DECLARE_ALIGNED(8, static const uint8_t, dither)[8][8] = {
    {  0, 48, 12, 60,  3, 51, 15, 63, },
    { 32, 16, 44, 28, 35, 19, 47, 31, },
    {  8, 56,  4, 52, 11, 59,  7, 55, },
    { 40, 24, 36, 20, 43, 27, 39, 23, },
    {  2, 50, 14, 62,  1, 49, 13, 61, },
    { 34, 18, 46, 30, 33, 17, 45, 29, },
    { 10, 58,  6, 54,  9, 57,  5, 53, },
    { 42, 26, 38, 22, 41, 25, 37, 21, },
};

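/*
 * Normalization constants: SN0, SN1 and SN2 are the square roots of N0, N1
 * and N2; N is the 16-bit fixed-point scale used to build the factor[]
 * table below.
 */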
#define N0 4
#define N1 5
#define N2 10
#define SN0 2
#define SN1 2.2360679775
#define SN2 3.16227766017
#define N (1 << 16)

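/*
 * Per-coefficient fixed-point weights, N divided by a product of the norms
 * above; the requantize functions multiply each surviving coefficient by
 * its factor and round the sum back down with "(a + (1 << 11)) >> 12".
 */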
static const int factor[16] = {
    N / (N0 * N0), N / (N0 * N1), N / (N0 * N0), N / (N0 * N2),
    N / (N1 * N0), N / (N1 * N1), N / (N1 * N0), N / (N1 * N2),
    N / (N0 * N0), N / (N0 * N1), N / (N0 * N0), N / (N0 * N2),
    N / (N2 * N0), N / (N2 * N1), N / (N2 * N0), N / (N2 * N2),
};

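/*
 * Build the per-QP threshold table: thres2[qp][i] grows linearly with the
 * quantizer and with the norm of coefficient position i (SN0 or SN2 per
 * axis), so higher QPs zero out proportionally larger coefficients.
 */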
static void init_thres2(PP7Context *p)
{
    int qp, i;
    int bias = 0; //FIXME

    for (qp = 0; qp < 99; qp++) {
        for (i = 0; i < 16; i++) {
            p->thres2[qp][i] = ((i&1) ? SN2 : SN0) * ((i&4) ? SN2 : SN0) * FFMAX(1, qp) * (1<<2) - 1 - bias;
        }
    }
}

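/*
 * Vertical transform pass: for each of four adjacent columns, fold the seven
 * samples src[0*stride] .. src[6*stride] into four coefficients, written as
 * one group of four per column.
 */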
static inline void dctA_c(int16_t *dst, uint8_t *src, int stride)
{
    int i;

    for (i = 0; i < 4; i++) {
        int s0 = src[0 * stride] + src[6 * stride];
        int s1 = src[1 * stride] + src[5 * stride];
        int s2 = src[2 * stride] + src[4 * stride];
        int s3 = src[3 * stride];
        int s  = s3 + s3;
        s3 = s - s0;
        s0 = s + s0;
        s  = s2 + s1;
        s2 = s2 - s1;
        dst[0] = s0 + s;
        dst[2] = s0 - s;
        dst[1] = 2 * s3 + s2;
        dst[3] = s3 - 2 * s2;
        src++;
        dst += 4;
    }
}

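/*
 * Horizontal transform pass: apply the same seven-tap butterfly across the
 * outputs of seven vertical passes (stored four int16_t apart in the temp
 * buffer), producing the 4x4 coefficient block that is requantized below.
 */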
static void dctB_c(int16_t *dst, int16_t *src)
{
    int i;

    for (i = 0; i < 4; i++) {
        int s0 = src[0 * 4] + src[6 * 4];
        int s1 = src[1 * 4] + src[5 * 4];
        int s2 = src[2 * 4] + src[4 * 4];
        int s3 = src[3 * 4];
        int s  = s3 + s3;
        s3 = s - s0;
        s0 = s + s0;
        s  = s2 + s1;
        s2 = s2 - s1;
        dst[0 * 4] = s0 + s;
        dst[2 * 4] = s0 - s;
        dst[1 * 4] = 2 * s3 + s2;
        dst[3 * 4] = s3 - 2 * s2;
        src++;
        dst++;
    }
}

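/*
 * The three requantize variants below all keep the DC term and accumulate
 * the weighted coefficients that survive a dead-zone threshold; they differ
 * in how borderline coefficients are treated: hard keeps them unchanged,
 * soft shrinks them toward zero by the threshold, and medium ramps from
 * zero up to the full value over a transition band.
 */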
static int hardthresh_c(PP7Context *p, int16_t *src, int qp)
{
    int i;
    int a;

    a = src[0] * factor[0];
    for (i = 1; i < 16; i++) {
        unsigned int threshold1 = p->thres2[qp][i];
        unsigned int threshold2 = threshold1 << 1;
        int level = src[i];
        if (((unsigned)(level + threshold1)) > threshold2)
            a += level * factor[i];
    }
    return (a + (1 << 11)) >> 12;
}

static int mediumthresh_c(PP7Context *p, int16_t *src, int qp)
{
    int i;
    int a;

    a = src[0] * factor[0];
    for (i = 1; i < 16; i++) {
        unsigned int threshold1 = p->thres2[qp][i];
        unsigned int threshold2 = threshold1 << 1;
        int level = src[i];
        if (((unsigned)(level + threshold1)) > threshold2) {
            if (((unsigned)(level + 2 * threshold1)) > 2 * threshold2)
                a += level * factor[i];
            else {
                if (level > 0)
                    a += 2 * (level - (int)threshold1) * factor[i];
                else
                    a += 2 * (level + (int)threshold1) * factor[i];
            }
        }
    }
    return (a + (1 << 11)) >> 12;
}

static int softthresh_c(PP7Context *p, int16_t *src, int qp)
{
    int i;
    int a;

    a = src[0] * factor[0];
    for (i = 1; i < 16; i++) {
        unsigned int threshold1 = p->thres2[qp][i];
        unsigned int threshold2 = threshold1 << 1;
        int level = src[i];
        if (((unsigned)(level + threshold1)) > threshold2) {
            if (level > 0)
                a += (level - (int)threshold1) * factor[i];
            else
                a += (level + (int)threshold1) * factor[i];
        }
    }
    return (a + (1 << 11)) >> 12;
}

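/*
 * Main per-plane loop: the source plane is copied into a padded buffer with
 * 8 pixels of mirrored border on each side, then every output pixel is
 * reconstructed from the thresholded transform of its neighbourhood.  The
 * quantizer is either the fixed qp option or read from the per-macroblock
 * QP table (qp_store), and the result is dithered and clamped to 0..255.
 */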
static void filter(PP7Context *p, uint8_t *dst, uint8_t *src,
                   int dst_stride, int src_stride,
                   int width, int height,
                   uint8_t *qp_store, int qp_stride, int is_luma)
{
    int x, y;
    const int stride = is_luma ? p->temp_stride : ((width + 16 + 15) & (~15));
    uint8_t *p_src = p->src + 8 * stride;
    int16_t *block = (int16_t *)p->src;
    int16_t *temp  = (int16_t *)(p->src + 32);

    if (!src || !dst) return;
    for (y = 0; y < height; y++) {
        int index = 8 + 8 * stride + y * stride;
        memcpy(p_src + index, src + y * src_stride, width);
        for (x = 0; x < 8; x++) {
            p_src[index -         x - 1] = p_src[index +         x    ];
            p_src[index + width + x    ] = p_src[index + width - x - 1];
        }
    }
    for (y = 0; y < 8; y++) {
        memcpy(p_src + (7 - y) * stride, p_src + (y + 8) * stride, stride);
        memcpy(p_src + (height + 8 + y) * stride, p_src + (height - y + 7) * stride, stride);
    }
    //FIXME (try edge emu)

    for (y = 0; y < height; y++) {
        for (x = -8; x < 0; x += 4) {
            const int index = x + y * stride + (8 - 3) * (1 + stride) + 8; //FIXME silly offset
            uint8_t *src = p_src + index;
            int16_t *tp  = temp + 4 * x;

            dctA_c(tp + 4 * 8, src, stride);
        }
        for (x = 0; x < width; ) {
            const int qps = 3 + is_luma;
            int qp;
            int end = FFMIN(x + 8, width);

            if (p->qp)
                qp = p->qp;
            else {
                qp = qp_store[(FFMIN(x, width - 1) >> qps) + (FFMIN(y, height - 1) >> qps) * qp_stride];
                qp = ff_norm_qscale(qp, p->qscale_type);
            }
            for (; x < end; x++) {
                const int index = x + y * stride + (8 - 3) * (1 + stride) + 8; //FIXME silly offset
                uint8_t *src = p_src + index;
                int16_t *tp  = temp + 4 * x;
                int v;

                if ((x & 3) == 0)
                    dctA_c(tp + 4 * 8, src, stride);

                p->dctB(block, tp);

                v = p->requantize(p, block, qp);
                v = (v + dither[y & 7][x & 7]) >> 6;
                if ((unsigned)v > 255)
                    v = (-v) >> 31;
                dst[x + y * dst_stride] = v;
            }
        }
    }
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV420P,  AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV410P,  AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_GBRP,     AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

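/*
 * Allocate the padded intermediate buffer (width and height each extended by
 * 16 and rounded up, plus 8 extra lines) and pick the requantize
 * implementation from the mode option; on x86, ff_pp7_init_x86() may replace
 * the C routines with SIMD versions.
 */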
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    PP7Context *pp7 = ctx->priv;
    const int h = FFALIGN(inlink->h + 16, 16);
    const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);

    pp7->hsub = desc->log2_chroma_w;
    pp7->vsub = desc->log2_chroma_h;

    pp7->temp_stride = FFALIGN(inlink->w + 16, 16);
    pp7->src = av_malloc_array(pp7->temp_stride, (h + 8) * sizeof(uint8_t));

    if (!pp7->src)
        return AVERROR(ENOMEM);

    init_thres2(pp7);

    switch (pp7->mode) {
        case 0: pp7->requantize = hardthresh_c; break;
        case 1: pp7->requantize = softthresh_c; break;
        default:
        case 2: pp7->requantize = mediumthresh_c; break;
    }

    pp7->dctB = dctB_c;

    if (ARCH_X86)
        ff_pp7_init_x86(pp7);

    return 0;
}

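/*
 * Per-frame entry point: extract the decoder's QP table unless a fixed qp
 * was requested, filter the luma and both chroma planes (in place when the
 * input frame is writable and its dimensions are multiples of 8, otherwise
 * into a newly allocated output frame), and copy any fourth (alpha) plane
 * over when a new output frame was allocated.
 */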
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    PP7Context *pp7 = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out = in;

    int qp_stride = 0;
    int8_t *qp_table = NULL;

    if (!pp7->qp) {
        int ret = ff_qp_table_extract(in, &qp_table, &qp_stride, NULL, &pp7->qscale_type);
        if (ret < 0) {
            av_frame_free(&in);
            return ret;
        }
    }

    if (!ctx->is_disabled) {
        const int cw = AV_CEIL_RSHIFT(inlink->w, pp7->hsub);
        const int ch = AV_CEIL_RSHIFT(inlink->h, pp7->vsub);

        /* get a new frame if in-place is not possible or if the dimensions
         * are not multiple of 8 */
        if (!av_frame_is_writable(in) || (inlink->w & 7) || (inlink->h & 7)) {
            const int aligned_w = FFALIGN(inlink->w, 8);
            const int aligned_h = FFALIGN(inlink->h, 8);

            out = ff_get_video_buffer(outlink, aligned_w, aligned_h);
            if (!out) {
                av_frame_free(&in);
                av_freep(&qp_table);
                return AVERROR(ENOMEM);
            }
            av_frame_copy_props(out, in);
            out->width  = in->width;
            out->height = in->height;
        }

        if (qp_table || pp7->qp) {

            filter(pp7, out->data[0], in->data[0], out->linesize[0], in->linesize[0],
                   inlink->w, inlink->h, qp_table, qp_stride, 1);
            filter(pp7, out->data[1], in->data[1], out->linesize[1], in->linesize[1],
                   cw, ch, qp_table, qp_stride, 0);
            filter(pp7, out->data[2], in->data[2], out->linesize[2], in->linesize[2],
                   cw, ch, qp_table, qp_stride, 0);
            emms_c();
        }
    }

    if (in != out) {
        if (in->data[3])
            av_image_copy_plane(out->data[3], out->linesize[3],
                                in ->data[3], in ->linesize[3],
                                inlink->w, inlink->h);
        av_frame_free(&in);
    }
    av_freep(&qp_table);
    return ff_filter_frame(outlink, out);
}

static av_cold void uninit(AVFilterContext *ctx)
{
    PP7Context *pp7 = ctx->priv;
    av_freep(&pp7->src);
}

static const AVFilterPad pp7_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad pp7_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_pp7 = {
    .name          = "pp7",
    .description   = NULL_IF_CONFIG_SMALL("Apply Postprocessing 7 filter."),
    .priv_size     = sizeof(PP7Context),
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = pp7_inputs,
    .outputs       = pp7_outputs,
    .priv_class    = &pp7_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};