FFmpeg 4.4.6
vf_framepack.c
/*
 * Copyright (c) 2013 Vittorio Giovara
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Generate a frame packed video, by combining two views in a single surface.
 */
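
/*
 * A typical invocation (a sketch; the input and output file names are
 * placeholders) packs a left and a right view side by side using the
 * "format" option defined at the bottom of this file:
 *
 *   ffmpeg -i left.mp4 -i right.mp4 \
 *          -filter_complex "[0:v][1:v]framepack=format=sbs[out]" \
 *          -map "[out]" packed.mp4
 */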

#include <string.h>

#include "libavutil/common.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/rational.h"
#include "libavutil/stereo3d.h"

#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "video.h"

#define LEFT 0
#define RIGHT 1

typedef struct FramepackContext {
    const AVClass *class;

    int depth;
    const AVPixFmtDescriptor *pix_desc; ///< agreed pixel format

    enum AVStereo3DType format;         ///< frame pack type output

    AVFrame *input_views[2];            ///< input frames
} FramepackContext;

static const enum AVPixelFormat formats_supported[] = {
    AV_PIX_FMT_GRAY8,     AV_PIX_FMT_GRAY9,     AV_PIX_FMT_GRAY10,
    AV_PIX_FMT_GRAY12,    AV_PIX_FMT_GRAY14,    AV_PIX_FMT_GRAY16,
    AV_PIX_FMT_YUV410P,   AV_PIX_FMT_YUV411P,   AV_PIX_FMT_YUV420P,
    AV_PIX_FMT_YUV422P,   AV_PIX_FMT_YUV440P,   AV_PIX_FMT_YUV444P,
    AV_PIX_FMT_YUVJ411P,  AV_PIX_FMT_YUVJ420P,  AV_PIX_FMT_YUVJ422P,
    AV_PIX_FMT_YUVJ440P,  AV_PIX_FMT_YUVJ444P,
    AV_PIX_FMT_YUVA420P,  AV_PIX_FMT_YUVA422P,  AV_PIX_FMT_YUVA444P,
    AV_PIX_FMT_YUV420P9,  AV_PIX_FMT_YUV422P9,  AV_PIX_FMT_YUV444P9,
    AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV440P10, AV_PIX_FMT_YUV444P10,
    AV_PIX_FMT_YUV420P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV444P12,
    AV_PIX_FMT_YUV420P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV444P14,
    AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
    AV_PIX_FMT_YUVA420P9,  AV_PIX_FMT_YUVA422P9,  AV_PIX_FMT_YUVA444P9,
    AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
    AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12,
    AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
    AV_PIX_FMT_GBRP,   AV_PIX_FMT_GBRP9,   AV_PIX_FMT_GBRP10,
    AV_PIX_FMT_GBRP12, AV_PIX_FMT_GBRP14,  AV_PIX_FMT_GBRP16,
    AV_PIX_FMT_GBRAP,  AV_PIX_FMT_GBRAP10, AV_PIX_FMT_GBRAP12, AV_PIX_FMT_GBRAP16,
    AV_PIX_FMT_NONE
};

static int query_formats(AVFilterContext *ctx)
{
    // this will ensure that formats are the same on all pads
    AVFilterFormats *fmts_list = ff_make_format_list(formats_supported);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

static av_cold void framepack_uninit(AVFilterContext *ctx)
{
    FramepackContext *s = ctx->priv;

    // clean any leftover frame
    av_frame_free(&s->input_views[LEFT]);
    av_frame_free(&s->input_views[RIGHT]);
}

static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s  = outlink->src->priv;

    int width             = ctx->inputs[LEFT]->w;
    int height            = ctx->inputs[LEFT]->h;
    AVRational time_base  = ctx->inputs[LEFT]->time_base;
    AVRational frame_rate = ctx->inputs[LEFT]->frame_rate;

    // check size and fps match on the other input
    if (width  != ctx->inputs[RIGHT]->w ||
        height != ctx->inputs[RIGHT]->h) {
        av_log(ctx, AV_LOG_ERROR,
               "Left and right sizes differ (%dx%d vs %dx%d).\n",
               width, height,
               ctx->inputs[RIGHT]->w, ctx->inputs[RIGHT]->h);
        return AVERROR_INVALIDDATA;
    } else if (av_cmp_q(time_base, ctx->inputs[RIGHT]->time_base) != 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Left and right time bases differ (%d/%d vs %d/%d).\n",
               time_base.num, time_base.den,
               ctx->inputs[RIGHT]->time_base.num,
               ctx->inputs[RIGHT]->time_base.den);
        return AVERROR_INVALIDDATA;
    } else if (av_cmp_q(frame_rate, ctx->inputs[RIGHT]->frame_rate) != 0) {
        av_log(ctx, AV_LOG_ERROR,
               "Left and right framerates differ (%d/%d vs %d/%d).\n",
               frame_rate.num, frame_rate.den,
               ctx->inputs[RIGHT]->frame_rate.num,
               ctx->inputs[RIGHT]->frame_rate.den);
        return AVERROR_INVALIDDATA;
    }

    s->pix_desc = av_pix_fmt_desc_get(outlink->format);
    if (!s->pix_desc)
        return AVERROR_BUG;
    s->depth = s->pix_desc->comp[0].depth;

    // modify output properties as needed
    switch (s->format) {
    case AV_STEREO3D_FRAMESEQUENCE:
        time_base.den  *= 2;
        frame_rate.num *= 2;
        break;
    case AV_STEREO3D_COLUMNS:
    case AV_STEREO3D_SIDEBYSIDE:
        width *= 2;
        break;
    case AV_STEREO3D_LINES:
    case AV_STEREO3D_TOPBOTTOM:
        height *= 2;
        break;
    default:
        av_log(ctx, AV_LOG_ERROR, "Unknown packing mode.");
        return AVERROR_INVALIDDATA;
    }

    outlink->w          = width;
    outlink->h          = height;
    outlink->time_base  = time_base;
    outlink->frame_rate = frame_rate;

    return 0;
}
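
/*
 * A worked example of the geometry above: with two 1920x1080 views at 25 fps,
 * "sbs" and "columns" produce a 3840x1080 output, "tab" and "lines" produce
 * 1920x2160, and "frameseq" keeps 1920x1080 but doubles the frame rate to
 * 50 fps (both time_base.den and frame_rate.num are doubled).
 */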

static void horizontal_frame_pack(AVFilterLink *outlink,
                                  AVFrame *out,
                                  int interleaved)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s = ctx->priv;
    int i, plane;

    if (interleaved && s->depth <= 8) {
        const uint8_t *leftp  = s->input_views[LEFT]->data[0];
        const uint8_t *rightp = s->input_views[RIGHT]->data[0];
        uint8_t *dstp         = out->data[0];
        int length = out->width / 2;
        int lines  = out->height;

        for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
            if (plane == 1 || plane == 2) {
                length = AV_CEIL_RSHIFT(out->width / 2, s->pix_desc->log2_chroma_w);
                lines  = AV_CEIL_RSHIFT(out->height,    s->pix_desc->log2_chroma_h);
            }
            for (i = 0; i < lines; i++) {
                int j;
                leftp  = s->input_views[LEFT]->data[plane] +
                         s->input_views[LEFT]->linesize[plane] * i;
                rightp = s->input_views[RIGHT]->data[plane] +
                         s->input_views[RIGHT]->linesize[plane] * i;
                dstp   = out->data[plane] + out->linesize[plane] * i;
                for (j = 0; j < length; j++) {
                    // interpolate chroma as necessary
                    if ((s->pix_desc->log2_chroma_w ||
                         s->pix_desc->log2_chroma_h) &&
                        (plane == 1 || plane == 2)) {
                        *dstp++ = (*leftp + *rightp) / 2;
                        *dstp++ = (*leftp + *rightp) / 2;
                    } else {
                        *dstp++ = *leftp;
                        *dstp++ = *rightp;
                    }
                    leftp += 1;
                    rightp += 1;
                }
            }
        }
    } else if (interleaved && s->depth > 8) {
        const uint16_t *leftp  = (const uint16_t *)s->input_views[LEFT]->data[0];
        const uint16_t *rightp = (const uint16_t *)s->input_views[RIGHT]->data[0];
        uint16_t *dstp         = (uint16_t *)out->data[0];
        int length = out->width / 2;
        int lines  = out->height;

        for (plane = 0; plane < s->pix_desc->nb_components; plane++) {
            if (plane == 1 || plane == 2) {
                length = AV_CEIL_RSHIFT(out->width / 2, s->pix_desc->log2_chroma_w);
                lines  = AV_CEIL_RSHIFT(out->height,    s->pix_desc->log2_chroma_h);
            }
            for (i = 0; i < lines; i++) {
                int j;
                leftp  = (const uint16_t *)s->input_views[LEFT]->data[plane] +
                         s->input_views[LEFT]->linesize[plane] * i / 2;
                rightp = (const uint16_t *)s->input_views[RIGHT]->data[plane] +
                         s->input_views[RIGHT]->linesize[plane] * i / 2;
                dstp   = (uint16_t *)out->data[plane] + out->linesize[plane] * i / 2;
                for (j = 0; j < length; j++) {
                    // interpolate chroma as necessary
                    if ((s->pix_desc->log2_chroma_w ||
                         s->pix_desc->log2_chroma_h) &&
                        (plane == 1 || plane == 2)) {
                        *dstp++ = (*leftp + *rightp) / 2;
                        *dstp++ = (*leftp + *rightp) / 2;
                    } else {
                        *dstp++ = *leftp;
                        *dstp++ = *rightp;
                    }
                    leftp += 1;
                    rightp += 1;
                }
            }
        }
    } else {
        for (i = 0; i < 2; i++) {
            const int psize = 1 + (s->depth > 8);
            const uint8_t *src[4];
            uint8_t *dst[4];
            int sub_w = psize * s->input_views[i]->width >> s->pix_desc->log2_chroma_w;

            src[0] = s->input_views[i]->data[0];
            src[1] = s->input_views[i]->data[1];
            src[2] = s->input_views[i]->data[2];

            dst[0] = out->data[0] + i * s->input_views[i]->width * psize;
            dst[1] = out->data[1] + i * sub_w;
            dst[2] = out->data[2] + i * sub_w;

            av_image_copy(dst, out->linesize, src, s->input_views[i]->linesize,
                          s->input_views[i]->format,
                          s->input_views[i]->width,
                          s->input_views[i]->height);
        }
    }
}
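
/*
 * In the interleaved (columns) path above, every output row is built as
 * L0 R0 L1 R1 ..., so the packed frame is twice as wide as either view; for
 * subsampled chroma planes the two views' samples are averaged and the result
 * written twice, keeping the chroma grid aligned with the doubled luma width.
 * The non-interleaved (side-by-side) path simply copies each view into its own
 * half of the surface with av_image_copy().
 */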

static void vertical_frame_pack(AVFilterLink *outlink,
                                AVFrame *out,
                                int interleaved)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s = ctx->priv;
    int i;

    for (i = 0; i < 2; i++) {
        const uint8_t *src[4];
        uint8_t *dst[4];
        int linesizes[4];
        int sub_h = s->input_views[i]->height >> s->pix_desc->log2_chroma_h;

        src[0] = s->input_views[i]->data[0];
        src[1] = s->input_views[i]->data[1];
        src[2] = s->input_views[i]->data[2];

        dst[0] = out->data[0] + i * out->linesize[0] *
                 (interleaved + s->input_views[i]->height * (1 - interleaved));
        dst[1] = out->data[1] + i * out->linesize[1] *
                 (interleaved + sub_h * (1 - interleaved));
        dst[2] = out->data[2] + i * out->linesize[2] *
                 (interleaved + sub_h * (1 - interleaved));

        linesizes[0] = out->linesize[0] +
                       interleaved * out->linesize[0];
        linesizes[1] = out->linesize[1] +
                       interleaved * out->linesize[1];
        linesizes[2] = out->linesize[2] +
                       interleaved * out->linesize[2];

        av_image_copy(dst, linesizes, src, s->input_views[i]->linesize,
                      s->input_views[i]->format,
                      s->input_views[i]->width,
                      s->input_views[i]->height);
    }
}
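
/*
 * For the interleaved (lines) case above, each view is copied with a
 * destination stride of 2 * out->linesize[plane], so it fills only every
 * other output row; offsetting the second view's destination by one row
 * yields the L0 R0 L1 R1 ... line interleaving. With interleaved == 0 the
 * second view instead starts one full view height (or sub_h for chroma)
 * further down, giving the top-bottom layout.
 */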

static av_always_inline void spatial_frame_pack(AVFilterLink *outlink,
                                                AVFrame *dst)
{
    AVFilterContext *ctx = outlink->src;
    FramepackContext *s = ctx->priv;
    switch (s->format) {
    case AV_STEREO3D_SIDEBYSIDE:
        horizontal_frame_pack(outlink, dst, 0);
        break;
    case AV_STEREO3D_COLUMNS:
        horizontal_frame_pack(outlink, dst, 1);
        break;
    case AV_STEREO3D_TOPBOTTOM:
        vertical_frame_pack(outlink, dst, 0);
        break;
    case AV_STEREO3D_LINES:
        vertical_frame_pack(outlink, dst, 1);
        break;
    }
}

static int try_push_frame(AVFilterContext *ctx)
{
    FramepackContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVStereo3D *stereo;
    int ret, i;

    if (!(s->input_views[0] && s->input_views[1]))
        return 0;
    if (s->format == AV_STEREO3D_FRAMESEQUENCE) {
        int64_t pts = s->input_views[0]->pts;

        for (i = 0; i < 2; i++) {
            // set correct timestamps
            if (pts != AV_NOPTS_VALUE)
                s->input_views[i]->pts = i == 0 ? pts * 2 : pts * 2 + av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base);

            // set stereo3d side data
            stereo = av_stereo3d_create_side_data(s->input_views[i]);
            if (!stereo)
                return AVERROR(ENOMEM);
            stereo->type = s->format;
            stereo->view = i == LEFT ? AV_STEREO3D_VIEW_LEFT
                                     : AV_STEREO3D_VIEW_RIGHT;

            // filter the frame and immediately relinquish its pointer
            ret = ff_filter_frame(outlink, s->input_views[i]);
            s->input_views[i] = NULL;
            if (ret < 0)
                return ret;
        }
        return ret;
    } else {
        AVFrame *dst = ff_get_video_buffer(outlink, outlink->w, outlink->h);
        if (!dst)
            return AVERROR(ENOMEM);

        spatial_frame_pack(outlink, dst);

        // get any property from the original frame
        ret = av_frame_copy_props(dst, s->input_views[LEFT]);
        if (ret < 0) {
            av_frame_free(&dst);
            return ret;
        }

        for (i = 0; i < 2; i++)
            av_frame_free(&s->input_views[i]);

        // set stereo3d side data
        stereo = av_stereo3d_create_side_data(dst);
        if (!stereo) {
            av_frame_free(&dst);
            return AVERROR(ENOMEM);
        }
        stereo->type = s->format;

        return ff_filter_frame(outlink, dst);
    }
}
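
/*
 * Frame-sequential timestamps above: an input pts p maps to 2 * p in the
 * doubled-rate output time base; the left view is emitted at 2 * p and the
 * right view one output frame duration later, i.e. at
 * 2 * p + av_rescale_q(1, av_inv_q(outlink->frame_rate), outlink->time_base).
 */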

static int activate(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    FramepackContext *s = ctx->priv;
    int ret;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);

    if (!s->input_views[0]) {
        ret = ff_inlink_consume_frame(ctx->inputs[0], &s->input_views[0]);
        if (ret < 0)
            return ret;
    }

    if (!s->input_views[1]) {
        ret = ff_inlink_consume_frame(ctx->inputs[1], &s->input_views[1]);
        if (ret < 0)
            return ret;
    }

    if (s->input_views[0] && s->input_views[1])
        return try_push_frame(ctx);

    FF_FILTER_FORWARD_STATUS(ctx->inputs[0], outlink);
    FF_FILTER_FORWARD_STATUS(ctx->inputs[1], outlink);

    if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
        !ff_outlink_get_status(ctx->inputs[0]) &&
        !s->input_views[0]) {
        ff_inlink_request_frame(ctx->inputs[0]);
        return 0;
    }

    if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
        !ff_outlink_get_status(ctx->inputs[1]) &&
        !s->input_views[1]) {
        ff_inlink_request_frame(ctx->inputs[1]);
        return 0;
    }

    return FFERROR_NOT_READY;
}
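
/*
 * activate() follows the usual two-input pattern: propagate the output status
 * back to both inputs, try to consume one frame from each input, push a packed
 * frame once both views are buffered, forward input EOF to the output, and
 * otherwise request a frame on whichever input is still missing a view.
 */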

#define OFFSET(x) offsetof(FramepackContext, x)
#define VF AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption framepack_options[] = {
    { "format", "Frame pack output format", OFFSET(format), AV_OPT_TYPE_INT,
        { .i64 = AV_STEREO3D_SIDEBYSIDE }, 0, INT_MAX, .flags = VF, .unit = "format" },
    { "sbs", "Views are packed next to each other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_SIDEBYSIDE }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
    { "tab", "Views are packed on top of each other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_TOPBOTTOM }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
    { "frameseq", "Views are one after the other", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_FRAMESEQUENCE }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
    { "lines", "Views are interleaved by lines", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_LINES }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
    { "columns", "Views are interleaved by columns", 0, AV_OPT_TYPE_CONST,
        { .i64 = AV_STEREO3D_COLUMNS }, INT_MIN, INT_MAX, .flags = VF, .unit = "format" },
    { NULL },
};

AVFILTER_DEFINE_CLASS(framepack);

static const AVFilterPad framepack_inputs[] = {
    {
        .name = "left",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    {
        .name = "right",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

static const AVFilterPad framepack_outputs[] = {
    {
        .name         = "packed",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_framepack = {
    .name          = "framepack",
    .description   = NULL_IF_CONFIG_SMALL("Generate a frame packed stereoscopic video."),
    .priv_size     = sizeof(FramepackContext),
    .priv_class    = &framepack_class,
    .query_formats = query_formats,
    .inputs        = framepack_inputs,
    .outputs       = framepack_outputs,
    .activate      = activate,
    .uninit        = framepack_uninit,
};