FFmpeg  4.4.6
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include <stdint.h>
35 
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/mem_internal.h"
40 #include "libavutil/pixdesc.h"
41 #include "libavutil/opt.h"
42 #include "libavutil/thread.h"
43 #include "avcodec.h"
44 #include "dct.h"
45 #include "idctdsp.h"
46 #include "mpeg12.h"
47 #include "mpegvideo.h"
48 #include "mpegvideodata.h"
49 #include "h261.h"
50 #include "h263.h"
51 #include "h263data.h"
52 #include "mjpegenc_common.h"
53 #include "mathops.h"
54 #include "mpegutils.h"
55 #include "mjpegenc.h"
56 #include "speedhqenc.h"
57 #include "msmpeg4.h"
58 #include "pixblockdsp.h"
59 #include "qpeldsp.h"
60 #include "faandct.h"
61 #include "thread.h"
62 #include "aandcttab.h"
63 #include "flv.h"
64 #include "mpeg4video.h"
65 #include "internal.h"
66 #include "bytestream.h"
67 #include "wmv2.h"
68 #include "rv10.h"
69 #include "packet_internal.h"
70 #include <limits.h>
71 #include "sp5x.h"
72 
73 #define QUANT_BIAS_SHIFT 8
74 
75 #define QMAT_SHIFT_MMX 16
76 #define QMAT_SHIFT 21
77 
78 static int encode_picture(MpegEncContext *s, int picture_number);
79 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
80 static int sse_mb(MpegEncContext *s);
81 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
82 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
83 
86 
89  { NULL },
90 };
91 
92 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
93  uint16_t (*qmat16)[2][64],
94  const uint16_t *quant_matrix,
95  int bias, int qmin, int qmax, int intra)
96 {
97  FDCTDSPContext *fdsp = &s->fdsp;
98  int qscale;
99  int shift = 0;
100 
101  for (qscale = qmin; qscale <= qmax; qscale++) {
102  int i;
103  int qscale2;
104 
105  if (s->q_scale_type) qscale2 = ff_mpeg2_non_linear_qscale[qscale];
106  else qscale2 = qscale << 1;
107 
108  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
109 #if CONFIG_FAANDCT
110  fdsp->fdct == ff_faandct ||
111 #endif /* CONFIG_FAANDCT */
112  fdsp->fdct == ff_jpeg_fdct_islow_10) {
113  for (i = 0; i < 64; i++) {
114  const int j = s->idsp.idct_permutation[i];
115  int64_t den = (int64_t) qscale2 * quant_matrix[j];
116  /* 16 <= qscale * quant_matrix[i] <= 7905
117  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
118  * 19952 <= x <= 249205026
119  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
120  * 3444240 >= (1 << 36) / (x) >= 275 */
121 
122  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
123  }
124  } else if (fdsp->fdct == ff_fdct_ifast) {
125  for (i = 0; i < 64; i++) {
126  const int j = s->idsp.idct_permutation[i];
127  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
128  /* 16 <= qscale * quant_matrix[i] <= 7905
129  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
130  * 19952 <= x <= 249205026
131  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
132  * 3444240 >= (1 << 36) / (x) >= 275 */
133 
134  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
135  }
136  } else {
137  for (i = 0; i < 64; i++) {
138  const int j = s->idsp.idct_permutation[i];
139  int64_t den = (int64_t) qscale2 * quant_matrix[j];
140  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
141  * Assume x = qscale * quant_matrix[i]
142  * So 16 <= x <= 7905
143  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
144  * so 32768 >= (1 << 19) / (x) >= 67 */
145  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
146  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
147  // (qscale * quant_matrix[i]);
148  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
149 
150  if (qmat16[qscale][0][i] == 0 ||
151  qmat16[qscale][0][i] == 128 * 256)
152  qmat16[qscale][0][i] = 128 * 256 - 1;
153  qmat16[qscale][1][i] =
154  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
155  qmat16[qscale][0][i]);
156  }
157  }
158 
159  for (i = intra; i < 64; i++) {
160  int64_t max = 8191;
161  if (fdsp->fdct == ff_fdct_ifast) {
162  max = (8191LL * ff_aanscales[i]) >> 14;
163  }
164  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
165  shift++;
166  }
167  }
168  }
169  if (shift) {
170  av_log(s->avctx, AV_LOG_INFO,
171  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
172  QMAT_SHIFT - shift);
173  }
174 }
175 
176 static inline void update_qscale(MpegEncContext *s)
177 {
178  if (s->q_scale_type == 1 && 0) {
179  int i;
180  int bestdiff=INT_MAX;
181  int best = 1;
182 
183  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
184  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
185  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
186  (ff_mpeg2_non_linear_qscale[i] > s->avctx->qmax && !s->vbv_ignore_qmax))
187  continue;
188  if (diff < bestdiff) {
189  bestdiff = diff;
190  best = i;
191  }
192  }
193  s->qscale = best;
194  } else {
195  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
196  (FF_LAMBDA_SHIFT + 7);
197  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
198  }
199 
200  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
202 }
203 
204 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
205 {
206  int i;
207 
208  if (matrix) {
209  put_bits(pb, 1, 1);
210  for (i = 0; i < 64; i++) {
211  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
212  }
213  } else
214  put_bits(pb, 1, 0);
215 }
216 
217 /**
218  * init s->current_picture.qscale_table from s->lambda_table
219  */
221 {
222  int8_t * const qscale_table = s->current_picture.qscale_table;
223  int i;
224 
225  for (i = 0; i < s->mb_num; i++) {
226  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
227  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
228  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
229  s->avctx->qmax);
230  }
231 }
232 
235 {
236 #define COPY(a) dst->a= src->a
237  COPY(pict_type);
238  COPY(current_picture);
239  COPY(f_code);
240  COPY(b_code);
241  COPY(qscale);
242  COPY(lambda);
243  COPY(lambda2);
244  COPY(picture_in_gop_number);
245  COPY(gop_picture_number);
246  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
247  COPY(progressive_frame); // FIXME don't set in encode_header
248  COPY(partitioned_frame); // FIXME don't set in encode_header
249 #undef COPY
250 }
251 
252 static void mpv_encode_init_static(void)
253 {
254  for (int i = -16; i < 16; i++)
255  default_fcode_tab[i + MAX_MV] = 1;
256 }
257 
258 /**
259  * Set the given MpegEncContext to defaults for encoding.
260  * the changed fields will not depend upon the prior state of the MpegEncContext.
261  */
263 {
264  static AVOnce init_static_once = AV_ONCE_INIT;
265 
267 
268  ff_thread_once(&init_static_once, mpv_encode_init_static);
269 
270  s->me.mv_penalty = default_mv_penalty;
271  s->fcode_tab = default_fcode_tab;
272 
273  s->input_picture_number = 0;
274  s->picture_in_gop_number = 0;
275 }
276 
278 {
279  if (ARCH_X86)
281 
283  ff_h263dsp_init(&s->h263dsp);
284  if (!s->dct_quantize)
285  s->dct_quantize = ff_dct_quantize_c;
286  if (!s->denoise_dct)
287  s->denoise_dct = denoise_dct_c;
288  s->fast_dct_quantize = s->dct_quantize;
289  if (s->avctx->trellis)
290  s->dct_quantize = dct_quantize_trellis_c;
291 
292  return 0;
293 }
294 
295 /* init video encoder */
297 {
298  MpegEncContext *s = avctx->priv_data;
299  AVCPBProperties *cpb_props;
300  int i, ret, format_supported;
301 
303 
304  switch (avctx->codec_id) {
306  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
307  avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
308  av_log(avctx, AV_LOG_ERROR,
309  "only YUV420 and YUV422 are supported\n");
310  return AVERROR(EINVAL);
311  }
312  break;
313  case AV_CODEC_ID_MJPEG:
314  case AV_CODEC_ID_AMV:
315  format_supported = 0;
316  /* JPEG color space */
317  if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
318  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
319  avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
320  (avctx->color_range == AVCOL_RANGE_JPEG &&
321  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
322  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
323  avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
324  format_supported = 1;
325  /* MPEG color space */
326  else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
327  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
328  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
329  avctx->pix_fmt == AV_PIX_FMT_YUV444P))
330  format_supported = 1;
331 
332  if (!format_supported) {
333  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
334  return AVERROR(EINVAL);
335  }
336  break;
337  case AV_CODEC_ID_SPEEDHQ:
338  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
339  avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
340  avctx->pix_fmt != AV_PIX_FMT_YUV444P) {
341  av_log(avctx, AV_LOG_ERROR,
342  "only YUV420/YUV422/YUV444 are supported (no alpha support yet)\n");
343  return AVERROR(EINVAL);
344  }
345  break;
346  default:
347  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
348  av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
349  return AVERROR(EINVAL);
350  }
351  }
352 
353  switch (avctx->pix_fmt) {
354  case AV_PIX_FMT_YUVJ444P:
355  case AV_PIX_FMT_YUV444P:
356  s->chroma_format = CHROMA_444;
357  break;
358  case AV_PIX_FMT_YUVJ422P:
359  case AV_PIX_FMT_YUV422P:
360  s->chroma_format = CHROMA_422;
361  break;
362  case AV_PIX_FMT_YUVJ420P:
363  case AV_PIX_FMT_YUV420P:
364  default:
365  s->chroma_format = CHROMA_420;
366  break;
367  }
368 
369  avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
370 
371 #if FF_API_PRIVATE_OPT
373  if (avctx->rtp_payload_size)
374  s->rtp_payload_size = avctx->rtp_payload_size;
375  if (avctx->me_penalty_compensation)
376  s->me_penalty_compensation = avctx->me_penalty_compensation;
377  if (avctx->pre_me)
378  s->me_pre = avctx->pre_me;
380 #endif
381 
382  s->bit_rate = avctx->bit_rate;
383  s->width = avctx->width;
384  s->height = avctx->height;
385  if (avctx->gop_size > 600 &&
387  av_log(avctx, AV_LOG_WARNING,
388  "keyframe interval too large!, reducing it from %d to %d\n",
389  avctx->gop_size, 600);
390  avctx->gop_size = 600;
391  }
392  s->gop_size = avctx->gop_size;
393  s->avctx = avctx;
394  if (avctx->max_b_frames > MAX_B_FRAMES) {
395  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
396  "is %d.\n", MAX_B_FRAMES);
397  avctx->max_b_frames = MAX_B_FRAMES;
398  }
399  s->max_b_frames = avctx->max_b_frames;
400  s->codec_id = avctx->codec->id;
401  s->strict_std_compliance = avctx->strict_std_compliance;
402  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
403  s->rtp_mode = !!s->rtp_payload_size;
404  s->intra_dc_precision = avctx->intra_dc_precision;
405 
406  // workaround some differences between how applications specify dc precision
407  if (s->intra_dc_precision < 0) {
408  s->intra_dc_precision += 8;
409  } else if (s->intra_dc_precision >= 8)
410  s->intra_dc_precision -= 8;
411 
412  if (s->intra_dc_precision < 0) {
413  av_log(avctx, AV_LOG_ERROR,
414  "intra dc precision must be positive, note some applications use"
415  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
416  return AVERROR(EINVAL);
417  }
418 
419  if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
420  s->huffman = 0;
421 
422  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
423  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
424  return AVERROR(EINVAL);
425  }
426  s->user_specified_pts = AV_NOPTS_VALUE;
427 
428  if (s->gop_size <= 1) {
429  s->intra_only = 1;
430  s->gop_size = 12;
431  } else {
432  s->intra_only = 0;
433  }
434 
435  /* Fixed QSCALE */
436  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
437 
438  s->adaptive_quant = (avctx->lumi_masking ||
439  avctx->dark_masking ||
440  avctx->temporal_cplx_masking ||
441  avctx->spatial_cplx_masking ||
442  avctx->p_masking ||
443  s->border_masking ||
444  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
445  !s->fixed_qscale;
446 
447  s->loop_filter = !!(avctx->flags & AV_CODEC_FLAG_LOOP_FILTER);
448 
449  if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
450  switch(avctx->codec_id) {
453  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
454  break;
455  case AV_CODEC_ID_MPEG4:
459  if (avctx->rc_max_rate >= 15000000) {
460  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
461  } else if(avctx->rc_max_rate >= 2000000) {
462  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
463  } else if(avctx->rc_max_rate >= 384000) {
464  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
465  } else
466  avctx->rc_buffer_size = 40;
467  avctx->rc_buffer_size *= 16384;
468  break;
469  }
470  if (avctx->rc_buffer_size) {
471  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
472  }
473  }
474 
475  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
476  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
477  return AVERROR(EINVAL);
478  }
479 
480  if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
481  av_log(avctx, AV_LOG_INFO,
482  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
483  }
484 
485  if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
486  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
487  return AVERROR(EINVAL);
488  }
489 
490  if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
491  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
492  return AVERROR(EINVAL);
493  }
494 
495  if (avctx->rc_max_rate &&
496  avctx->rc_max_rate == avctx->bit_rate &&
497  avctx->rc_max_rate != avctx->rc_min_rate) {
498  av_log(avctx, AV_LOG_INFO,
499  "impossible bitrate constraints, this will fail\n");
500  }
501 
502  if (avctx->rc_buffer_size &&
503  avctx->bit_rate * (int64_t)avctx->time_base.num >
504  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
505  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
506  return AVERROR(EINVAL);
507  }
508 
509  if (!s->fixed_qscale &&
510  avctx->bit_rate * av_q2d(avctx->time_base) >
511  avctx->bit_rate_tolerance) {
512  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
513  av_log(avctx, AV_LOG_WARNING,
514  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
515  if (nbt <= INT_MAX) {
516  avctx->bit_rate_tolerance = nbt;
517  } else
518  avctx->bit_rate_tolerance = INT_MAX;
519  }
520 
521  if (avctx->rc_max_rate &&
522  avctx->rc_min_rate == avctx->rc_max_rate &&
523  (s->codec_id == AV_CODEC_ID_MPEG1VIDEO ||
524  s->codec_id == AV_CODEC_ID_MPEG2VIDEO) &&
525  90000LL * (avctx->rc_buffer_size - 1) >
526  avctx->rc_max_rate * 0xFFFFLL) {
527  av_log(avctx, AV_LOG_INFO,
528  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
529  "specified vbv buffer is too large for the given bitrate!\n");
530  }
531 
532  if ((avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
533  s->codec_id != AV_CODEC_ID_H263 && s->codec_id != AV_CODEC_ID_H263P &&
534  s->codec_id != AV_CODEC_ID_FLV1) {
535  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
536  return AVERROR(EINVAL);
537  }
538 
539  if (s->obmc && avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
540  av_log(avctx, AV_LOG_ERROR,
541  "OBMC is only supported with simple mb decision\n");
542  return AVERROR(EINVAL);
543  }
544 
545  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
546  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
547  return AVERROR(EINVAL);
548  }
549 
550  if (s->max_b_frames &&
551  s->codec_id != AV_CODEC_ID_MPEG4 &&
552  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
553  s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
554  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
555  return AVERROR(EINVAL);
556  }
557  if (s->max_b_frames < 0) {
558  av_log(avctx, AV_LOG_ERROR,
559  "max b frames must be 0 or positive for mpegvideo based encoders\n");
560  return AVERROR(EINVAL);
561  }
562 
563  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
564  s->codec_id == AV_CODEC_ID_H263 ||
565  s->codec_id == AV_CODEC_ID_H263P) &&
566  (avctx->sample_aspect_ratio.num > 255 ||
567  avctx->sample_aspect_ratio.den > 255)) {
568  av_log(avctx, AV_LOG_WARNING,
569  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
572  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
573  }
574 
575  if ((s->codec_id == AV_CODEC_ID_H263 ||
576  s->codec_id == AV_CODEC_ID_H263P) &&
577  (avctx->width > 2048 ||
578  avctx->height > 1152 )) {
579  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
580  return AVERROR(EINVAL);
581  }
582  if (s->codec_id == AV_CODEC_ID_FLV1 &&
583  (avctx->width > 65535 ||
584  avctx->height > 65535 )) {
585  av_log(avctx, AV_LOG_ERROR, "FLV does not support resolutions above 16bit\n");
586  return AVERROR(EINVAL);
587  }
588  if ((s->codec_id == AV_CODEC_ID_H263 ||
589  s->codec_id == AV_CODEC_ID_H263P) &&
590  ((avctx->width &3) ||
591  (avctx->height&3) )) {
592  av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
593  return AVERROR(EINVAL);
594  }
595 
596  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
597  (avctx->width > 4095 ||
598  avctx->height > 4095 )) {
599  av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
600  return AVERROR(EINVAL);
601  }
602 
603  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
604  (avctx->width > 16383 ||
605  avctx->height > 16383 )) {
606  av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
607  return AVERROR(EINVAL);
608  }
609 
610  if (s->codec_id == AV_CODEC_ID_RV10 &&
611  (avctx->width &15 ||
612  avctx->height&15 )) {
613  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
614  return AVERROR(EINVAL);
615  }
616 
617  if (s->codec_id == AV_CODEC_ID_RV20 &&
618  (avctx->width &3 ||
619  avctx->height&3 )) {
620  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
621  return AVERROR(EINVAL);
622  }
623 
624  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
625  s->codec_id == AV_CODEC_ID_WMV2) &&
626  avctx->width & 1) {
627  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
628  return AVERROR(EINVAL);
629  }
630 
632  s->codec_id != AV_CODEC_ID_MPEG4 && s->codec_id != AV_CODEC_ID_MPEG2VIDEO) {
633  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
634  return AVERROR(EINVAL);
635  }
636 
637 #if FF_API_PRIVATE_OPT
639  if (avctx->mpeg_quant)
640  s->mpeg_quant = 1;
642 #endif
643 
644  // FIXME mpeg2 uses that too
645  if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
646  && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
647  av_log(avctx, AV_LOG_ERROR,
648  "mpeg2 style quantization not supported by codec\n");
649  return AVERROR(EINVAL);
650  }
651 
652  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
653  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
654  return AVERROR(EINVAL);
655  }
656 
657  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
658  avctx->mb_decision != FF_MB_DECISION_RD) {
659  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
660  return AVERROR(EINVAL);
661  }
662 
663  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
664  (s->codec_id == AV_CODEC_ID_AMV ||
665  s->codec_id == AV_CODEC_ID_MJPEG)) {
666  // Used to produce garbage with MJPEG.
667  av_log(avctx, AV_LOG_ERROR,
668  "QP RD is no longer compatible with MJPEG or AMV\n");
669  return AVERROR(EINVAL);
670  }
671 
672 #if FF_API_PRIVATE_OPT
674  if (avctx->scenechange_threshold)
675  s->scenechange_threshold = avctx->scenechange_threshold;
677 #endif
678 
679  if (s->scenechange_threshold < 1000000000 &&
680  (avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)) {
681  av_log(avctx, AV_LOG_ERROR,
682  "closed gop with scene change detection are not supported yet, "
683  "set threshold to 1000000000\n");
684  return AVERROR_PATCHWELCOME;
685  }
686 
687  if (avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
688  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
689  s->strict_std_compliance >= FF_COMPLIANCE_NORMAL) {
690  av_log(avctx, AV_LOG_ERROR,
691  "low delay forcing is only available for mpeg2, "
692  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
693  return AVERROR(EINVAL);
694  }
695  if (s->max_b_frames != 0) {
696  av_log(avctx, AV_LOG_ERROR,
697  "B-frames cannot be used with low delay\n");
698  return AVERROR(EINVAL);
699  }
700  }
701 
702  if (s->q_scale_type == 1) {
703  if (avctx->qmax > 28) {
704  av_log(avctx, AV_LOG_ERROR,
705  "non linear quant only supports qmax <= 28 currently\n");
706  return AVERROR_PATCHWELCOME;
707  }
708  }
709 
710  if (avctx->slices > 1 &&
711  (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
712  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
713  return AVERROR(EINVAL);
714  }
715 
716  if (avctx->thread_count > 1 &&
717  s->codec_id != AV_CODEC_ID_MPEG4 &&
718  s->codec_id != AV_CODEC_ID_MPEG1VIDEO &&
719  s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
720  s->codec_id != AV_CODEC_ID_MJPEG &&
721  (s->codec_id != AV_CODEC_ID_H263P)) {
722  av_log(avctx, AV_LOG_ERROR,
723  "multi threaded encoding not supported by codec\n");
724  return AVERROR_PATCHWELCOME;
725  }
726 
727  if (avctx->thread_count < 1) {
728  av_log(avctx, AV_LOG_ERROR,
729  "automatic thread number detection not supported by codec, "
730  "patch welcome\n");
731  return AVERROR_PATCHWELCOME;
732  }
733 
734  if (!avctx->time_base.den || !avctx->time_base.num) {
735  av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
736  return AVERROR(EINVAL);
737  }
738 
739 #if FF_API_PRIVATE_OPT
741  if (avctx->b_frame_strategy)
742  s->b_frame_strategy = avctx->b_frame_strategy;
743  if (avctx->b_sensitivity != 40)
744  s->b_sensitivity = avctx->b_sensitivity;
746 #endif
747 
748  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
749  av_log(avctx, AV_LOG_INFO,
750  "notice: b_frame_strategy only affects the first pass\n");
751  s->b_frame_strategy = 0;
752  }
753 
754  i = av_gcd(avctx->time_base.den, avctx->time_base.num);
755  if (i > 1) {
756  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
757  avctx->time_base.den /= i;
758  avctx->time_base.num /= i;
759  //return -1;
760  }
761 
762  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || s->codec_id == AV_CODEC_ID_MJPEG || s->codec_id == AV_CODEC_ID_AMV || s->codec_id == AV_CODEC_ID_SPEEDHQ) {
763  // (a + x * 3 / 8) / x
764  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
765  s->inter_quant_bias = 0;
766  } else {
767  s->intra_quant_bias = 0;
768  // (a - x / 4) / x
769  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
770  }
771 
772  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
773  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
774  return AVERROR(EINVAL);
775  }
776 
777  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
778 
779  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
780  avctx->time_base.den > (1 << 16) - 1) {
781  av_log(avctx, AV_LOG_ERROR,
782  "timebase %d/%d not supported by MPEG 4 standard, "
783  "the maximum admitted value for the timebase denominator "
784  "is %d\n", avctx->time_base.num, avctx->time_base.den,
785  (1 << 16) - 1);
786  return AVERROR(EINVAL);
787  }
788  s->time_increment_bits = av_log2(avctx->time_base.den - 1) + 1;
789 
790  switch (avctx->codec->id) {
792  s->out_format = FMT_MPEG1;
793  s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
794  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
795  break;
797  s->out_format = FMT_MPEG1;
798  s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
799  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
800  s->rtp_mode = 1;
801  break;
802  case AV_CODEC_ID_MJPEG:
803  case AV_CODEC_ID_AMV:
804  s->out_format = FMT_MJPEG;
805  s->intra_only = 1; /* force intra only for jpeg */
808  if ((ret = ff_mjpeg_encode_init(s)) < 0)
809  return ret;
810  avctx->delay = 0;
811  s->low_delay = 1;
812  break;
813  case AV_CODEC_ID_SPEEDHQ:
814  s->out_format = FMT_SPEEDHQ;
815  s->intra_only = 1; /* force intra only for SHQ */
818  if ((ret = ff_speedhq_encode_init(s)) < 0)
819  return ret;
820  avctx->delay = 0;
821  s->low_delay = 1;
822  break;
823  case AV_CODEC_ID_H261:
824  if (!CONFIG_H261_ENCODER)
826  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
827  av_log(avctx, AV_LOG_ERROR,
828  "The specified picture size of %dx%d is not valid for the "
829  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
830  s->width, s->height);
831  return AVERROR(EINVAL);
832  }
833  s->out_format = FMT_H261;
834  avctx->delay = 0;
835  s->low_delay = 1;
836  s->rtp_mode = 0; /* Sliced encoding not supported */
837  break;
838  case AV_CODEC_ID_H263:
839  if (!CONFIG_H263_ENCODER)
842  s->width, s->height) == 8) {
843  av_log(avctx, AV_LOG_ERROR,
844  "The specified picture size of %dx%d is not valid for "
845  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
846  "352x288, 704x576, and 1408x1152. "
847  "Try H.263+.\n", s->width, s->height);
848  return AVERROR(EINVAL);
849  }
850  s->out_format = FMT_H263;
851  avctx->delay = 0;
852  s->low_delay = 1;
853  break;
854  case AV_CODEC_ID_H263P:
855  s->out_format = FMT_H263;
856  s->h263_plus = 1;
857  /* Fx */
858  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
859  s->modified_quant = s->h263_aic;
860  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
861  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
862 
863  /* /Fx */
864  /* These are just to be sure */
865  avctx->delay = 0;
866  s->low_delay = 1;
867  break;
868  case AV_CODEC_ID_FLV1:
869  s->out_format = FMT_H263;
870  s->h263_flv = 2; /* format = 1; 11-bit codes */
871  s->unrestricted_mv = 1;
872  s->rtp_mode = 0; /* don't allow GOB */
873  avctx->delay = 0;
874  s->low_delay = 1;
875  break;
876  case AV_CODEC_ID_RV10:
877  s->out_format = FMT_H263;
878  avctx->delay = 0;
879  s->low_delay = 1;
880  break;
881  case AV_CODEC_ID_RV20:
882  s->out_format = FMT_H263;
883  avctx->delay = 0;
884  s->low_delay = 1;
885  s->modified_quant = 1;
886  s->h263_aic = 1;
887  s->h263_plus = 1;
888  s->loop_filter = 1;
889  s->unrestricted_mv = 0;
890  break;
891  case AV_CODEC_ID_MPEG4:
892  s->out_format = FMT_H263;
893  s->h263_pred = 1;
894  s->unrestricted_mv = 1;
895  s->low_delay = s->max_b_frames ? 0 : 1;
896  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
897  break;
899  s->out_format = FMT_H263;
900  s->h263_pred = 1;
901  s->unrestricted_mv = 1;
902  s->msmpeg4_version = 2;
903  avctx->delay = 0;
904  s->low_delay = 1;
905  break;
907  s->out_format = FMT_H263;
908  s->h263_pred = 1;
909  s->unrestricted_mv = 1;
910  s->msmpeg4_version = 3;
911  s->flipflop_rounding = 1;
912  avctx->delay = 0;
913  s->low_delay = 1;
914  break;
915  case AV_CODEC_ID_WMV1:
916  s->out_format = FMT_H263;
917  s->h263_pred = 1;
918  s->unrestricted_mv = 1;
919  s->msmpeg4_version = 4;
920  s->flipflop_rounding = 1;
921  avctx->delay = 0;
922  s->low_delay = 1;
923  break;
924  case AV_CODEC_ID_WMV2:
925  s->out_format = FMT_H263;
926  s->h263_pred = 1;
927  s->unrestricted_mv = 1;
928  s->msmpeg4_version = 5;
929  s->flipflop_rounding = 1;
930  avctx->delay = 0;
931  s->low_delay = 1;
932  break;
933  default:
934  return AVERROR(EINVAL);
935  }
936 
937 #if FF_API_PRIVATE_OPT
939  if (avctx->noise_reduction)
940  s->noise_reduction = avctx->noise_reduction;
942 #endif
943 
944  avctx->has_b_frames = !s->low_delay;
945 
946  s->encoding = 1;
947 
948  s->progressive_frame =
949  s->progressive_sequence = !(avctx->flags & (AV_CODEC_FLAG_INTERLACED_DCT |
951  s->alternate_scan);
952 
953  /* init */
955  if ((ret = ff_mpv_common_init(s)) < 0)
956  return ret;
957 
958  ff_fdctdsp_init(&s->fdsp, avctx);
959  ff_me_cmp_init(&s->mecc, avctx);
960  ff_mpegvideoencdsp_init(&s->mpvencdsp, avctx);
961  ff_pixblockdsp_init(&s->pdsp, avctx);
962  ff_qpeldsp_init(&s->qdsp);
963 
964  if (s->msmpeg4_version) {
965  int ac_stats_size = 2 * 2 * (MAX_LEVEL + 1) * (MAX_RUN + 1) * 2 * sizeof(int);
966  if (!(s->ac_stats = av_mallocz(ac_stats_size)))
967  return AVERROR(ENOMEM);
968  }
969 
970  if (!(avctx->stats_out = av_mallocz(256)) ||
971  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix, 32) ||
972  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix, 32) ||
973  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix, 32) ||
974  !FF_ALLOCZ_TYPED_ARRAY(s->q_intra_matrix16, 32) ||
975  !FF_ALLOCZ_TYPED_ARRAY(s->q_chroma_intra_matrix16, 32) ||
976  !FF_ALLOCZ_TYPED_ARRAY(s->q_inter_matrix16, 32) ||
977  !FF_ALLOCZ_TYPED_ARRAY(s->input_picture, MAX_PICTURE_COUNT) ||
978  !FF_ALLOCZ_TYPED_ARRAY(s->reordered_input_picture, MAX_PICTURE_COUNT))
979  return AVERROR(ENOMEM);
980 
981  if (s->noise_reduction) {
982  if (!FF_ALLOCZ_TYPED_ARRAY(s->dct_offset, 2))
983  return AVERROR(ENOMEM);
984  }
985 
987 
988  if ((CONFIG_H263P_ENCODER || CONFIG_RV20_ENCODER) && s->modified_quant)
989  s->chroma_qscale_table = ff_h263_chroma_qscale_table;
990 
991  if (s->slice_context_count > 1) {
992  s->rtp_mode = 1;
993 
994  if (avctx->codec_id == AV_CODEC_ID_H263P)
995  s->h263_slice_structured = 1;
996  }
997 
998  s->quant_precision = 5;
999 
1000 #if FF_API_PRIVATE_OPT
1002  if (avctx->frame_skip_threshold)
1003  s->frame_skip_threshold = avctx->frame_skip_threshold;
1004  if (avctx->frame_skip_factor)
1005  s->frame_skip_factor = avctx->frame_skip_factor;
1006  if (avctx->frame_skip_exp)
1007  s->frame_skip_exp = avctx->frame_skip_exp;
1008  if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
1009  s->frame_skip_cmp = avctx->frame_skip_cmp;
1011 #endif
1012 
1013  ff_set_cmp(&s->mecc, s->mecc.ildct_cmp, avctx->ildct_cmp);
1014  ff_set_cmp(&s->mecc, s->mecc.frame_skip_cmp, s->frame_skip_cmp);
1015 
1016  if (CONFIG_H261_ENCODER && s->out_format == FMT_H261)
1018  if (CONFIG_H263_ENCODER && s->out_format == FMT_H263)
1020  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
1023  && s->out_format == FMT_MPEG1)
1025 
1026  /* init q matrix */
1027  for (i = 0; i < 64; i++) {
1028  int j = s->idsp.idct_permutation[i];
1029  if (CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4 &&
1030  s->mpeg_quant) {
1031  s->intra_matrix[j] = ff_mpeg4_default_intra_matrix[i];
1032  s->inter_matrix[j] = ff_mpeg4_default_non_intra_matrix[i];
1033  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1034  s->intra_matrix[j] =
1035  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1036  } else if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
1037  s->intra_matrix[j] =
1038  s->inter_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1039  } else {
1040  /* MPEG-1/2 */
1041  s->chroma_intra_matrix[j] =
1042  s->intra_matrix[j] = ff_mpeg1_default_intra_matrix[i];
1043  s->inter_matrix[j] = ff_mpeg1_default_non_intra_matrix[i];
1044  }
1045  if (avctx->intra_matrix)
1046  s->intra_matrix[j] = avctx->intra_matrix[i];
1047  if (avctx->inter_matrix)
1048  s->inter_matrix[j] = avctx->inter_matrix[i];
1049  }
1050 
1051  /* precompute matrix */
1052  /* for mjpeg, we do include qscale in the matrix */
1053  if (s->out_format != FMT_MJPEG) {
1054  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
1055  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1056  31, 1);
1057  ff_convert_matrix(s, s->q_inter_matrix, s->q_inter_matrix16,
1058  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1059  31, 0);
1060  }
1061 
1062  if ((ret = ff_rate_control_init(s)) < 0)
1063  return ret;
1064 
1065 #if FF_API_PRIVATE_OPT
1067  if (avctx->brd_scale)
1068  s->brd_scale = avctx->brd_scale;
1069 
1070  if (avctx->prediction_method)
1071  s->pred = avctx->prediction_method + 1;
1073 #endif
1074 
1075  if (s->b_frame_strategy == 2) {
1076  for (i = 0; i < s->max_b_frames + 2; i++) {
1077  s->tmp_frames[i] = av_frame_alloc();
1078  if (!s->tmp_frames[i])
1079  return AVERROR(ENOMEM);
1080 
1081  s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
1082  s->tmp_frames[i]->width = s->width >> s->brd_scale;
1083  s->tmp_frames[i]->height = s->height >> s->brd_scale;
1084 
1085  ret = av_frame_get_buffer(s->tmp_frames[i], 0);
1086  if (ret < 0)
1087  return ret;
1088  }
1089  }
1090 
1091  cpb_props = ff_add_cpb_side_data(avctx);
1092  if (!cpb_props)
1093  return AVERROR(ENOMEM);
1094  cpb_props->max_bitrate = avctx->rc_max_rate;
1095  cpb_props->min_bitrate = avctx->rc_min_rate;
1096  cpb_props->avg_bitrate = avctx->bit_rate;
1097  cpb_props->buffer_size = avctx->rc_buffer_size;
1098 
1099  return 0;
1100 }
1101 
1103 {
1104  MpegEncContext *s = avctx->priv_data;
1105  int i;
1106 
1108 
1110  if (CONFIG_MJPEG_ENCODER &&
1111  s->out_format == FMT_MJPEG)
1113 
1114  av_freep(&avctx->extradata);
1115 
1116  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1117  av_frame_free(&s->tmp_frames[i]);
1118 
1119  ff_free_picture_tables(&s->new_picture);
1120  ff_mpeg_unref_picture(avctx, &s->new_picture);
1121 
1122  av_freep(&avctx->stats_out);
1123  av_freep(&s->ac_stats);
1124 
1125  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
1126  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
1127  s->q_chroma_intra_matrix= NULL;
1128  s->q_chroma_intra_matrix16= NULL;
1129  av_freep(&s->q_intra_matrix);
1130  av_freep(&s->q_inter_matrix);
1131  av_freep(&s->q_intra_matrix16);
1132  av_freep(&s->q_inter_matrix16);
1133  av_freep(&s->input_picture);
1134  av_freep(&s->reordered_input_picture);
1135  av_freep(&s->dct_offset);
1136 
1137  return 0;
1138 }
1139 
/**
 * Sum of absolute errors of a 16x16 block against a constant reference value.
 *
 * @param src    top-left sample of the 16x16 block
 * @param ref    constant value each sample is compared against
 * @param stride distance in bytes between successive rows of src
 * @return sum over the 256 samples of |src[x,y] - ref|
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int total = 0;

    for (int row = 0; row < 16; row++) {
        const uint8_t *line = src + row * stride;
        for (int col = 0; col < 16; col++)
            total += abs(line[col] - ref);
    }

    return total;
}
1153 
1155  uint8_t *ref, int stride)
1156 {
/* Counts 16x16 blocks (over the area truncated to whole macroblocks) where
 * approximating the block by its own mean beats inter prediction from ref:
 * for each block, acc is incremented when sae + 500 < sad, i.e. the flat-DC
 * residual (plus a fixed bias of 500) is smaller than the SAD against the
 * co-located block in ref. Returns the number of such blocks. */
1157  int x, y, w, h;
1158  int acc = 0;
1159 
/* Truncate to a whole number of 16x16 blocks; partial edge blocks are ignored. */
1160  w = s->width & ~15;
1161  h = s->height & ~15;
1162 
1163  for (y = 0; y < h; y += 16) {
1164  for (x = 0; x < w; x += 16) {
1165  int offset = x + y * stride;
/* SAD of src block vs the co-located ref block (inter-prediction cost proxy). */
1166  int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1167  stride, 16);
/* Rounded block mean: pix_sum over 256 samples, +128 for rounding, >>8. */
1168  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
/* SAE of the block against its own mean (intra flatness proxy). */
1169  int sae = get_sae(src + offset, mean, stride);
1170 
/* 500 is a fixed bias in favor of inter; value chosen empirically upstream. */
1171  acc += sae + 500 < sad;
1172  }
1173  }
1174  return acc;
1175 }
1176 
1177 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
1178 {
1179  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
1180  s->chroma_x_shift, s->chroma_y_shift, s->out_format,
1181  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
1182  &s->linesize, &s->uvlinesize);
1183 }
1184 
/* Accepts one user-supplied frame (or NULL to flush) into the encoder's
 * input_picture queue.
 *
 * For a real frame: validates/derives its pts, picks an unused Picture slot,
 * and either references the user frame directly (when strides and alignment
 * allow in-place use) or copies plane data into freshly allocated, padded
 * buffers. For NULL (flush): shifts the queue so that input_picture[0] holds
 * the oldest pending frame. Returns 0 on success or a negative AVERROR. */
1185 static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
1186 {
1187  Picture *pic = NULL;
1188  int64_t pts;
1189  int i, display_picture_number = 0, ret;
/* Frames of reordering delay introduced by B-frames (or 1 for non-low-delay). */
1190  int encoding_delay = s->max_b_frames ? s->max_b_frames
1191  : (s->low_delay ? 0 : 1);
1192  int flush_offset = 1;
/* direct == 1: the user frame can be used in place without copying. */
1193  int direct = 1;
1194 
1195  if (pic_arg) {
1196  pts = pic_arg->pts;
1197  display_picture_number = s->input_picture_number++;
1198 
1199  if (pts != AV_NOPTS_VALUE) {
/* pts must be strictly increasing relative to the previous input frame. */
1200  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1201  int64_t last = s->user_specified_pts;
1202 
1203  if (pts <= last) {
1204  av_log(s->avctx, AV_LOG_ERROR,
1205  "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
1206  pts, last);
1207  return AVERROR(EINVAL);
1208  }
1209 
/* Remember the pts step of the second frame; used later for dts of frame 0. */
1210  if (!s->low_delay && display_picture_number == 1)
1211  s->dts_delta = pts - last;
1212  }
1213  s->user_specified_pts = pts;
1214  } else {
/* No pts given: guess last+1, or fall back to the display number. */
1215  if (s->user_specified_pts != AV_NOPTS_VALUE) {
1216  s->user_specified_pts =
1217  pts = s->user_specified_pts + 1;
1218  av_log(s->avctx, AV_LOG_INFO,
1219  "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
1220  pts);
1221  } else {
1222  pts = display_picture_number;
1223  }
1224  }
1225 
/* Direct (zero-copy) use requires matching strides, whole-macroblock
 * dimensions, and STRIDE_ALIGN-aligned data pointer and linesize. */
1226  if (!pic_arg->buf[0] ||
1227  pic_arg->linesize[0] != s->linesize ||
1228  pic_arg->linesize[1] != s->uvlinesize ||
1229  pic_arg->linesize[2] != s->uvlinesize)
1230  direct = 0;
1231  if ((s->width & 15) || (s->height & 15))
1232  direct = 0;
1233  if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
1234  direct = 0;
1235  if (s->linesize & (STRIDE_ALIGN-1))
1236  direct = 0;
1237 
1238  ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
1239  pic_arg->linesize[1], s->linesize, s->uvlinesize);
1240 
1241  i = ff_find_unused_picture(s->avctx, s->picture, direct);
1242  if (i < 0)
1243  return i;
1244 
1245  pic = &s->picture[i];
1246  pic->reference = 3;
1247 
1248  if (direct) {
/* Zero-copy path: just take a reference on the user's frame. */
1249  if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
1250  return ret;
1251  }
1252  ret = alloc_picture(s, pic, direct);
1253  if (ret < 0)
1254  return ret;
1255 
1256  if (!direct) {
/* If the user data already sits exactly at our in-place offset, no copy
 * is needed; otherwise copy each plane into the padded buffer. */
1257  if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
1258  pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
1259  pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
1260  // empty
1261  } else {
1262  int h_chroma_shift, v_chroma_shift;
1263  av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
1264  &h_chroma_shift,
1265  &v_chroma_shift);
1266 
1267  for (i = 0; i < 3; i++) {
1268  ptrdiff_t src_stride = pic_arg->linesize[i];
1269  ptrdiff_t dst_stride = i ? s->uvlinesize : s->linesize;
1270  int h_shift = i ? h_chroma_shift : 0;
1271  int v_shift = i ? v_chroma_shift : 0;
1272  int w = AV_CEIL_RSHIFT(s->width , h_shift);
1273  int h = AV_CEIL_RSHIFT(s->height, v_shift);
1274  uint8_t *src = pic_arg->data[i];
1275  uint8_t *dst = pic->f->data[i];
/* Bottom padding in luma rows; widened to 32 for tall-aligned
 * interlaced MPEG-2 (see condition below). */
1276  int vpad = 16;
1277 
1278  if ( s->codec_id == AV_CODEC_ID_MPEG2VIDEO
1279  && !s->progressive_sequence
1280  && FFALIGN(s->height, 32) - s->height > 16)
1281  vpad = 32;
1282 
1283  if (!s->avctx->rc_buffer_size)
1284  dst += INPLACE_OFFSET;
1285 
/* Equal strides: one memcpy covering h-1 full rows plus w bytes of
 * the last row (avoids reading past the source's last line). */
1286  if (src_stride == dst_stride)
1287  memcpy(dst, src, src_stride * h - src_stride + w);
1288  else {
1289  int h2 = h;
1290  uint8_t *dst2 = dst;
1291  while (h2--) {
1292  memcpy(dst2, src, w);
1293  dst2 += dst_stride;
1294  src += src_stride;
1295  }
1296  }
/* Extend edges when dimensions are not macroblock-aligned. */
1297  if ((s->width & 15) || (s->height & (vpad-1))) {
1298  s->mpvencdsp.draw_edges(dst, dst_stride,
1299  w, h,
1300  16 >> h_shift,
1301  vpad >> v_shift,
1302  EDGE_BOTTOM);
1303  }
1304  }
1305  emms_c();
1306  }
1307  }
1308  ret = av_frame_copy_props(pic->f, pic_arg);
1309  if (ret < 0)
1310  return ret;
1311 
1312  pic->f->display_picture_number = display_picture_number;
1313  pic->f->pts = pts; // we set this here to avoid modifying pic_arg
1314  } else {
1315  /* Flushing: When we have not received enough input frames,
1316  * ensure s->input_picture[0] contains the first picture */
1317  for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
1318  if (s->input_picture[flush_offset])
1319  break;
1320 
1321  if (flush_offset <= 1)
1322  flush_offset = 1;
1323  else
1324  encoding_delay = encoding_delay - flush_offset + 1;
1325  }
1326 
1327  /* shift buffer entries */
/* NOTE(review): the shift loop runs to MAX_PICTURE_COUNT while the
 * NULL-clearing loop below is bounded by MAX_B_FRAMES; this looks
 * intentional (only the first MAX_B_FRAMES+1 slots are queue entries)
 * but the mixed bounds are worth confirming against the struct layout. */
1328  for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
1329  s->input_picture[i - flush_offset] = s->input_picture[i];
1330  for (int i = MAX_B_FRAMES + 1 - flush_offset; i <= MAX_B_FRAMES; i++)
1331  s->input_picture[i] = NULL;
1332 
/* New frame lands at the queue position matching the reordering delay;
 * pic is NULL when flushing, which clears that slot. */
1333  s->input_picture[encoding_delay] = (Picture*) pic;
1334 
1335  return 0;
1336 }
1337 
1339 {
/* Decides whether picture p may be skipped (encoded as a copy of ref).
 * Compares the two pictures 8x8-block-wise with frame_skip_cmp and folds
 * the per-block scores according to |frame_skip_exp|:
 *   0: maximum, 1: sum of |v|, 2: sum of v^2, 3: sum of |v^3|, 4: sum of v^4.
 * Returns 1 if the aggregate falls below frame_skip_threshold or the
 * lambda-scaled frame_skip_factor bound, 0 otherwise. */
1340  int x, y, plane;
1341  int score = 0;
1342  int64_t score64 = 0;
1343 
1344  for (plane = 0; plane < 3; plane++) {
1345  const int stride = p->f->linesize[plane];
/* Luma is iterated at 2x the macroblock grid (16x16 -> four 8x8 blocks),
 * chroma at 1x. */
1346  const int bw = plane ? 1 : 2;
1347  for (y = 0; y < s->mb_height * bw; y++) {
1348  for (x = 0; x < s->mb_width * bw; x++) {
/* Non-shared input buffers carry a 16-byte in-place offset. */
1349  int off = p->shared ? 0 : 16;
1350  uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
1351  uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
1352  int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);
1353 
1354  switch (FFABS(s->frame_skip_exp)) {
1355  case 0: score = FFMAX(score, v); break;
1356  case 1: score += FFABS(v); break;
1357  case 2: score64 += v * (int64_t)v; break;
1358  case 3: score64 += FFABS(v * (int64_t)v * v); break;
1359  case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
1360  }
1361  }
1362  }
1363  }
1364  emms_c();
1365 
/* Fold the 32-bit accumulator (exp 0/1) into the 64-bit one. */
1366  if (score)
1367  score64 = score;
/* Negative exponent: normalize per macroblock and take the |exp|-th root. */
1368  if (s->frame_skip_exp < 0)
1369  score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
1370  -1.0/s->frame_skip_exp);
1371 
1372  if (score64 < s->frame_skip_threshold)
1373  return 1;
1374  if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
1375  return 1;
1376  return 0;
1377 }
1378 
1380 {
1381  int ret;
1382  int size = 0;
1383 
1384  ret = avcodec_send_frame(c, frame);
1385  if (ret < 0)
1386  return ret;
1387 
1388  do {
1389  ret = avcodec_receive_packet(c, pkt);
1390  if (ret >= 0) {
1391  size += pkt->size;
1393  } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
1394  return ret;
1395  } while (ret >= 0);
1396 
1397  return size;
1398 }
1399 
1401 {
1402  const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1403  AVPacket *pkt;
1404  const int scale = s->brd_scale;
1405  int width = s->width >> scale;
1406  int height = s->height >> scale;
1407  int i, j, out_size, p_lambda, b_lambda, lambda2;
1408  int64_t best_rd = INT64_MAX;
1409  int best_b_count = -1;
1410  int ret = 0;
1411 
1412  av_assert0(scale >= 0 && scale <= 3);
1413 
1414  pkt = av_packet_alloc();
1415  if (!pkt)
1416  return AVERROR(ENOMEM);
1417 
1418  //emms_c();
1419  //s->next_picture_ptr->quality;
1420  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1421  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1422  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1423  if (!b_lambda) // FIXME we should do this somewhere else
1424  b_lambda = p_lambda;
1425  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1427 
1428  for (i = 0; i < s->max_b_frames + 2; i++) {
1429  Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1430  s->next_picture_ptr;
1431  uint8_t *data[4];
1432 
1433  if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1434  pre_input = *pre_input_ptr;
1435  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1436 
1437  if (!pre_input.shared && i) {
1438  data[0] += INPLACE_OFFSET;
1439  data[1] += INPLACE_OFFSET;
1440  data[2] += INPLACE_OFFSET;
1441  }
1442 
1443  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1444  s->tmp_frames[i]->linesize[0],
1445  data[0],
1446  pre_input.f->linesize[0],
1447  width, height);
1448  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1449  s->tmp_frames[i]->linesize[1],
1450  data[1],
1451  pre_input.f->linesize[1],
1452  width >> 1, height >> 1);
1453  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1454  s->tmp_frames[i]->linesize[2],
1455  data[2],
1456  pre_input.f->linesize[2],
1457  width >> 1, height >> 1);
1458  }
1459  }
1460 
1461  for (j = 0; j < s->max_b_frames + 1; j++) {
1462  AVCodecContext *c;
1463  int64_t rd = 0;
1464 
1465  if (!s->input_picture[j])
1466  break;
1467 
1469  if (!c) {
1470  ret = AVERROR(ENOMEM);
1471  goto fail;
1472  }
1473 
1474  c->width = width;
1475  c->height = height;
1477  c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1478  c->mb_decision = s->avctx->mb_decision;
1479  c->me_cmp = s->avctx->me_cmp;
1480  c->mb_cmp = s->avctx->mb_cmp;
1481  c->me_sub_cmp = s->avctx->me_sub_cmp;
1482  c->pix_fmt = AV_PIX_FMT_YUV420P;
1483  c->time_base = s->avctx->time_base;
1484  c->max_b_frames = s->max_b_frames;
1485 
1486  ret = avcodec_open2(c, codec, NULL);
1487  if (ret < 0)
1488  goto fail;
1489 
1490 
1491  s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
1492  s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1493 
1494  out_size = encode_frame(c, s->tmp_frames[0], pkt);
1495  if (out_size < 0) {
1496  ret = out_size;
1497  goto fail;
1498  }
1499 
1500  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1501 
1502  for (i = 0; i < s->max_b_frames + 1; i++) {
1503  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1504 
1505  s->tmp_frames[i + 1]->pict_type = is_p ?
1507  s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1508 
1509  out_size = encode_frame(c, s->tmp_frames[i + 1], pkt);
1510  if (out_size < 0) {
1511  ret = out_size;
1512  goto fail;
1513  }
1514 
1515  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1516  }
1517 
1518  /* get the delayed frames */
1520  if (out_size < 0) {
1521  ret = out_size;
1522  goto fail;
1523  }
1524  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1525 
1526  rd += c->error[0] + c->error[1] + c->error[2];
1527 
1528  if (rd < best_rd) {
1529  best_rd = rd;
1530  best_b_count = j;
1531  }
1532 
1533 fail:
1536  if (ret < 0) {
1537  best_b_count = ret;
1538  break;
1539  }
1540  }
1541 
1542  av_packet_free(&pkt);
1543 
1544  return best_b_count;
1545 }
1546 
1548 {
1549  int i, ret;
1550 
1551  for (i = 1; i < MAX_PICTURE_COUNT; i++)
1552  s->reordered_input_picture[i - 1] = s->reordered_input_picture[i];
1553  s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1554 
1555  /* set next picture type & ordering */
1556  if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1557  if (s->frame_skip_threshold || s->frame_skip_factor) {
1558  if (s->picture_in_gop_number < s->gop_size &&
1559  s->next_picture_ptr &&
1560  skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1561  // FIXME check that the gop check above is +-1 correct
1562  av_frame_unref(s->input_picture[0]->f);
1563 
1564  ff_vbv_update(s, 0);
1565 
1566  goto no_output_pic;
1567  }
1568  }
1569 
1570  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1571  !s->next_picture_ptr || s->intra_only) {
1572  s->reordered_input_picture[0] = s->input_picture[0];
1573  s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_I;
1574  s->reordered_input_picture[0]->f->coded_picture_number =
1575  s->coded_picture_number++;
1576  } else {
1577  int b_frames = 0;
1578 
1579  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1580  for (i = 0; i < s->max_b_frames + 1; i++) {
1581  int pict_num = s->input_picture[0]->f->display_picture_number + i;
1582 
1583  if (pict_num >= s->rc_context.num_entries)
1584  break;
1585  if (!s->input_picture[i]) {
1586  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1587  break;
1588  }
1589 
1590  s->input_picture[i]->f->pict_type =
1591  s->rc_context.entry[pict_num].new_pict_type;
1592  }
1593  }
1594 
1595  if (s->b_frame_strategy == 0) {
1596  b_frames = s->max_b_frames;
1597  while (b_frames && !s->input_picture[b_frames])
1598  b_frames--;
1599  } else if (s->b_frame_strategy == 1) {
1600  for (i = 1; i < s->max_b_frames + 1; i++) {
1601  if (s->input_picture[i] &&
1602  s->input_picture[i]->b_frame_score == 0) {
1603  s->input_picture[i]->b_frame_score =
1605  s->input_picture[i ]->f->data[0],
1606  s->input_picture[i - 1]->f->data[0],
1607  s->linesize) + 1;
1608  }
1609  }
1610  for (i = 0; i < s->max_b_frames + 1; i++) {
1611  if (!s->input_picture[i] ||
1612  s->input_picture[i]->b_frame_score - 1 >
1613  s->mb_num / s->b_sensitivity)
1614  break;
1615  }
1616 
1617  b_frames = FFMAX(0, i - 1);
1618 
1619  /* reset scores */
1620  for (i = 0; i < b_frames + 1; i++) {
1621  s->input_picture[i]->b_frame_score = 0;
1622  }
1623  } else if (s->b_frame_strategy == 2) {
1624  b_frames = estimate_best_b_count(s);
1625  if (b_frames < 0)
1626  return b_frames;
1627  }
1628 
1629  emms_c();
1630 
1631  for (i = b_frames - 1; i >= 0; i--) {
1632  int type = s->input_picture[i]->f->pict_type;
1633  if (type && type != AV_PICTURE_TYPE_B)
1634  b_frames = i;
1635  }
1636  if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1637  b_frames == s->max_b_frames) {
1638  av_log(s->avctx, AV_LOG_ERROR,
1639  "warning, too many B-frames in a row\n");
1640  }
1641 
1642  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1643  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1644  s->gop_size > s->picture_in_gop_number) {
1645  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1646  } else {
1647  if (s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP)
1648  b_frames = 0;
1649  s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1650  }
1651  }
1652 
1653  if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1654  s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1655  b_frames--;
1656 
1657  s->reordered_input_picture[0] = s->input_picture[b_frames];
1658  if (s->reordered_input_picture[0]->f->pict_type != AV_PICTURE_TYPE_I)
1659  s->reordered_input_picture[0]->f->pict_type = AV_PICTURE_TYPE_P;
1660  s->reordered_input_picture[0]->f->coded_picture_number =
1661  s->coded_picture_number++;
1662  for (i = 0; i < b_frames; i++) {
1663  s->reordered_input_picture[i + 1] = s->input_picture[i];
1664  s->reordered_input_picture[i + 1]->f->pict_type =
1666  s->reordered_input_picture[i + 1]->f->coded_picture_number =
1667  s->coded_picture_number++;
1668  }
1669  }
1670  }
1671 no_output_pic:
1672  ff_mpeg_unref_picture(s->avctx, &s->new_picture);
1673 
1674  if (s->reordered_input_picture[0]) {
1675  s->reordered_input_picture[0]->reference =
1676  s->reordered_input_picture[0]->f->pict_type !=
1677  AV_PICTURE_TYPE_B ? 3 : 0;
1678 
1679  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1680  return ret;
1681 
1682  if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1683  // input is a shared pix, so we can't modify it -> allocate a new
1684  // one & ensure that the shared one is reuseable
1685 
1686  Picture *pic;
1687  int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1688  if (i < 0)
1689  return i;
1690  pic = &s->picture[i];
1691 
1692  pic->reference = s->reordered_input_picture[0]->reference;
1693  if (alloc_picture(s, pic, 0) < 0) {
1694  return -1;
1695  }
1696 
1697  ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1698  if (ret < 0)
1699  return ret;
1700 
1701  /* mark us unused / free shared pic */
1702  av_frame_unref(s->reordered_input_picture[0]->f);
1703  s->reordered_input_picture[0]->shared = 0;
1704 
1705  s->current_picture_ptr = pic;
1706  } else {
1707  // input is not a shared pix -> reuse buffer for current_pix
1708  s->current_picture_ptr = s->reordered_input_picture[0];
1709  for (i = 0; i < 4; i++) {
1710  if (s->new_picture.f->data[i])
1711  s->new_picture.f->data[i] += INPLACE_OFFSET;
1712  }
1713  }
1714  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1715  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1716  s->current_picture_ptr)) < 0)
1717  return ret;
1718 
1719  s->picture_number = s->new_picture.f->display_picture_number;
1720  }
1721  return 0;
1722 }
1723 
1725 {
1726  if (s->unrestricted_mv &&
1727  s->current_picture.reference &&
1728  !s->intra_only) {
1729  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(s->avctx->pix_fmt);
1730  int hshift = desc->log2_chroma_w;
1731  int vshift = desc->log2_chroma_h;
1732  s->mpvencdsp.draw_edges(s->current_picture.f->data[0],
1733  s->current_picture.f->linesize[0],
1734  s->h_edge_pos, s->v_edge_pos,
1736  EDGE_TOP | EDGE_BOTTOM);
1737  s->mpvencdsp.draw_edges(s->current_picture.f->data[1],
1738  s->current_picture.f->linesize[1],
1739  s->h_edge_pos >> hshift,
1740  s->v_edge_pos >> vshift,
1741  EDGE_WIDTH >> hshift,
1742  EDGE_WIDTH >> vshift,
1743  EDGE_TOP | EDGE_BOTTOM);
1744  s->mpvencdsp.draw_edges(s->current_picture.f->data[2],
1745  s->current_picture.f->linesize[2],
1746  s->h_edge_pos >> hshift,
1747  s->v_edge_pos >> vshift,
1748  EDGE_WIDTH >> hshift,
1749  EDGE_WIDTH >> vshift,
1750  EDGE_TOP | EDGE_BOTTOM);
1751  }
1752 
1753  emms_c();
1754 
1755  s->last_pict_type = s->pict_type;
1756  s->last_lambda_for [s->pict_type] = s->current_picture_ptr->f->quality;
1757  if (s->pict_type!= AV_PICTURE_TYPE_B)
1758  s->last_non_b_pict_type = s->pict_type;
1759 
1760 #if FF_API_CODED_FRAME
1762  av_frame_unref(s->avctx->coded_frame);
1763  av_frame_copy_props(s->avctx->coded_frame, s->current_picture.f);
1765 #endif
1766 #if FF_API_ERROR_FRAME
1768  memcpy(s->current_picture.f->error, s->current_picture.encoding_error,
1769  sizeof(s->current_picture.encoding_error));
1771 #endif
1772 }
1773 
1775 {
/* Refreshes the per-coefficient DCT offset tables used for noise reduction,
 * separately for intra (intra=1) and inter (intra=0) blocks, from the
 * accumulated error sums and block counts. */
1776  int intra, i;
1777 
1778  for (intra = 0; intra < 2; intra++) {
/* Keep the accumulators bounded: once 2^16 blocks have been counted,
 * halve both the count and every error sum (exponential forgetting). */
1779  if (s->dct_count[intra] > (1 << 16)) {
1780  for (i = 0; i < 64; i++) {
1781  s->dct_error_sum[intra][i] >>= 1;
1782  }
1783  s->dct_count[intra] >>= 1;
1784  }
1785 
/* offset[i] ~= noise_reduction * count / error_sum[i], computed with
 * rounding (+err/2) and a +1 in the divisor to avoid division by zero. */
1786  for (i = 0; i < 64; i++) {
1787  s->dct_offset[intra][i] = (s->noise_reduction *
1788  s->dct_count[intra] +
1789  s->dct_error_sum[intra][i] / 2) /
1790  (s->dct_error_sum[intra][i] + 1);
1791  }
1792  }
1793 }
1794 
1796 {
1797  int ret;
1798 
1799  /* mark & release old frames */
1800  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1801  s->last_picture_ptr != s->next_picture_ptr &&
1802  s->last_picture_ptr->f->buf[0]) {
1803  ff_mpeg_unref_picture(s->avctx, s->last_picture_ptr);
1804  }
1805 
1806  s->current_picture_ptr->f->pict_type = s->pict_type;
1807  s->current_picture_ptr->f->key_frame = s->pict_type == AV_PICTURE_TYPE_I;
1808 
1809  ff_mpeg_unref_picture(s->avctx, &s->current_picture);
1810  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1811  s->current_picture_ptr)) < 0)
1812  return ret;
1813 
1814  if (s->pict_type != AV_PICTURE_TYPE_B) {
1815  s->last_picture_ptr = s->next_picture_ptr;
1816  if (!s->droppable)
1817  s->next_picture_ptr = s->current_picture_ptr;
1818  }
1819 
1820  if (s->last_picture_ptr) {
1821  ff_mpeg_unref_picture(s->avctx, &s->last_picture);
1822  if (s->last_picture_ptr->f->buf[0] &&
1823  (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1824  s->last_picture_ptr)) < 0)
1825  return ret;
1826  }
1827  if (s->next_picture_ptr) {
1828  ff_mpeg_unref_picture(s->avctx, &s->next_picture);
1829  if (s->next_picture_ptr->f->buf[0] &&
1830  (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1831  s->next_picture_ptr)) < 0)
1832  return ret;
1833  }
1834 
1835  if (s->picture_structure!= PICT_FRAME) {
1836  int i;
1837  for (i = 0; i < 4; i++) {
1838  if (s->picture_structure == PICT_BOTTOM_FIELD) {
1839  s->current_picture.f->data[i] +=
1840  s->current_picture.f->linesize[i];
1841  }
1842  s->current_picture.f->linesize[i] *= 2;
1843  s->last_picture.f->linesize[i] *= 2;
1844  s->next_picture.f->linesize[i] *= 2;
1845  }
1846  }
1847 
1848  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1849  s->dct_unquantize_intra = s->dct_unquantize_mpeg2_intra;
1850  s->dct_unquantize_inter = s->dct_unquantize_mpeg2_inter;
1851  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1852  s->dct_unquantize_intra = s->dct_unquantize_h263_intra;
1853  s->dct_unquantize_inter = s->dct_unquantize_h263_inter;
1854  } else {
1855  s->dct_unquantize_intra = s->dct_unquantize_mpeg1_intra;
1856  s->dct_unquantize_inter = s->dct_unquantize_mpeg1_inter;
1857  }
1858 
1859  if (s->dct_error_sum) {
1860  av_assert2(s->noise_reduction && s->encoding);
1862  }
1863 
1864  return 0;
1865 }
1866 
1868  const AVFrame *pic_arg, int *got_packet)
1869 {
1870  MpegEncContext *s = avctx->priv_data;
1871  int i, stuffing_count, ret;
1872  int context_count = s->slice_context_count;
1873 
1874  s->vbv_ignore_qmax = 0;
1875 
1876  s->picture_in_gop_number++;
1877 
1878  if (load_input_picture(s, pic_arg) < 0)
1879  return -1;
1880 
1881  if (select_input_picture(s) < 0) {
1882  return -1;
1883  }
1884 
1885  /* output? */
1886  if (s->new_picture.f->data[0]) {
1887  int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1888  int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1889  :
1890  s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1891  if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1892  return ret;
1893  if (s->mb_info) {
1894  s->mb_info_ptr = av_packet_new_side_data(pkt,
1896  s->mb_width*s->mb_height*12);
1897  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1898  }
1899 
1900  for (i = 0; i < context_count; i++) {
1901  int start_y = s->thread_context[i]->start_mb_y;
1902  int end_y = s->thread_context[i]-> end_mb_y;
1903  int h = s->mb_height;
1904  uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1905  uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1906 
1907  init_put_bits(&s->thread_context[i]->pb, start, end - start);
1908  }
1909 
1910  s->pict_type = s->new_picture.f->pict_type;
1911  //emms_c();
1912  ret = frame_start(s);
1913  if (ret < 0)
1914  return ret;
1915 vbv_retry:
1916  ret = encode_picture(s, s->picture_number);
1917  if (growing_buffer) {
1918  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1919  pkt->data = s->pb.buf;
1920  pkt->size = avctx->internal->byte_buffer_size;
1921  }
1922  if (ret < 0)
1923  return -1;
1924 
1925 #if FF_API_STAT_BITS
1927  avctx->header_bits = s->header_bits;
1928  avctx->mv_bits = s->mv_bits;
1929  avctx->misc_bits = s->misc_bits;
1930  avctx->i_tex_bits = s->i_tex_bits;
1931  avctx->p_tex_bits = s->p_tex_bits;
1932  avctx->i_count = s->i_count;
1933  // FIXME f/b_count in avctx
1934  avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1935  avctx->skip_count = s->skip_count;
1937 #endif
1938 
1939  frame_end(s);
1940 
1941  if (CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG)
1942  ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
1943 
1944  if (avctx->rc_buffer_size) {
1945  RateControlContext *rcc = &s->rc_context;
1946  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1947  int hq = (avctx->mb_decision == FF_MB_DECISION_RD || avctx->trellis);
1948  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1949 
1950  if (put_bits_count(&s->pb) > max_size &&
1951  s->lambda < s->lmax) {
1952  s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1953  (s->qscale + 1) / s->qscale);
1954  if (s->adaptive_quant) {
1955  int i;
1956  for (i = 0; i < s->mb_height * s->mb_stride; i++)
1957  s->lambda_table[i] =
1958  FFMAX(s->lambda_table[i] + min_step,
1959  s->lambda_table[i] * (s->qscale + 1) /
1960  s->qscale);
1961  }
1962  s->mb_skipped = 0; // done in frame_start()
1963  // done in encode_picture() so we must undo it
1964  if (s->pict_type == AV_PICTURE_TYPE_P) {
1965  if (s->flipflop_rounding ||
1966  s->codec_id == AV_CODEC_ID_H263P ||
1967  s->codec_id == AV_CODEC_ID_MPEG4)
1968  s->no_rounding ^= 1;
1969  }
1970  if (s->pict_type != AV_PICTURE_TYPE_B) {
1971  s->time_base = s->last_time_base;
1972  s->last_non_b_time = s->time - s->pp_time;
1973  }
1974  for (i = 0; i < context_count; i++) {
1975  PutBitContext *pb = &s->thread_context[i]->pb;
1976  init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1977  }
1978  s->vbv_ignore_qmax = 1;
1979  av_log(avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1980  goto vbv_retry;
1981  }
1982 
1983  av_assert0(avctx->rc_max_rate);
1984  }
1985 
1986  if (avctx->flags & AV_CODEC_FLAG_PASS1)
1988 
1989  for (i = 0; i < 4; i++) {
1990  s->current_picture_ptr->encoding_error[i] = s->current_picture.encoding_error[i];
1991  avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1992  }
1993  ff_side_data_set_encoder_stats(pkt, s->current_picture.f->quality,
1994  s->current_picture_ptr->encoding_error,
1995  (avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1996  s->pict_type);
1997 
1998  if (avctx->flags & AV_CODEC_FLAG_PASS1)
1999  assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
2000  s->misc_bits + s->i_tex_bits +
2001  s->p_tex_bits);
2002  flush_put_bits(&s->pb);
2003  s->frame_bits = put_bits_count(&s->pb);
2004 
2005  stuffing_count = ff_vbv_update(s, s->frame_bits);
2006  s->stuffing_bits = 8*stuffing_count;
2007  if (stuffing_count) {
2008  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
2009  stuffing_count + 50) {
2010  av_log(avctx, AV_LOG_ERROR, "stuffing too large\n");
2011  return -1;
2012  }
2013 
2014  switch (s->codec_id) {
2017  while (stuffing_count--) {
2018  put_bits(&s->pb, 8, 0);
2019  }
2020  break;
2021  case AV_CODEC_ID_MPEG4:
2022  put_bits(&s->pb, 16, 0);
2023  put_bits(&s->pb, 16, 0x1C3);
2024  stuffing_count -= 4;
2025  while (stuffing_count--) {
2026  put_bits(&s->pb, 8, 0xFF);
2027  }
2028  break;
2029  default:
2030  av_log(avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
2031  s->stuffing_bits = 0;
2032  }
2033  flush_put_bits(&s->pb);
2034  s->frame_bits = put_bits_count(&s->pb);
2035  }
2036 
2037  /* update MPEG-1/2 vbv_delay for CBR */
2038  if (avctx->rc_max_rate &&
2039  avctx->rc_min_rate == avctx->rc_max_rate &&
2040  s->out_format == FMT_MPEG1 &&
2041  90000LL * (avctx->rc_buffer_size - 1) <=
2042  avctx->rc_max_rate * 0xFFFFLL) {
2043  AVCPBProperties *props;
2044  size_t props_size;
2045 
2046  int vbv_delay, min_delay;
2047  double inbits = avctx->rc_max_rate *
2048  av_q2d(avctx->time_base);
2049  int minbits = s->frame_bits - 8 *
2050  (s->vbv_delay_ptr - s->pb.buf - 1);
2051  double bits = s->rc_context.buffer_index + minbits - inbits;
2052 
2053  if (bits < 0)
2054  av_log(avctx, AV_LOG_ERROR,
2055  "Internal error, negative bits\n");
2056 
2057  av_assert1(s->repeat_first_field == 0);
2058 
2059  vbv_delay = bits * 90000 / avctx->rc_max_rate;
2060  min_delay = (minbits * 90000LL + avctx->rc_max_rate - 1) /
2061  avctx->rc_max_rate;
2062 
2063  vbv_delay = FFMAX(vbv_delay, min_delay);
2064 
2065  av_assert0(vbv_delay < 0xFFFF);
2066 
2067  s->vbv_delay_ptr[0] &= 0xF8;
2068  s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2069  s->vbv_delay_ptr[1] = vbv_delay >> 5;
2070  s->vbv_delay_ptr[2] &= 0x07;
2071  s->vbv_delay_ptr[2] |= vbv_delay << 3;
2072 
2073  props = av_cpb_properties_alloc(&props_size);
2074  if (!props)
2075  return AVERROR(ENOMEM);
2076  props->vbv_delay = vbv_delay * 300;
2077 
2079  (uint8_t*)props, props_size);
2080  if (ret < 0) {
2081  av_freep(&props);
2082  return ret;
2083  }
2084 
2085 #if FF_API_VBV_DELAY
2087  avctx->vbv_delay = vbv_delay * 300;
2089 #endif
2090  }
2091  s->total_bits += s->frame_bits;
2092 #if FF_API_STAT_BITS
2094  avctx->frame_bits = s->frame_bits;
2096 #endif
2097 
2098 
2099  pkt->pts = s->current_picture.f->pts;
2100  if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2101  if (!s->current_picture.f->coded_picture_number)
2102  pkt->dts = pkt->pts - s->dts_delta;
2103  else
2104  pkt->dts = s->reordered_pts;
2105  s->reordered_pts = pkt->pts;
2106  } else
2107  pkt->dts = pkt->pts;
2108  if (s->current_picture.f->key_frame)
2110  if (s->mb_info)
2112  } else {
2113  s->frame_bits = 0;
2114  }
2115 
2116  /* release non-reference frames */
2117  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2118  if (!s->picture[i].reference)
2119  ff_mpeg_unref_picture(avctx, &s->picture[i]);
2120  }
2121 
2122  av_assert1((s->frame_bits & 7) == 0);
2123 
2124  pkt->size = s->frame_bits / 8;
2125  *got_packet = !!pkt->size;
2126  return 0;
2127 }
2128 
2130  int n, int threshold)
2131 {
2132  static const char tab[64] = {
2133  3, 2, 2, 1, 1, 1, 1, 1,
2134  1, 1, 1, 1, 1, 1, 1, 1,
2135  1, 1, 1, 1, 1, 1, 1, 1,
2136  0, 0, 0, 0, 0, 0, 0, 0,
2137  0, 0, 0, 0, 0, 0, 0, 0,
2138  0, 0, 0, 0, 0, 0, 0, 0,
2139  0, 0, 0, 0, 0, 0, 0, 0,
2140  0, 0, 0, 0, 0, 0, 0, 0
2141  };
2142  int score = 0;
2143  int run = 0;
2144  int i;
2145  int16_t *block = s->block[n];
2146  const int last_index = s->block_last_index[n];
2147  int skip_dc;
2148 
2149  if (threshold < 0) {
2150  skip_dc = 0;
2151  threshold = -threshold;
2152  } else
2153  skip_dc = 1;
2154 
2155  /* Are all we could set to zero already zero? */
2156  if (last_index <= skip_dc - 1)
2157  return;
2158 
2159  for (i = 0; i <= last_index; i++) {
2160  const int j = s->intra_scantable.permutated[i];
2161  const int level = FFABS(block[j]);
2162  if (level == 1) {
2163  if (skip_dc && i == 0)
2164  continue;
2165  score += tab[run];
2166  run = 0;
2167  } else if (level > 1) {
2168  return;
2169  } else {
2170  run++;
2171  }
2172  }
2173  if (score >= threshold)
2174  return;
2175  for (i = skip_dc; i <= last_index; i++) {
2176  const int j = s->intra_scantable.permutated[i];
2177  block[j] = 0;
2178  }
2179  if (block[0])
2180  s->block_last_index[n] = 0;
2181  else
2182  s->block_last_index[n] = -1;
2183 }
2184 
2185 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2186  int last_index)
2187 {
2188  int i;
2189  const int maxlevel = s->max_qcoeff;
2190  const int minlevel = s->min_qcoeff;
2191  int overflow = 0;
2192 
2193  if (s->mb_intra) {
2194  i = 1; // skip clipping of intra dc
2195  } else
2196  i = 0;
2197 
2198  for (; i <= last_index; i++) {
2199  const int j = s->intra_scantable.permutated[i];
2200  int level = block[j];
2201 
2202  if (level > maxlevel) {
2203  level = maxlevel;
2204  overflow++;
2205  } else if (level < minlevel) {
2206  level = minlevel;
2207  overflow++;
2208  }
2209 
2210  block[j] = level;
2211  }
2212 
2213  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2214  av_log(s->avctx, AV_LOG_INFO,
2215  "warning, clipping %d dct coefficients to %d..%d\n",
2216  overflow, minlevel, maxlevel);
2217 }
2218 
/**
 * Compute a per-coefficient visual weight for one 8x8 block, used by the
 * noise-shaping quantizer. For each pixel the weight is proportional to the
 * standard deviation of its 3x3 neighbourhood (clipped at block borders):
 * flat areas get low weights, textured areas high ones.
 */
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
{
    int bx, by;
    // FIXME optimize
    for (by = 0; by < 8; by++) {
        for (bx = 0; bx < 8; bx++) {
            /* 3x3 neighbourhood, clipped to the 8x8 block */
            const int y_lo = FFMAX(by - 1, 0);
            const int y_hi = FFMIN(8, by + 2);
            const int x_lo = FFMAX(bx - 1, 0);
            const int x_hi = FFMIN(8, bx + 2);
            int sum = 0, sqr = 0, count = 0;
            int xx, yy;

            for (yy = y_lo; yy < y_hi; yy++) {
                for (xx = x_lo; xx < x_hi; xx++) {
                    const int v = ptr[xx + yy * stride];
                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            /* 36 * sqrt(count * E[v^2] - E[v]^2-ish term) / count */
            weight[bx + 8 * by] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
        }
    }
}
2242 
2244  int motion_x, int motion_y,
2245  int mb_block_height,
2246  int mb_block_width,
2247  int mb_block_count)
2248 {
2249  int16_t weight[12][64];
2250  int16_t orig[12][64];
2251  const int mb_x = s->mb_x;
2252  const int mb_y = s->mb_y;
2253  int i;
2254  int skip_dct[12];
2255  int dct_offset = s->linesize * 8; // default for progressive frames
2256  int uv_dct_offset = s->uvlinesize * 8;
2257  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2258  ptrdiff_t wrap_y, wrap_c;
2259 
2260  for (i = 0; i < mb_block_count; i++)
2261  skip_dct[i] = s->skipdct;
2262 
2263  if (s->adaptive_quant) {
2264  const int last_qp = s->qscale;
2265  const int mb_xy = mb_x + mb_y * s->mb_stride;
2266 
2267  s->lambda = s->lambda_table[mb_xy];
2268  update_qscale(s);
2269 
2270  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2271  s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2272  s->dquant = s->qscale - last_qp;
2273 
2274  if (s->out_format == FMT_H263) {
2275  s->dquant = av_clip(s->dquant, -2, 2);
2276 
2277  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2278  if (!s->mb_intra) {
2279  if (s->pict_type == AV_PICTURE_TYPE_B) {
2280  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2281  s->dquant = 0;
2282  }
2283  if (s->mv_type == MV_TYPE_8X8)
2284  s->dquant = 0;
2285  }
2286  }
2287  }
2288  }
2289  ff_set_qscale(s, last_qp + s->dquant);
2290  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2291  ff_set_qscale(s, s->qscale + s->dquant);
2292 
2293  wrap_y = s->linesize;
2294  wrap_c = s->uvlinesize;
2295  ptr_y = s->new_picture.f->data[0] +
2296  (mb_y * 16 * wrap_y) + mb_x * 16;
2297  ptr_cb = s->new_picture.f->data[1] +
2298  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2299  ptr_cr = s->new_picture.f->data[2] +
2300  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2301 
2302  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2303  uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2304  int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2305  int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2306  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2307  wrap_y, wrap_y,
2308  16, 16, mb_x * 16, mb_y * 16,
2309  s->width, s->height);
2310  ptr_y = ebuf;
2311  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2312  wrap_c, wrap_c,
2313  mb_block_width, mb_block_height,
2314  mb_x * mb_block_width, mb_y * mb_block_height,
2315  cw, ch);
2316  ptr_cb = ebuf + 16 * wrap_y;
2317  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2318  wrap_c, wrap_c,
2319  mb_block_width, mb_block_height,
2320  mb_x * mb_block_width, mb_y * mb_block_height,
2321  cw, ch);
2322  ptr_cr = ebuf + 16 * wrap_y + 16;
2323  }
2324 
2325  if (s->mb_intra) {
2326  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2327  int progressive_score, interlaced_score;
2328 
2329  s->interlaced_dct = 0;
2330  progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2331  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2332  NULL, wrap_y, 8) - 400;
2333 
2334  if (progressive_score > 0) {
2335  interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2336  NULL, wrap_y * 2, 8) +
2337  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2338  NULL, wrap_y * 2, 8);
2339  if (progressive_score > interlaced_score) {
2340  s->interlaced_dct = 1;
2341 
2342  dct_offset = wrap_y;
2343  uv_dct_offset = wrap_c;
2344  wrap_y <<= 1;
2345  if (s->chroma_format == CHROMA_422 ||
2346  s->chroma_format == CHROMA_444)
2347  wrap_c <<= 1;
2348  }
2349  }
2350  }
2351 
2352  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2353  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2354  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2355  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2356 
2357  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2358  skip_dct[4] = 1;
2359  skip_dct[5] = 1;
2360  } else {
2361  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2362  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2363  if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2364  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2365  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2366  } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2367  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2368  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2369  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2370  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2371  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2372  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2373  }
2374  }
2375  } else {
2376  op_pixels_func (*op_pix)[4];
2377  qpel_mc_func (*op_qpix)[16];
2378  uint8_t *dest_y, *dest_cb, *dest_cr;
2379 
2380  dest_y = s->dest[0];
2381  dest_cb = s->dest[1];
2382  dest_cr = s->dest[2];
2383 
2384  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2385  op_pix = s->hdsp.put_pixels_tab;
2386  op_qpix = s->qdsp.put_qpel_pixels_tab;
2387  } else {
2388  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2389  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2390  }
2391 
2392  if (s->mv_dir & MV_DIR_FORWARD) {
2393  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2394  s->last_picture.f->data,
2395  op_pix, op_qpix);
2396  op_pix = s->hdsp.avg_pixels_tab;
2397  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2398  }
2399  if (s->mv_dir & MV_DIR_BACKWARD) {
2400  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2401  s->next_picture.f->data,
2402  op_pix, op_qpix);
2403  }
2404 
2405  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
2406  int progressive_score, interlaced_score;
2407 
2408  s->interlaced_dct = 0;
2409  progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2410  s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2411  ptr_y + wrap_y * 8,
2412  wrap_y, 8) - 400;
2413 
2414  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2415  progressive_score -= 400;
2416 
2417  if (progressive_score > 0) {
2418  interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2419  wrap_y * 2, 8) +
2420  s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2421  ptr_y + wrap_y,
2422  wrap_y * 2, 8);
2423 
2424  if (progressive_score > interlaced_score) {
2425  s->interlaced_dct = 1;
2426 
2427  dct_offset = wrap_y;
2428  uv_dct_offset = wrap_c;
2429  wrap_y <<= 1;
2430  if (s->chroma_format == CHROMA_422)
2431  wrap_c <<= 1;
2432  }
2433  }
2434  }
2435 
2436  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2437  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2438  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2439  dest_y + dct_offset, wrap_y);
2440  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2441  dest_y + dct_offset + 8, wrap_y);
2442 
2443  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2444  skip_dct[4] = 1;
2445  skip_dct[5] = 1;
2446  } else {
2447  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2448  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2449  if (!s->chroma_y_shift) { /* 422 */
2450  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2451  dest_cb + uv_dct_offset, wrap_c);
2452  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2453  dest_cr + uv_dct_offset, wrap_c);
2454  }
2455  }
2456  /* pre quantization */
2457  if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2458  2 * s->qscale * s->qscale) {
2459  // FIXME optimize
2460  if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2461  skip_dct[0] = 1;
2462  if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2463  skip_dct[1] = 1;
2464  if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2465  wrap_y, 8) < 20 * s->qscale)
2466  skip_dct[2] = 1;
2467  if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2468  wrap_y, 8) < 20 * s->qscale)
2469  skip_dct[3] = 1;
2470  if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2471  skip_dct[4] = 1;
2472  if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2473  skip_dct[5] = 1;
2474  if (!s->chroma_y_shift) { /* 422 */
2475  if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2476  dest_cb + uv_dct_offset,
2477  wrap_c, 8) < 20 * s->qscale)
2478  skip_dct[6] = 1;
2479  if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2480  dest_cr + uv_dct_offset,
2481  wrap_c, 8) < 20 * s->qscale)
2482  skip_dct[7] = 1;
2483  }
2484  }
2485  }
2486 
2487  if (s->quantizer_noise_shaping) {
2488  if (!skip_dct[0])
2489  get_visual_weight(weight[0], ptr_y , wrap_y);
2490  if (!skip_dct[1])
2491  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2492  if (!skip_dct[2])
2493  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2494  if (!skip_dct[3])
2495  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2496  if (!skip_dct[4])
2497  get_visual_weight(weight[4], ptr_cb , wrap_c);
2498  if (!skip_dct[5])
2499  get_visual_weight(weight[5], ptr_cr , wrap_c);
2500  if (!s->chroma_y_shift) { /* 422 */
2501  if (!skip_dct[6])
2502  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2503  wrap_c);
2504  if (!skip_dct[7])
2505  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2506  wrap_c);
2507  }
2508  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2509  }
2510 
2511  /* DCT & quantize */
2512  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2513  {
2514  for (i = 0; i < mb_block_count; i++) {
2515  if (!skip_dct[i]) {
2516  int overflow;
2517  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2518  // FIXME we could decide to change to quantizer instead of
2519  // clipping
2520  // JS: I don't think that would be a good idea it could lower
2521  // quality instead of improve it. Just INTRADC clipping
2522  // deserves changes in quantizer
2523  if (overflow)
2524  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2525  } else
2526  s->block_last_index[i] = -1;
2527  }
2528  if (s->quantizer_noise_shaping) {
2529  for (i = 0; i < mb_block_count; i++) {
2530  if (!skip_dct[i]) {
2531  s->block_last_index[i] =
2532  dct_quantize_refine(s, s->block[i], weight[i],
2533  orig[i], i, s->qscale);
2534  }
2535  }
2536  }
2537 
2538  if (s->luma_elim_threshold && !s->mb_intra)
2539  for (i = 0; i < 4; i++)
2540  dct_single_coeff_elimination(s, i, s->luma_elim_threshold);
2541  if (s->chroma_elim_threshold && !s->mb_intra)
2542  for (i = 4; i < mb_block_count; i++)
2543  dct_single_coeff_elimination(s, i, s->chroma_elim_threshold);
2544 
2545  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2546  for (i = 0; i < mb_block_count; i++) {
2547  if (s->block_last_index[i] == -1)
2548  s->coded_score[i] = INT_MAX / 256;
2549  }
2550  }
2551  }
2552 
2553  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2554  s->block_last_index[4] =
2555  s->block_last_index[5] = 0;
2556  s->block[4][0] =
2557  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2558  if (!s->chroma_y_shift) { /* 422 / 444 */
2559  for (i=6; i<12; i++) {
2560  s->block_last_index[i] = 0;
2561  s->block[i][0] = s->block[4][0];
2562  }
2563  }
2564  }
2565 
2566  // non c quantize code returns incorrect block_last_index FIXME
2567  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2568  for (i = 0; i < mb_block_count; i++) {
2569  int j;
2570  if (s->block_last_index[i] > 0) {
2571  for (j = 63; j > 0; j--) {
2572  if (s->block[i][s->intra_scantable.permutated[j]])
2573  break;
2574  }
2575  s->block_last_index[i] = j;
2576  }
2577  }
2578  }
2579 
2580  /* huffman encode */
2581  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2585  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2586  break;
2587  case AV_CODEC_ID_MPEG4:
2589  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2590  break;
2591  case AV_CODEC_ID_MSMPEG4V2:
2592  case AV_CODEC_ID_MSMPEG4V3:
2593  case AV_CODEC_ID_WMV1:
2595  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2596  break;
2597  case AV_CODEC_ID_WMV2:
2598  if (CONFIG_WMV2_ENCODER)
2599  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2600  break;
2601  case AV_CODEC_ID_H261:
2602  if (CONFIG_H261_ENCODER)
2603  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2604  break;
2605  case AV_CODEC_ID_H263:
2606  case AV_CODEC_ID_H263P:
2607  case AV_CODEC_ID_FLV1:
2608  case AV_CODEC_ID_RV10:
2609  case AV_CODEC_ID_RV20:
2610  if (CONFIG_H263_ENCODER)
2611  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2612  break;
2613  case AV_CODEC_ID_MJPEG:
2614  case AV_CODEC_ID_AMV:
2616  ff_mjpeg_encode_mb(s, s->block);
2617  break;
2618  case AV_CODEC_ID_SPEEDHQ:
2620  ff_speedhq_encode_mb(s, s->block);
2621  break;
2622  default:
2623  av_assert1(0);
2624  }
2625 }
2626 
2627 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2628 {
2629  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2630  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2631  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2632 }
2633 
2635  int i;
2636 
2637  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2638 
2639  /* MPEG-1 */
2640  d->mb_skip_run= s->mb_skip_run;
2641  for(i=0; i<3; i++)
2642  d->last_dc[i] = s->last_dc[i];
2643 
2644  /* statistics */
2645  d->mv_bits= s->mv_bits;
2646  d->i_tex_bits= s->i_tex_bits;
2647  d->p_tex_bits= s->p_tex_bits;
2648  d->i_count= s->i_count;
2649  d->f_count= s->f_count;
2650  d->b_count= s->b_count;
2651  d->skip_count= s->skip_count;
2652  d->misc_bits= s->misc_bits;
2653  d->last_bits= 0;
2654 
2655  d->mb_skipped= 0;
2656  d->qscale= s->qscale;
2657  d->dquant= s->dquant;
2658 
2659  d->esc3_level_length= s->esc3_level_length;
2660 }
2661 
2663  int i;
2664 
2665  memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2666  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2667 
2668  /* MPEG-1 */
2669  d->mb_skip_run= s->mb_skip_run;
2670  for(i=0; i<3; i++)
2671  d->last_dc[i] = s->last_dc[i];
2672 
2673  /* statistics */
2674  d->mv_bits= s->mv_bits;
2675  d->i_tex_bits= s->i_tex_bits;
2676  d->p_tex_bits= s->p_tex_bits;
2677  d->i_count= s->i_count;
2678  d->f_count= s->f_count;
2679  d->b_count= s->b_count;
2680  d->skip_count= s->skip_count;
2681  d->misc_bits= s->misc_bits;
2682 
2683  d->mb_intra= s->mb_intra;
2684  d->mb_skipped= s->mb_skipped;
2685  d->mv_type= s->mv_type;
2686  d->mv_dir= s->mv_dir;
2687  d->pb= s->pb;
2688  if(s->data_partitioning){
2689  d->pb2= s->pb2;
2690  d->tex_pb= s->tex_pb;
2691  }
2692  d->block= s->block;
2693  for(i=0; i<8; i++)
2694  d->block_last_index[i]= s->block_last_index[i];
2695  d->interlaced_dct= s->interlaced_dct;
2696  d->qscale= s->qscale;
2697 
2698  d->esc3_level_length= s->esc3_level_length;
2699 }
2700 
2701 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2702  PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
2703  int *dmin, int *next_block, int motion_x, int motion_y)
2704 {
2705  int score;
2706  uint8_t *dest_backup[3];
2707 
2708  copy_context_before_encode(s, backup, type);
2709 
2710  s->block= s->blocks[*next_block];
2711  s->pb= pb[*next_block];
2712  if(s->data_partitioning){
2713  s->pb2 = pb2 [*next_block];
2714  s->tex_pb= tex_pb[*next_block];
2715  }
2716 
2717  if(*next_block){
2718  memcpy(dest_backup, s->dest, sizeof(s->dest));
2719  s->dest[0] = s->sc.rd_scratchpad;
2720  s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2721  s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2722  av_assert0(s->linesize >= 32); //FIXME
2723  }
2724 
2725  encode_mb(s, motion_x, motion_y);
2726 
2727  score= put_bits_count(&s->pb);
2728  if(s->data_partitioning){
2729  score+= put_bits_count(&s->pb2);
2730  score+= put_bits_count(&s->tex_pb);
2731  }
2732 
2733  if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2734  ff_mpv_reconstruct_mb(s, s->block);
2735 
2736  score *= s->lambda2;
2737  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2738  }
2739 
2740  if(*next_block){
2741  memcpy(s->dest, dest_backup, sizeof(s->dest));
2742  }
2743 
2744  if(score<*dmin){
2745  *dmin= score;
2746  *next_block^=1;
2747 
2749  }
2750 }
2751 
2752 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2753  const uint32_t *sq = ff_square_tab + 256;
2754  int acc=0;
2755  int x,y;
2756 
2757  if(w==16 && h==16)
2758  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2759  else if(w==8 && h==8)
2760  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2761 
2762  for(y=0; y<h; y++){
2763  for(x=0; x<w; x++){
2764  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2765  }
2766  }
2767 
2768  av_assert2(acc>=0);
2769 
2770  return acc;
2771 }
2772 
2773 static int sse_mb(MpegEncContext *s){
2774  int w= 16;
2775  int h= 16;
2776 
2777  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2778  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2779 
2780  if(w==16 && h==16)
2781  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2782  return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2783  s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2784  s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2785  }else{
2786  return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2787  s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2788  s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2789  }
2790  else
2791  return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2792  +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2793  +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2794 }
2795 
2797  MpegEncContext *s= *(void**)arg;
2798 
2799 
2800  s->me.pre_pass=1;
2801  s->me.dia_size= s->avctx->pre_dia_size;
2802  s->first_slice_line=1;
2803  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2804  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2805  ff_pre_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2806  }
2807  s->first_slice_line=0;
2808  }
2809 
2810  s->me.pre_pass=0;
2811 
2812  return 0;
2813 }
2814 
2816  MpegEncContext *s= *(void**)arg;
2817 
2818  s->me.dia_size= s->avctx->dia_size;
2819  s->first_slice_line=1;
2820  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2821  s->mb_x=0; //for block init below
2823  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2824  s->block_index[0]+=2;
2825  s->block_index[1]+=2;
2826  s->block_index[2]+=2;
2827  s->block_index[3]+=2;
2828 
2829  /* compute motion vector & mb_type and store in context */
2830  if(s->pict_type==AV_PICTURE_TYPE_B)
2831  ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y);
2832  else
2833  ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y);
2834  }
2835  s->first_slice_line=0;
2836  }
2837  return 0;
2838 }
2839 
2840 static int mb_var_thread(AVCodecContext *c, void *arg){
2841  MpegEncContext *s= *(void**)arg;
2842  int mb_x, mb_y;
2843 
2844  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2845  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2846  int xx = mb_x * 16;
2847  int yy = mb_y * 16;
2848  uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2849  int varc;
2850  int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2851 
2852  varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2853  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2854 
2855  s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2856  s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2857  s->me.mb_var_sum_temp += varc;
2858  }
2859  }
2860  return 0;
2861 }
2862 
2864  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
2865  if(s->partitioned_frame){
2867  }
2868 
2869  ff_mpeg4_stuffing(&s->pb);
2870  }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2872  } else if (CONFIG_SPEEDHQ_ENCODER && s->out_format == FMT_SPEEDHQ) {
2874  }
2875 
2876  flush_put_bits(&s->pb);
2877 
2878  if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2879  s->misc_bits+= get_bits_diff(s);
2880 }
2881 
2883 {
2884  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2885  int offset = put_bits_count(&s->pb);
2886  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2887  int gobn = s->mb_y / s->gob_index;
2888  int pred_x, pred_y;
2889  if (CONFIG_H263_ENCODER)
2890  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2891  bytestream_put_le32(&ptr, offset);
2892  bytestream_put_byte(&ptr, s->qscale);
2893  bytestream_put_byte(&ptr, gobn);
2894  bytestream_put_le16(&ptr, mba);
2895  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2896  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2897  /* 4MV not implemented */
2898  bytestream_put_byte(&ptr, 0); /* hmv2 */
2899  bytestream_put_byte(&ptr, 0); /* vmv2 */
2900 }
2901 
2902 static void update_mb_info(MpegEncContext *s, int startcode)
2903 {
2904  if (!s->mb_info)
2905  return;
2906  if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2907  s->mb_info_size += 12;
2908  s->prev_mb_info = s->last_mb_info;
2909  }
2910  if (startcode) {
2911  s->prev_mb_info = put_bits_count(&s->pb)/8;
2912  /* This might have incremented mb_info_size above, and we return without
2913  * actually writing any info into that slot yet. But in that case,
2914  * this will be called again at the start of the after writing the
2915  * start code, actually writing the mb info. */
2916  return;
2917  }
2918 
2919  s->last_mb_info = put_bits_count(&s->pb)/8;
2920  if (!s->mb_info_size)
2921  s->mb_info_size += 12;
2922  write_mb_info(s);
2923 }
2924 
/**
 * Grow the shared bitstream output buffer when less than @p threshold bytes
 * of space remain.
 *
 * Only possible when a single slice context is in use and the PutBitContext
 * writes directly into the codec's internal byte buffer; otherwise the
 * buffer is left untouched and availability is merely re-checked.
 *
 * @param s             encoder context
 * @param threshold     minimum number of free bytes required
 * @param size_increase number of bytes to add on reallocation
 * @return 0 on success (enough space available afterwards),
 *         AVERROR(ENOMEM) on allocation failure or overflow,
 *         AVERROR(EINVAL) if the buffer cannot be grown but is still too small
 */
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
{
    if (   s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
        && s->slice_context_count == 1
        && s->pb.buf == s->avctx->internal->byte_buffer) {
        /* remember positions of pointers into the old buffer so they can be
         * rebased onto the new one after reallocation */
        int lastgob_pos = s->ptr_lastgob - s->pb.buf;
        int vbv_pos     = s->vbv_delay_ptr - s->pb.buf;

        uint8_t *new_buffer = NULL;
        int new_buffer_size = 0;

        if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
            av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
            return AVERROR(ENOMEM);
        }

        emms_c();

        av_fast_padded_malloc(&new_buffer, &new_buffer_size,
                              s->avctx->internal->byte_buffer_size + size_increase);
        if (!new_buffer)
            return AVERROR(ENOMEM);

        /* copy the already-written bitstream, then swap the buffers */
        memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
        av_free(s->avctx->internal->byte_buffer);
        s->avctx->internal->byte_buffer      = new_buffer;
        s->avctx->internal->byte_buffer_size = new_buffer_size;
        rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
        /* rebase saved pointers onto the new buffer */
        s->ptr_lastgob   = s->pb.buf + lastgob_pos;
        s->vbv_delay_ptr = s->pb.buf + vbv_pos;
    }
    if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
        return AVERROR(EINVAL);
    return 0;
}
2960 
2961 static int encode_thread(AVCodecContext *c, void *arg){
2962  MpegEncContext *s= *(void**)arg;
2963  int mb_x, mb_y, mb_y_order;
2964  int chr_h= 16>>s->chroma_y_shift;
2965  int i, j;
2966  MpegEncContext best_s = { 0 }, backup_s;
2967  uint8_t bit_buf[2][MAX_MB_BYTES];
2968  uint8_t bit_buf2[2][MAX_MB_BYTES];
2969  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2970  PutBitContext pb[2], pb2[2], tex_pb[2];
2971 
2972  for(i=0; i<2; i++){
2973  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2974  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2975  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2976  }
2977 
2978  s->last_bits= put_bits_count(&s->pb);
2979  s->mv_bits=0;
2980  s->misc_bits=0;
2981  s->i_tex_bits=0;
2982  s->p_tex_bits=0;
2983  s->i_count=0;
2984  s->f_count=0;
2985  s->b_count=0;
2986  s->skip_count=0;
2987 
2988  for(i=0; i<3; i++){
2989  /* init last dc values */
2990  /* note: quant matrix value (8) is implied here */
2991  s->last_dc[i] = 128 << s->intra_dc_precision;
2992 
2993  s->current_picture.encoding_error[i] = 0;
2994  }
2995  if(s->codec_id==AV_CODEC_ID_AMV){
2996  s->last_dc[0] = 128*8/13;
2997  s->last_dc[1] = 128*8/14;
2998  s->last_dc[2] = 128*8/14;
2999  }
3000  s->mb_skip_run = 0;
3001  memset(s->last_mv, 0, sizeof(s->last_mv));
3002 
3003  s->last_mv_dir = 0;
3004 
3005  switch(s->codec_id){
3006  case AV_CODEC_ID_H263:
3007  case AV_CODEC_ID_H263P:
3008  case AV_CODEC_ID_FLV1:
3009  if (CONFIG_H263_ENCODER)
3010  s->gob_index = H263_GOB_HEIGHT(s->height);
3011  break;
3012  case AV_CODEC_ID_MPEG4:
3013  if(CONFIG_MPEG4_ENCODER && s->partitioned_frame)
3015  break;
3016  }
3017 
3018  s->resync_mb_x=0;
3019  s->resync_mb_y=0;
3020  s->first_slice_line = 1;
3021  s->ptr_lastgob = s->pb.buf;
3022  for (mb_y_order = s->start_mb_y; mb_y_order < s->end_mb_y; mb_y_order++) {
3023  if (CONFIG_SPEEDHQ_ENCODER && s->codec_id == AV_CODEC_ID_SPEEDHQ) {
3024  int first_in_slice;
3025  mb_y = ff_speedhq_mb_y_order_to_mb(mb_y_order, s->mb_height, &first_in_slice);
3026  if (first_in_slice && mb_y_order != s->start_mb_y)
3028  s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 1024 << s->intra_dc_precision;
3029  } else {
3030  mb_y = mb_y_order;
3031  }
3032  s->mb_x=0;
3033  s->mb_y= mb_y;
3034 
3035  ff_set_qscale(s, s->qscale);
3037 
3038  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
3039  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
3040  int mb_type= s->mb_type[xy];
3041 // int d;
3042  int dmin= INT_MAX;
3043  int dir;
3044  int size_increase = s->avctx->internal->byte_buffer_size/4
3045  + s->mb_width*MAX_MB_BYTES;
3046 
3048  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
3049  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3050  return -1;
3051  }
3052  if(s->data_partitioning){
3053  if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3054  || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3055  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3056  return -1;
3057  }
3058  }
3059 
3060  s->mb_x = mb_x;
3061  s->mb_y = mb_y; // moved into loop, can get changed by H.261
3063 
3064  if(CONFIG_H261_ENCODER && s->codec_id == AV_CODEC_ID_H261){
3066  xy= s->mb_y*s->mb_stride + s->mb_x;
3067  mb_type= s->mb_type[xy];
3068  }
3069 
3070  /* write gob / video packet header */
3071  if(s->rtp_mode){
3072  int current_packet_size, is_gob_start;
3073 
3074  current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3075 
3076  is_gob_start = s->rtp_payload_size &&
3077  current_packet_size >= s->rtp_payload_size &&
3078  mb_y + mb_x > 0;
3079 
3080  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3081 
3082  switch(s->codec_id){
3083  case AV_CODEC_ID_H263:
3084  case AV_CODEC_ID_H263P:
3085  if(!s->h263_slice_structured)
3086  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3087  break;
3089  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3091  if(s->mb_skip_run) is_gob_start=0;
3092  break;
3093  case AV_CODEC_ID_MJPEG:
3094  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3095  break;
3096  }
3097 
3098  if(is_gob_start){
3099  if(s->start_mb_y != mb_y || mb_x!=0){
3100  write_slice_end(s);
3101 
3102  if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4 && s->partitioned_frame){
3104  }
3105  }
3106 
3107  av_assert2((put_bits_count(&s->pb)&7) == 0);
3108  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3109 
3110  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3111  int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3112  int d = 100 / s->error_rate;
3113  if(r % d == 0){
3114  current_packet_size=0;
3115  s->pb.buf_ptr= s->ptr_lastgob;
3116  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3117  }
3118  }
3119 
3120 #if FF_API_RTP_CALLBACK
3122  if (s->avctx->rtp_callback){
3123  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3124  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3125  }
3127 #endif
3128  update_mb_info(s, 1);
3129 
3130  switch(s->codec_id){
3131  case AV_CODEC_ID_MPEG4:
3132  if (CONFIG_MPEG4_ENCODER) {
3135  }
3136  break;
3142  }
3143  break;
3144  case AV_CODEC_ID_H263:
3145  case AV_CODEC_ID_H263P:
3146  if (CONFIG_H263_ENCODER)
3148  break;
3149  }
3150 
3151  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3152  int bits= put_bits_count(&s->pb);
3153  s->misc_bits+= bits - s->last_bits;
3154  s->last_bits= bits;
3155  }
3156 
3157  s->ptr_lastgob += current_packet_size;
3158  s->first_slice_line=1;
3159  s->resync_mb_x=mb_x;
3160  s->resync_mb_y=mb_y;
3161  }
3162  }
3163 
3164  if( (s->resync_mb_x == s->mb_x)
3165  && s->resync_mb_y+1 == s->mb_y){
3166  s->first_slice_line=0;
3167  }
3168 
3169  s->mb_skipped=0;
3170  s->dquant=0; //only for QP_RD
3171 
3172  update_mb_info(s, 0);
3173 
3174  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3175  int next_block=0;
3176  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3177 
3178  copy_context_before_encode(&backup_s, s, -1);
3179  backup_s.pb= s->pb;
3180  best_s.data_partitioning= s->data_partitioning;
3181  best_s.partitioned_frame= s->partitioned_frame;
3182  if(s->data_partitioning){
3183  backup_s.pb2= s->pb2;
3184  backup_s.tex_pb= s->tex_pb;
3185  }
3186 
3187  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3188  s->mv_dir = MV_DIR_FORWARD;
3189  s->mv_type = MV_TYPE_16X16;
3190  s->mb_intra= 0;
3191  s->mv[0][0][0] = s->p_mv_table[xy][0];
3192  s->mv[0][0][1] = s->p_mv_table[xy][1];
3193  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3194  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3195  }
3196  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3197  s->mv_dir = MV_DIR_FORWARD;
3198  s->mv_type = MV_TYPE_FIELD;
3199  s->mb_intra= 0;
3200  for(i=0; i<2; i++){
3201  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3202  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3203  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3204  }
3205  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3206  &dmin, &next_block, 0, 0);
3207  }
3208  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3209  s->mv_dir = MV_DIR_FORWARD;
3210  s->mv_type = MV_TYPE_16X16;
3211  s->mb_intra= 0;
3212  s->mv[0][0][0] = 0;
3213  s->mv[0][0][1] = 0;
3214  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3215  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3216  }
3217  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3218  s->mv_dir = MV_DIR_FORWARD;
3219  s->mv_type = MV_TYPE_8X8;
3220  s->mb_intra= 0;
3221  for(i=0; i<4; i++){
3222  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3223  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3224  }
3225  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3226  &dmin, &next_block, 0, 0);
3227  }
3228  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3229  s->mv_dir = MV_DIR_FORWARD;
3230  s->mv_type = MV_TYPE_16X16;
3231  s->mb_intra= 0;
3232  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3233  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3234  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3235  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3236  }
3237  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3238  s->mv_dir = MV_DIR_BACKWARD;
3239  s->mv_type = MV_TYPE_16X16;
3240  s->mb_intra= 0;
3241  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3242  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3243  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3244  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3245  }
3246  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3247  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3248  s->mv_type = MV_TYPE_16X16;
3249  s->mb_intra= 0;
3250  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3251  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3252  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3253  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3254  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3255  &dmin, &next_block, 0, 0);
3256  }
3257  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3258  s->mv_dir = MV_DIR_FORWARD;
3259  s->mv_type = MV_TYPE_FIELD;
3260  s->mb_intra= 0;
3261  for(i=0; i<2; i++){
3262  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3263  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3264  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3265  }
3266  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3267  &dmin, &next_block, 0, 0);
3268  }
3269  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3270  s->mv_dir = MV_DIR_BACKWARD;
3271  s->mv_type = MV_TYPE_FIELD;
3272  s->mb_intra= 0;
3273  for(i=0; i<2; i++){
3274  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3275  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3276  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3277  }
3278  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3279  &dmin, &next_block, 0, 0);
3280  }
3281  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3282  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3283  s->mv_type = MV_TYPE_FIELD;
3284  s->mb_intra= 0;
3285  for(dir=0; dir<2; dir++){
3286  for(i=0; i<2; i++){
3287  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3288  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3289  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3290  }
3291  }
3292  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3293  &dmin, &next_block, 0, 0);
3294  }
3295  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3296  s->mv_dir = 0;
3297  s->mv_type = MV_TYPE_16X16;
3298  s->mb_intra= 1;
3299  s->mv[0][0][0] = 0;
3300  s->mv[0][0][1] = 0;
3301  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3302  &dmin, &next_block, 0, 0);
3303  if(s->h263_pred || s->h263_aic){
3304  if(best_s.mb_intra)
3305  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3306  else
3307  ff_clean_intra_table_entries(s); //old mode?
3308  }
3309  }
3310 
3311  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3312  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3313  const int last_qp= backup_s.qscale;
3314  int qpi, qp, dc[6];
3315  int16_t ac[6][16];
3316  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3317  static const int dquant_tab[4]={-1,1,-2,2};
3318  int storecoefs = s->mb_intra && s->dc_val[0];
3319 
3320  av_assert2(backup_s.dquant == 0);
3321 
3322  //FIXME intra
3323  s->mv_dir= best_s.mv_dir;
3324  s->mv_type = MV_TYPE_16X16;
3325  s->mb_intra= best_s.mb_intra;
3326  s->mv[0][0][0] = best_s.mv[0][0][0];
3327  s->mv[0][0][1] = best_s.mv[0][0][1];
3328  s->mv[1][0][0] = best_s.mv[1][0][0];
3329  s->mv[1][0][1] = best_s.mv[1][0][1];
3330 
3331  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3332  for(; qpi<4; qpi++){
3333  int dquant= dquant_tab[qpi];
3334  qp= last_qp + dquant;
3335  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3336  continue;
3337  backup_s.dquant= dquant;
3338  if(storecoefs){
3339  for(i=0; i<6; i++){
3340  dc[i]= s->dc_val[0][ s->block_index[i] ];
3341  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3342  }
3343  }
3344 
3345  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3346  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3347  if(best_s.qscale != qp){
3348  if(storecoefs){
3349  for(i=0; i<6; i++){
3350  s->dc_val[0][ s->block_index[i] ]= dc[i];
3351  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3352  }
3353  }
3354  }
3355  }
3356  }
3357  }
3359  int mx= s->b_direct_mv_table[xy][0];
3360  int my= s->b_direct_mv_table[xy][1];
3361 
3362  backup_s.dquant = 0;
3363  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3364  s->mb_intra= 0;
3365  ff_mpeg4_set_direct_mv(s, mx, my);
3366  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3367  &dmin, &next_block, mx, my);
3368  }
3370  backup_s.dquant = 0;
3371  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD | MV_DIRECT;
3372  s->mb_intra= 0;
3373  ff_mpeg4_set_direct_mv(s, 0, 0);
3374  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3375  &dmin, &next_block, 0, 0);
3376  }
3377  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3378  int coded=0;
3379  for(i=0; i<6; i++)
3380  coded |= s->block_last_index[i];
3381  if(coded){
3382  int mx,my;
3383  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3384  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3385  mx=my=0; //FIXME find the one we actually used
3386  ff_mpeg4_set_direct_mv(s, mx, my);
3387  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3388  mx= s->mv[1][0][0];
3389  my= s->mv[1][0][1];
3390  }else{
3391  mx= s->mv[0][0][0];
3392  my= s->mv[0][0][1];
3393  }
3394 
3395  s->mv_dir= best_s.mv_dir;
3396  s->mv_type = best_s.mv_type;
3397  s->mb_intra= 0;
3398 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3399  s->mv[0][0][1] = best_s.mv[0][0][1];
3400  s->mv[1][0][0] = best_s.mv[1][0][0];
3401  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3402  backup_s.dquant= 0;
3403  s->skipdct=1;
3404  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3405  &dmin, &next_block, mx, my);
3406  s->skipdct=0;
3407  }
3408  }
3409 
3410  s->current_picture.qscale_table[xy] = best_s.qscale;
3411 
3412  copy_context_after_encode(s, &best_s, -1);
3413 
3414  pb_bits_count= put_bits_count(&s->pb);
3415  flush_put_bits(&s->pb);
3416  ff_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3417  s->pb= backup_s.pb;
3418 
3419  if(s->data_partitioning){
3420  pb2_bits_count= put_bits_count(&s->pb2);
3421  flush_put_bits(&s->pb2);
3422  ff_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3423  s->pb2= backup_s.pb2;
3424 
3425  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3426  flush_put_bits(&s->tex_pb);
3427  ff_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3428  s->tex_pb= backup_s.tex_pb;
3429  }
3430  s->last_bits= put_bits_count(&s->pb);
3431 
3432  if (CONFIG_H263_ENCODER &&
3433  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3435 
3436  if(next_block==0){ //FIXME 16 vs linesize16
3437  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3438  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3439  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3440  }
3441 
3442  if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
3443  ff_mpv_reconstruct_mb(s, s->block);
3444  } else {
3445  int motion_x = 0, motion_y = 0;
3446  s->mv_type=MV_TYPE_16X16;
3447  // only one MB-Type possible
3448 
3449  switch(mb_type){
3451  s->mv_dir = 0;
3452  s->mb_intra= 1;
3453  motion_x= s->mv[0][0][0] = 0;
3454  motion_y= s->mv[0][0][1] = 0;
3455  break;
3457  s->mv_dir = MV_DIR_FORWARD;
3458  s->mb_intra= 0;
3459  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3460  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3461  break;
3463  s->mv_dir = MV_DIR_FORWARD;
3464  s->mv_type = MV_TYPE_FIELD;
3465  s->mb_intra= 0;
3466  for(i=0; i<2; i++){
3467  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3468  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3469  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3470  }
3471  break;
3473  s->mv_dir = MV_DIR_FORWARD;
3474  s->mv_type = MV_TYPE_8X8;
3475  s->mb_intra= 0;
3476  for(i=0; i<4; i++){
3477  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3478  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3479  }
3480  break;
3482  if (CONFIG_MPEG4_ENCODER) {
3484  s->mb_intra= 0;
3485  motion_x=s->b_direct_mv_table[xy][0];
3486  motion_y=s->b_direct_mv_table[xy][1];
3487  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3488  }
3489  break;
3491  if (CONFIG_MPEG4_ENCODER) {
3493  s->mb_intra= 0;
3494  ff_mpeg4_set_direct_mv(s, 0, 0);
3495  }
3496  break;
3498  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3499  s->mb_intra= 0;
3500  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3501  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3502  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3503  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3504  break;
3506  s->mv_dir = MV_DIR_BACKWARD;
3507  s->mb_intra= 0;
3508  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3509  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3510  break;
3512  s->mv_dir = MV_DIR_FORWARD;
3513  s->mb_intra= 0;
3514  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3515  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3516  break;
3518  s->mv_dir = MV_DIR_FORWARD;
3519  s->mv_type = MV_TYPE_FIELD;
3520  s->mb_intra= 0;
3521  for(i=0; i<2; i++){
3522  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3523  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3524  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3525  }
3526  break;
3528  s->mv_dir = MV_DIR_BACKWARD;
3529  s->mv_type = MV_TYPE_FIELD;
3530  s->mb_intra= 0;
3531  for(i=0; i<2; i++){
3532  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3533  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3534  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3535  }
3536  break;
3538  s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD;
3539  s->mv_type = MV_TYPE_FIELD;
3540  s->mb_intra= 0;
3541  for(dir=0; dir<2; dir++){
3542  for(i=0; i<2; i++){
3543  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3544  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3545  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3546  }
3547  }
3548  break;
3549  default:
3550  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3551  }
3552 
3553  encode_mb(s, motion_x, motion_y);
3554 
3555  // RAL: Update last macroblock type
3556  s->last_mv_dir = s->mv_dir;
3557 
3558  if (CONFIG_H263_ENCODER &&
3559  s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
3561 
3562  ff_mpv_reconstruct_mb(s, s->block);
3563  }
3564 
3565  /* clean the MV table in IPS frames for direct mode in B-frames */
3566  if(s->mb_intra /* && I,P,S_TYPE */){
3567  s->p_mv_table[xy][0]=0;
3568  s->p_mv_table[xy][1]=0;
3569  }
3570 
3571  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3572  int w= 16;
3573  int h= 16;
3574 
3575  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3576  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3577 
3578  s->current_picture.encoding_error[0] += sse(
3579  s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3580  s->dest[0], w, h, s->linesize);
3581  s->current_picture.encoding_error[1] += sse(
3582  s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3583  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3584  s->current_picture.encoding_error[2] += sse(
3585  s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3586  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3587  }
3588  if(s->loop_filter){
3589  if(CONFIG_H263_ENCODER && s->out_format == FMT_H263)
3591  }
3592  ff_dlog(s->avctx, "MB %d %d bits\n",
3593  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3594  }
3595  }
3596 
3597  //not beautiful here but we must write it before flushing so it has to be here
3598  if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version && s->msmpeg4_version<4 && s->pict_type == AV_PICTURE_TYPE_I)
3600 
3601  write_slice_end(s);
3602 
3603 #if FF_API_RTP_CALLBACK
3605  /* Send the last GOB if RTP */
3606  if (s->avctx->rtp_callback) {
3607  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3608  int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3609  /* Call the RTP callback to send the last GOB */
3610  emms_c();
3611  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3612  }
3614 #endif
3615 
3616  return 0;
3617 }
3618 
3619 #define MERGE(field) dst->field += src->field; src->field=0
3621  MERGE(me.scene_change_score);
3622  MERGE(me.mc_mb_var_sum_temp);
3623  MERGE(me.mb_var_sum_temp);
3624 }
3625 
3627  int i;
3628 
3629  MERGE(dct_count[0]); //note, the other dct vars are not part of the context
3630  MERGE(dct_count[1]);
3631  MERGE(mv_bits);
3632  MERGE(i_tex_bits);
3633  MERGE(p_tex_bits);
3634  MERGE(i_count);
3635  MERGE(f_count);
3636  MERGE(b_count);
3637  MERGE(skip_count);
3638  MERGE(misc_bits);
3639  MERGE(er.error_count);
3640  MERGE(padding_bug_score);
3641  MERGE(current_picture.encoding_error[0]);
3642  MERGE(current_picture.encoding_error[1]);
3643  MERGE(current_picture.encoding_error[2]);
3644 
3645  if (dst->noise_reduction){
3646  for(i=0; i<64; i++){
3647  MERGE(dct_error_sum[0][i]);
3648  MERGE(dct_error_sum[1][i]);
3649  }
3650  }
3651 
3652  av_assert1(put_bits_count(&src->pb) % 8 ==0);
3653  av_assert1(put_bits_count(&dst->pb) % 8 ==0);
3654  ff_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
3655  flush_put_bits(&dst->pb);
3656 }
3657 
/**
 * Choose the quantiser/lambda for the current picture.
 *
 * Priority: an explicitly scheduled s->next_lambda wins; otherwise, unless
 * fixed qscale is in use, ask the rate controller (ff_rate_estimate_qscale)
 * for a quality value. With dry_run set, next_lambda is not consumed so the
 * call can be repeated (used for the two-pass PASS2 pre-estimate).
 *
 * NOTE(review): this listing was extracted from a doxygen render; the
 * per-codec adaptive-quant call lines inside the switch below (MPEG-4 /
 * H.263 qscale cleanup, and the default abort) were dropped because they
 * were hyperlinks — the empty-looking case bodies reflect that, not the
 * real source.
 *
 * @return 0 on success, -1 if rate control produced a negative quality.
 */
3658 static int estimate_qp(MpegEncContext *s, int dry_run){
3659  if (s->next_lambda){
3660  s->current_picture_ptr->f->quality =
3661  s->current_picture.f->quality = s->next_lambda;
3662  if(!dry_run) s->next_lambda= 0;
3663  } else if (!s->fixed_qscale) {
3664  int quality = ff_rate_estimate_qscale(s, dry_run);
3665  s->current_picture_ptr->f->quality =
3666  s->current_picture.f->quality = quality;
3667  if (s->current_picture.f->quality < 0)
3668  return -1;
3669  }
3670 
3671  if(s->adaptive_quant){
     /* per-codec post-processing of the per-MB lambda table;
        call lines lost in extraction — see note above */
3672  switch(s->codec_id){
3673  case AV_CODEC_ID_MPEG4:
3676  break;
3677  case AV_CODEC_ID_H263:
3678  case AV_CODEC_ID_H263P:
3679  case AV_CODEC_ID_FLV1:
3680  if (CONFIG_H263_ENCODER)
3682  break;
3683  default:
3685  }
3686 
     /* with adaptive quant the picture lambda is taken from the first
        table entry — marked broken upstream */
3687  s->lambda= s->lambda_table[0];
3688  //FIXME broken
3689  }else
3690  s->lambda = s->current_picture.f->quality;
3691  update_qscale(s);
3692  return 0;
3693 }
3694 
3695 /* must be called before writing the header */
3697  av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
3698  s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
3699 
3700  if(s->pict_type==AV_PICTURE_TYPE_B){
3701  s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
3702  av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
3703  }else{
3704  s->pp_time= s->time - s->last_non_b_time;
3705  s->last_non_b_time= s->time;
3706  av_assert1(s->picture_number==0 || s->pp_time > 0);
3707  }
3708 }
3709 
/**
 * Encode one complete picture.
 *
 * Pipeline visible in this function: reset ME statistics, set rounding
 * mode per picture type, estimate qscale (two-pass / last-lambda reuse),
 * run motion estimation (or intra complexity analysis for I-frames)
 * across all slice thread contexts, merge ME results, optionally promote
 * a P-frame to I on scene change, derive f_code/b_code and clamp long
 * motion vectors, build quantisation matrices for MJPEG/AMV/SpeedHQ,
 * write the codec-specific picture header, then encode all slices in
 * parallel via encode_thread and merge the per-thread bitstreams.
 *
 * NOTE(review): this is a doxygen-extracted listing; hyperlinked call
 * lines (e.g. set_frame_distances, ff_set_mpeg4_time, the SpeedHQ and
 * MPEG-1 header calls, the INTERLACED_ME type selection) are missing,
 * which is why some if/case bodies below look empty.
 *
 * @return 0 on success, a negative value on failure.
 */
3710 static int encode_picture(MpegEncContext *s, int picture_number)
3711 {
3712  int i, ret;
3713  int bits;
3714  int context_count = s->slice_context_count;
3715 
3716  s->picture_number = picture_number;
3717 
3718  /* Reset the average MB variance */
3719  s->me.mb_var_sum_temp =
3720  s->me.mc_mb_var_sum_temp = 0;
3721 
3722  /* we need to initialize some time vars before we can encode B-frames */
3723  // RAL: Condition added for MPEG1VIDEO
     /* NOTE(review): the two calls guarded below were dropped by the
        extraction (frame-distance / MPEG-4 time setup, presumably) */
3724  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || s->codec_id == AV_CODEC_ID_MPEG2VIDEO || (s->h263_pred && !s->msmpeg4_version))
3726  if(CONFIG_MPEG4_ENCODER && s->codec_id == AV_CODEC_ID_MPEG4)
3728 
3729  s->me.scene_change_score=0;
3730 
3731 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3732 
     /* rounding mode: MSMPEG4 v3+ I-frames round up; P/S frames toggle
        (flip-flop) for codecs that signal it */
3733  if(s->pict_type==AV_PICTURE_TYPE_I){
3734  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3735  else s->no_rounding=0;
3736  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3737  if(s->flipflop_rounding || s->codec_id == AV_CODEC_ID_H263P || s->codec_id == AV_CODEC_ID_MPEG4)
3738  s->no_rounding ^= 1;
3739  }
3740 
3741  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3742  if (estimate_qp(s,1) < 0)
3743  return -1;
3745  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
     /* reuse the previous lambda of the same picture class for ME */
3746  if(s->pict_type==AV_PICTURE_TYPE_B)
3747  s->lambda= s->last_lambda_for[s->pict_type];
3748  else
3749  s->lambda= s->last_lambda_for[s->last_non_b_pict_type];
3750  update_qscale(s);
3751  }
3752 
     /* non-(A)MJPEG codecs share one intra matrix between luma and chroma;
        free a previously split chroma matrix and alias it back */
3753  if(s->codec_id != AV_CODEC_ID_AMV && s->codec_id != AV_CODEC_ID_MJPEG){
3754  if(s->q_chroma_intra_matrix != s->q_intra_matrix ) av_freep(&s->q_chroma_intra_matrix);
3755  if(s->q_chroma_intra_matrix16 != s->q_intra_matrix16) av_freep(&s->q_chroma_intra_matrix16);
3756  s->q_chroma_intra_matrix = s->q_intra_matrix;
3757  s->q_chroma_intra_matrix16 = s->q_intra_matrix16;
3758  }
3759 
3760  s->mb_intra=0; //for the rate distortion & bit compare functions
3761  for(i=1; i<context_count; i++){
3762  ret = ff_update_duplicate_context(s->thread_context[i], s);
3763  if (ret < 0)
3764  return ret;
3765  }
3766 
3767  if(ff_init_me(s)<0)
3768  return -1;
3769 
3770  /* Estimate motion for every MB */
3771  if(s->pict_type != AV_PICTURE_TYPE_I){
3772  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3773  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3774  if (s->pict_type != AV_PICTURE_TYPE_B) {
3775  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3776  s->me_pre == 2) {
3777  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3778  }
3779  }
3780 
3781  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3782  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3783  /* I-Frame */
3784  for(i=0; i<s->mb_stride*s->mb_height; i++)
3785  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3786 
3787  if(!s->fixed_qscale){
3788  /* finding spatial complexity for I-frame rate control */
3789  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3790  }
3791  }
3792  for(i=1; i<context_count; i++){
3793  merge_context_after_me(s, s->thread_context[i]);
3794  }
3795  s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp;
3796  s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3797  emms_c();
3798 
     /* scene-change promotion: re-type the frame as intra */
3799  if (s->me.scene_change_score > s->scenechange_threshold &&
3800  s->pict_type == AV_PICTURE_TYPE_P) {
3801  s->pict_type= AV_PICTURE_TYPE_I;
3802  for(i=0; i<s->mb_stride*s->mb_height; i++)
3803  s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA;
3804  if(s->msmpeg4_version >= 3)
3805  s->no_rounding=1;
3806  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3807  s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);
3808  }
3809 
     /* pick f_code/b_code from the MV ranges and clip overly long MVs
        (skipped in H.263+ unlimited-MV mode) */
3810  if(!s->umvplus){
3811  if(s->pict_type==AV_PICTURE_TYPE_P || s->pict_type==AV_PICTURE_TYPE_S) {
3812  s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
3813 
3814  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3815  int a,b;
3816  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3817  b= ff_get_best_fcode(s, s->p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
3818  s->f_code= FFMAX3(s->f_code, a, b);
3819  }
3820 
3822  ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
3823  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3824  int j;
3825  for(i=0; i<2; i++){
3826  for(j=0; j<2; j++)
3827  ff_fix_long_mvs(s, s->p_field_select_table[i], j,
3828  s->p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
3829  }
3830  }
3831  }
3832 
3833  if(s->pict_type==AV_PICTURE_TYPE_B){
3834  int a, b;
3835 
3836  a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
3837  b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3838  s->f_code = FFMAX(a, b);
3839 
3840  a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
3841  b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
3842  s->b_code = FFMAX(a, b);
3843 
3844  ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
3845  ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
3846  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3847  ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
3848  if (s->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
3849  int dir, j;
3850  for(dir=0; dir<2; dir++){
3851  for(i=0; i<2; i++){
3852  for(j=0; j<2; j++){
     /* NOTE(review): the `type` selection lines (orig. 3853-3854)
        were dropped by the extraction */
3855  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3856  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3857  }
3858  }
3859  }
3860  }
3861  }
3862  }
3863 
3864  if (estimate_qp(s, 0) < 0)
3865  return -1;
3866 
3867  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3868  s->pict_type == AV_PICTURE_TYPE_I &&
3869  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3870  s->qscale= 3; //reduce clipping problems
3871 
3872  if (s->out_format == FMT_MJPEG) {
3873  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3874  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3875 
3876  if (s->avctx->intra_matrix) {
3877  chroma_matrix =
3878  luma_matrix = s->avctx->intra_matrix;
3879  }
3880  if (s->avctx->chroma_intra_matrix)
3881  chroma_matrix = s->avctx->chroma_intra_matrix;
3882 
3883  /* for mjpeg, we do include qscale in the matrix */
3884  for(i=1;i<64;i++){
3885  int j = s->idsp.idct_permutation[i];
3886 
3887  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3888  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3889  }
3890  s->y_dc_scale_table=
3891  s->c_dc_scale_table= ff_mpeg2_dc_scale_table[s->intra_dc_precision];
3892  s->chroma_intra_matrix[0] =
3893  s->intra_matrix[0] = ff_mpeg2_dc_scale_table[s->intra_dc_precision][8];
3894  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3895  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3896  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3897  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3898  s->qscale= 8;
3899  }
     /* AMV uses fixed sp5x matrices and fixed DC scale tables */
3900  if(s->codec_id == AV_CODEC_ID_AMV){
3901  static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3902  static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3903  for(i=1;i<64;i++){
3904  int j= s->idsp.idct_permutation[ff_zigzag_direct[i]];
3905 
3906  s->intra_matrix[j] = sp5x_qscale_five_quant_table[0][i];
3907  s->chroma_intra_matrix[j] = sp5x_qscale_five_quant_table[1][i];
3908  }
3909  s->y_dc_scale_table= y;
3910  s->c_dc_scale_table= c;
3911  s->intra_matrix[0] = 13;
3912  s->chroma_intra_matrix[0] = 14;
3913  ff_convert_matrix(s, s->q_intra_matrix, s->q_intra_matrix16,
3914  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3915  ff_convert_matrix(s, s->q_chroma_intra_matrix, s->q_chroma_intra_matrix16,
3916  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3917  s->qscale= 8;
3918  }
3919 
3920  if (s->out_format == FMT_SPEEDHQ) {
3921  s->y_dc_scale_table=
3922  s->c_dc_scale_table= ff_mpeg2_dc_scale_table[3];
3923  }
3924 
3925  //FIXME var duplication
3926  s->current_picture_ptr->f->key_frame =
3927  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3928  s->current_picture_ptr->f->pict_type =
3929  s->current_picture.f->pict_type = s->pict_type;
3930 
3931  if (s->current_picture.f->key_frame)
3932  s->picture_in_gop_number=0;
3933 
     /* write the codec-specific picture header and account its bits */
3934  s->mb_x = s->mb_y = 0;
3935  s->last_bits= put_bits_count(&s->pb);
3936  switch(s->out_format) {
3937  case FMT_MJPEG:
3938  if (CONFIG_MJPEG_ENCODER && s->huffman != HUFFMAN_TABLE_OPTIMAL)
3939  ff_mjpeg_encode_picture_header(s->avctx, &s->pb, &s->intra_scantable,
3940  s->pred, s->intra_matrix, s->chroma_intra_matrix);
3941  break;
3942  case FMT_SPEEDHQ:
     /* NOTE(review): SpeedHQ header call lines dropped by extraction */
3945  break;
3946  case FMT_H261:
3947  if (CONFIG_H261_ENCODER)
3948  ff_h261_encode_picture_header(s, picture_number);
3949  break;
3950  case FMT_H263:
3951  if (CONFIG_WMV2_ENCODER && s->codec_id == AV_CODEC_ID_WMV2)
3952  ff_wmv2_encode_picture_header(s, picture_number);
3953  else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3954  ff_msmpeg4_encode_picture_header(s, picture_number);
3955  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3956  ret = ff_mpeg4_encode_picture_header(s, picture_number);
3957  if (ret < 0)
3958  return ret;
3959  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3960  ret = ff_rv10_encode_picture_header(s, picture_number);
3961  if (ret < 0)
3962  return ret;
3963  }
3964  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3965  ff_rv20_encode_picture_header(s, picture_number);
3966  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3967  ff_flv_encode_picture_header(s, picture_number);
3968  else if (CONFIG_H263_ENCODER)
3969  ff_h263_encode_picture_header(s, picture_number);
3970  break;
3971  case FMT_MPEG1:
3973  ff_mpeg1_encode_picture_header(s, picture_number);
3974  break;
3975  default:
3976  av_assert0(0);
3977  }
3978  bits= put_bits_count(&s->pb);
3979  s->header_bits= bits - s->last_bits;
3980 
     /* encode all slices in parallel, then merge per-thread contexts
        and bitstreams back into the main context */
3981  for(i=1; i<context_count; i++){
3982  update_duplicate_context_after_me(s->thread_context[i], s);
3983  }
3984  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3985  for(i=1; i<context_count; i++){
3986  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3987  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-BUF_BITS));
3988  merge_context_after_encode(s, s->thread_context[i]);
3989  }
3990  emms_c();
3991  return 0;
3992 }
3993 
3994 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3995  const int intra= s->mb_intra;
3996  int i;
3997 
3998  s->dct_count[intra]++;
3999 
4000  for(i=0; i<64; i++){
4001  int level= block[i];
4002 
4003  if(level){
4004  if(level>0){
4005  s->dct_error_sum[intra][i] += level;
4006  level -= s->dct_offset[intra][i];
4007  if(level<0) level=0;
4008  }else{
4009  s->dct_error_sum[intra][i] -= level;
4010  level += s->dct_offset[intra][i];
4011  if(level>0) level=0;
4012  }
4013  block[i]= level;
4014  }
4015  }
4016 }
4017 
                                  int16_t *block, int n,
                                  int qscale, int *overflow){
    const int *qmat;
    const uint16_t *matrix;
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
    int max=0;
    unsigned int threshold1, threshold2;
    int bias=0;
    int run_tab[65];                 // best run leading to each position (backtrack info)
    int level_tab[65];               // best level at each position (backtrack info)
    int score_tab[65];               // minimal rate-distortion cost ending at each position
    int survivor[65];                // positions still worth extending a path from
    int survivor_count;
    int last_run=0;
    int last_level=0;
    int last_score= 0;
    int last_i;
    int coeff[2][64];                // up to two candidate quantized levels per coefficient
    int coeff_count[64];             // how many candidates are stored per coefficient (1 or 2)
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const int esc_length= s->ac_esc_length;
    uint8_t * length;
    uint8_t * last_length;
    const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
    int mpeg2_qscale;

    s->fdsp.fdct(block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);
    qmul= qscale*16;
    qadd= ((qscale-1)|1)*8;

    if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
    else                 mpeg2_qscale = qscale << 1;

    if (s->mb_intra) {
        int q;
        scantable= s->intra_scantable.scantable;
        perm_scantable= s->intra_scantable.permutated;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;
            qadd=0;
        }

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;                 // DC handled above; trellis covers AC only
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
        if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
            bias= 1<<(QMAT_SHIFT-1);

        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable= s->inter_scantable.scantable;
        perm_scantable= s->inter_scantable.permutated;
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        matrix = s->inter_matrix;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_i= start_i;

    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);

    /* scan backwards for the last coefficient that would quantize to nonzero */
    for(i=63; i>=start_i; i--) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }
    }

    /* build the 1..2 candidate quantized levels for every position up to
     * last_non_zero; positions that quantize to 0 get the +/-1 candidate */
    for(i=start_i; i<=last_non_zero; i++) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];

//        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
//           || bias-level >= (1<<(QMAT_SHIFT - 3))){
        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                coeff[0][i]= level;
                coeff[1][i]= level-1;
//                coeff[2][k]= level-2;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                coeff[0][i]= -level;
                coeff[1][i]= -level+1;
//                coeff[2][k]= -level+2;
            }
            coeff_count[i]= FFMIN(level, 2);
            av_assert2(coeff_count[i]);
            max |=level;
        }else{
            coeff[0][i]= (level>>31)|1;  // sign(level): -1 or +1
            coeff_count[i]= 1;
        }
    }

    *overflow= s->max_qcoeff < max; //overflow might have happened

    if(last_non_zero < start_i){
        memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
        return last_non_zero;
    }

    score_tab[start_i]= 0;
    survivor[0]= start_i;
    survivor_count= 1;

    /* dynamic-programming search: for each position try each candidate level
     * and each surviving predecessor, scoring distortion + lambda*bits */
    for(i=start_i; i<=last_non_zero; i++){
        int level_index, j, zero_distortion;
        int dct_coeff= FFABS(block[ scantable[i] ]);
        int best_score=256*256*256*120;

        if (s->fdsp.fdct == ff_fdct_ifast)
            dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
        zero_distortion= dct_coeff*dct_coeff;

        for(level_index=0; level_index < coeff_count[i]; level_index++){
            int distortion;
            int level= coeff[level_index][i];
            const int alevel= FFABS(level);
            int unquant_coeff;

            av_assert2(level);

            /* reconstruct the dequantized value the decoder would see */
            if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                unquant_coeff= alevel*qmul + qadd;
            } else if(s->out_format == FMT_MJPEG) {
                j = s->idsp.idct_permutation[scantable[i]];
                unquant_coeff = alevel * matrix[j] * 8;
            }else{ // MPEG-1
                j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
                if(s->mb_intra){
                        unquant_coeff = (int)(  alevel  * mpeg2_qscale * matrix[j]) >> 4;
                        unquant_coeff =   (unquant_coeff - 1) | 1;
                }else{
                        unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
                        unquant_coeff =   (unquant_coeff - 1) | 1;
                }
                unquant_coeff<<= 3;
            }

            distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
            level+=64;
            if((level&(~127)) == 0){
                /* level fits the regular VLC table (index range 0..127) */
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                    score += score_tab[i-run];

                    if(score < best_score){
                        best_score= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                    /* also track the best way to END the block here ("last" VLC) */
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                        score += score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }else{
                /* level needs the escape code: fixed escape cost */
                distortion += esc_length*lambda;
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + score_tab[i-run];

                    if(score < best_score){
                        best_score= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }
        }

        score_tab[i+1]= best_score;

        // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
        /* prune survivors whose partial score can no longer win */
        if(last_non_zero <= 27){
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score)
                    break;
            }
        }else{
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
                    break;
            }
        }

        survivor[ survivor_count++ ]= i+1;
    }

    if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
        /* no explicit "last" VLC: pick the cheapest end position now */
        last_score= 256*256*256*120;
        for(i= survivor[0]; i<=last_non_zero + 1; i++){
            int score= score_tab[i];
            if (i)
                score += lambda * 2; // FIXME more exact?

            if(score < last_score){
                last_score= score;
                last_i= i;
                last_level= level_tab[i];
                last_run= run_tab[i];
            }
        }
    }

    s->coded_score[n] = last_score;

    dc= FFABS(block[0]);
    last_non_zero= last_i - 1;
    memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));

    if(last_non_zero < start_i)
        return last_non_zero;

    if(last_non_zero == 0 && start_i == 0){
        /* inter block whose only surviving coefficient is position 0:
         * redo the choice for that single coefficient including "all zero" */
        int best_level= 0;
        int best_score= dc * dc;

        for(i=0; i<coeff_count[0]; i++){
            int level= coeff[i][0];
            int alevel= FFABS(level);
            int unquant_coeff, score, distortion;

            if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                    unquant_coeff= (alevel*qmul + qadd)>>3;
            } else{ // MPEG-1
                    unquant_coeff = (((  alevel  << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
                    unquant_coeff =   (unquant_coeff - 1) | 1;
            }
            unquant_coeff = (unquant_coeff + 4) >> 3;
            unquant_coeff<<= 3 + 3;

            distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
            level+=64;
            if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
            else                    score= distortion + esc_length*lambda;

            if(score < best_score){
                best_score= score;
                best_level= level - 64;
            }
        }
        block[0]= best_level;
        s->coded_score[n] = best_score - dc*dc;
        if(best_level == 0) return -1;
        else                return last_non_zero;
    }

    i= last_i;
    av_assert2(last_level);

    /* backtrack through run_tab/level_tab, writing the chosen levels back
     * into the block in permuted (IDCT) order */
    block[ perm_scantable[last_non_zero] ]= last_level;
    i -= last_run + 1;

    for(; i>start_i; i -= run_tab[i] + 1){
        block[ perm_scantable[i-1] ]= level_tab[i];
    }

    return last_non_zero;
}
4330 
4331 static int16_t basis[64][64];
4332 
4333 static void build_basis(uint8_t *perm){
4334  int i, j, x, y;
4335  emms_c();
4336  for(i=0; i<8; i++){
4337  for(j=0; j<8; j++){
4338  for(y=0; y<8; y++){
4339  for(x=0; x<8; x++){
4340  double s= 0.25*(1<<BASIS_SHIFT);
4341  int index= 8*i + j;
4342  int perm_index= perm[index];
4343  if(i==0) s*= sqrt(0.5);
4344  if(j==0) s*= sqrt(0.5);
4345  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4346  }
4347  }
4348  }
4349  }
4350 }
4351 
/**
 * Iteratively refine an already-quantized 8x8 block by noise shaping.
 *
 * Starting from the quantized levels in block[], repeatedly tries changing a
 * single coefficient by +/-1 (or toggling it between 0 and +/-1), scoring each
 * candidate as weighted reconstruction error (via try_8x8basis on the running
 * residual rem[]) plus lambda times the VLC bit-cost delta, and applies the
 * best change until no change improves the score.
 *
 * @param block  quantized coefficients in IDCT-permuted order, updated in place
 * @param weight per-coefficient perceptual weights, rescaled in place to 16..63
 * @param orig   original (unquantized) pixel block used as the target
 * @param n      block index (selects luma/chroma tables)
 * @param qscale quantizer scale
 * @return index of the last nonzero coefficient in scan order
 */
static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
                        int16_t *block, int16_t *weight, int16_t *orig,
                        int n, int qscale){
    int16_t rem[64];                 // running reconstruction residual, RECON_SHIFT fixed point
    LOCAL_ALIGNED_16(int16_t, d1, [64]);
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
//    unsigned int threshold1, threshold2;
//    int bias=0;
    int run_tab[65];                 // zero-run lengths of the current RLE of block[]
    int prev_run=0;
    int prev_level=0;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    uint8_t * length;
    uint8_t * last_length;
    int lambda;
    int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true

    /* lazily build the DCT basis table on first use */
    if(basis[0][0] == 0)
        build_basis(s->idsp.idct_permutation);

    qmul= qscale*2;
    qadd= (qscale-1)|1;
    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        perm_scantable= s->intra_scantable.permutated;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1;
            qadd=0;
        }
        q <<= RECON_SHIFT-3;
        /* note: block[0] is assumed to be positive */
        dc= block[0]*q;
//        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
//        if(s->mpeg_quant || s->out_format == FMT_MPEG1)
//            bias= 1<<(QMAT_SHIFT-1);
        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length     = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length     = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable= s->inter_scantable.scantable;
        perm_scantable= s->inter_scantable.permutated;
        dc= 0;
        start_i = 0;
        length     = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_non_zero = s->block_last_index[n];

    /* initialize the residual with DC-minus-original (rounding term added) */
    dc += (1<<(RECON_SHIFT-1));
    for(i=0; i<64; i++){
        rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME  use orig directly instead of copying to rem[]
    }

    /* rescale the perceptual weights into 16..63 and accumulate sum of
     * squares for the lambda normalization below */
    sum=0;
    for(i=0; i<64; i++){
        int one= 36;
        int qns=4;
        int w;

        w= FFABS(weight[i]) + qns*one;
        w= 15 + (48*qns*one + w/2)/w; // 16 .. 63

        weight[i] = w;
//        w=weight[i] = (63*qns + (w/2)) / w;

        av_assert2(w>0);
        av_assert2(w<(1<<6));
        sum += w*w;
    }
    lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);

    /* build the initial run-length table and add every coded coefficient's
     * dequantized contribution to the residual */
    run=0;
    rle_index=0;
    for(i=start_i; i<=last_non_zero; i++){
        int j= perm_scantable[i];
        const int level= block[j];
        int coeff;

        if(level){
            if(level<0) coeff= qmul*level - qadd;
            else        coeff= qmul*level + qadd;
            run_tab[rle_index++]=run;
            run=0;

            s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
        }else{
            run++;
        }
    }

    /* greedy refinement: keep applying the single best +/-1 change */
    for(;;){
        int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
        int best_coeff=0;
        int best_change=0;
        int run2, best_unquant_change=0, analyze_gradient;
        analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;

        if(analyze_gradient){
            /* forward-DCT of the weighted residual: its sign tells whether
             * introducing a new +/-1 coefficient could help */
            for(i=0; i<64; i++){
                int w= weight[i];

                d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
            }
            s->fdsp.fdct(d1);
        }

        if(start_i){
            /* intra: also consider +/-1 on the DC coefficient */
            const int level= block[0];
            int change, old_coeff;

            av_assert2(s->mb_intra);

            old_coeff= q*level;

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff;

                new_coeff= q*new_level;
                if(new_coeff >= 2048 || new_coeff < 0)
                    continue;

                score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
                                                  new_coeff - old_coeff);
                if(score<best_score){
                    best_score= score;
                    best_coeff= 0;
                    best_change= change;
                    best_unquant_change= new_coeff - old_coeff;
                }
            }
        }

        run=0;
        rle_index=0;
        run2= run_tab[rle_index++];
        prev_level=0;
        prev_run=0;

        for(i=start_i; i<64; i++){
            int j= perm_scantable[i];
            const int level= block[j];
            int change, old_coeff;

            if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
                break;

            if(level){
                if(level<0) old_coeff= qmul*level - qadd;
                else        old_coeff= qmul*level + qadd;
                run2= run_tab[rle_index++]; //FIXME ! maybe after last
            }else{
                old_coeff=0;
                run2--;
                av_assert2(run2>=0 || i >= last_non_zero );
            }

            for(change=-1; change<=1; change+=2){
                int new_level= level + change;
                int score, new_coeff, unquant_change;

                score=0;
                if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
                   continue;

                if(new_level){
                    if(new_level<0) new_coeff= qmul*new_level - qadd;
                    else            new_coeff= qmul*new_level + qadd;
                    if(new_coeff >= 2048 || new_coeff <= -2048)
                        continue;
                    //FIXME check for overflow

                    if(level){
                        /* changing the magnitude of an existing coefficient:
                         * bit-cost delta of its (run, level) VLC code */
                        if(level < 63 && level > -63){
                            if(i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - length[UNI_AC_ENC_INDEX(run, level+64)];
                            else
                                score +=   last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
                                         - last_length[UNI_AC_ENC_INDEX(run, level+64)];
                        }
                    }else{
                        /* introducing a new +/-1 coefficient: this splits the
                         * current zero-run in two, changing neighbouring codes */
                        av_assert2(FFABS(new_level)==1);

                        if(analyze_gradient){
                            int g= d1[ scantable[i] ];
                            if(g && (g^new_level) >= 0)
                                continue;
                        }

                        if(i < last_non_zero){
                            int next_i= i + run2 + 1;
                            int next_level= block[ perm_scantable[next_i] ] + 64;

                            if(next_level&(~127))
                                next_level= 0;

                            if(next_i < last_non_zero)
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                            else
                                score +=   length[UNI_AC_ENC_INDEX(run, 65)]
                                         + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                         - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
                        }else{
                            score += last_length[UNI_AC_ENC_INDEX(run, 65)];
                            if(prev_level){
                                score +=   length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                         - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                            }
                        }
                    }
                }else{
                    /* removing a +/-1 coefficient: merges two zero-runs */
                    new_coeff=0;
                    av_assert2(FFABS(level)==1);

                    if(i < last_non_zero){
                        int next_i= i + run2 + 1;
                        int next_level= block[ perm_scantable[next_i] ] + 64;

                        if(next_level&(~127))
                            next_level= 0;

                        if(next_i < last_non_zero)
                            score +=   length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                        else
                            score +=   last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
                                     - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
                                     - length[UNI_AC_ENC_INDEX(run, 65)];
                    }else{
                        score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
                        if(prev_level){
                            score +=   last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
                                     - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
                        }
                    }
                }

                score *= lambda;

                unquant_change= new_coeff - old_coeff;
                av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);

                score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
                                                   unquant_change);
                if(score<best_score){
                    best_score= score;
                    best_coeff= i;
                    best_change= change;
                    best_unquant_change= unquant_change;
                }
            }
            if(level){
                prev_level= level + 64;
                if(prev_level&(~127))
                    prev_level= 0;
                prev_run= run;
                run=0;
            }else{
                run++;
            }
        }

        if(best_change){
            /* apply the winning change, update last_non_zero, rebuild the
             * run-length table and fold the change into the residual */
            int j= perm_scantable[ best_coeff ];

            block[j] += best_change;

            if(best_coeff > last_non_zero){
                last_non_zero= best_coeff;
                av_assert2(block[j]);
            }else{
                for(; last_non_zero>=start_i; last_non_zero--){
                    if(block[perm_scantable[last_non_zero]])
                        break;
                }
            }

            run=0;
            rle_index=0;
            for(i=start_i; i<=last_non_zero; i++){
                int j= perm_scantable[i];
                const int level= block[j];

                if(level){
                    run_tab[rle_index++]=run;
                    run=0;
                }else{
                    run++;
                }
            }

            s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
        }else{
            break;
        }
    }

    return last_non_zero;
}
4667 
4668 /**
4669  * Permute an 8x8 block according to permutation.
4670  * @param block the block which will be permuted according to
4671  * the given permutation vector
4672  * @param permutation the permutation vector
4673  * @param last the last non zero coefficient in scantable order, used to
4674  * speed the permutation up
4675  * @param scantable the used scantable, this is only used to speed the
4676  * permutation up, the block is not (inverse) permutated
4677  * to scantable order!
4678  */
/**
 * Permute an 8x8 block according to permutation.
 *
 * Only the first last+1 coefficients in scantable order are moved; entries
 * past the last nonzero coefficient are left untouched.  For last <= 0 the
 * block is returned unchanged.
 *
 * @param block       coefficients to permute in place
 * @param permutation target position for each source position
 * @param scantable   scan order used only to limit the work to coded slots
 * @param last        last nonzero coefficient in scantable order
 */
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t saved[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    //     return;

    /* first pass: lift every coded coefficient out of the block */
    for (int i = last; i >= 0; i--) {
        const int pos = scantable[i];
        saved[pos] = block[pos];
        block[pos] = 0;
    }

    /* second pass: drop each one back in at its permuted position */
    for (int i = last; i >= 0; i--) {
        const int pos = scantable[i];
        block[permutation[pos]] = saved[pos];
    }
}
4703 
                        int16_t *block, int n,
                        int qscale, int *overflow)
{
    int i, j, level, last_non_zero, q, start_i;
    const int *qmat;
    const uint8_t *scantable;
    int bias;
    int max=0;
    unsigned int threshold1, threshold2;

    s->fdsp.fdct(block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);

    if (s->mb_intra) {
        scantable= s->intra_scantable.scantable;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;                 // DC quantized above, AC loop starts at 1
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    } else {
        scantable= s->inter_scantable.scantable;
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
    }
    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);
    /* backward scan: find the last coefficient that quantizes to nonzero,
     * zeroing the trailing ones on the way */
    for(i=63;i>=start_i;i--) {
        j = scantable[i];
        level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }else{
            block[j]=0;
        }
    }
    /* forward pass: quantize every coefficient up to last_non_zero */
    for(i=start_i; i<=last_non_zero; i++) {
        j = scantable[i];
        level = block[j] * qmat[j];

//        if(   bias+level >= (1<<QMAT_SHIFT)
//           || bias-level >= (1<<QMAT_SHIFT)){
        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                block[j]= level;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                block[j]= -level;
            }
            max |=level;
        }else{
            block[j]=0;
        }
    }
    *overflow= s->max_qcoeff < max; //overflow might have happened

    /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
    if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
        ff_block_permute(block, s->idsp.idct_permutation,
                         scantable, last_non_zero);

    return last_non_zero;
}
4786 
4787 #define OFFSET(x) offsetof(MpegEncContext, x)
4788 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
/* Private options of the H.263 encoder.  NOTE(review): this extraction
 * appears to elide one initializer line (likely the shared mpegvideo
 * options macro) — verify against the original file. */
static const AVOption h263_options[] = {
    { "obmc",         "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "mb_info",      "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
    { NULL },
};
4795 
/* AVClass exposing h263_options through the AVOption API. */
static const AVClass h263_class = {
    .class_name = "H.263 encoder",
    .item_name  = av_default_item_name,
    .option     = h263_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4802 
4804  .name = "h263",
4805  .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4806  .type = AVMEDIA_TYPE_VIDEO,
4807  .id = AV_CODEC_ID_H263,
4808  .priv_data_size = sizeof(MpegEncContext),
4810  .encode2 = ff_mpv_encode_picture,
4811  .close = ff_mpv_encode_end,
4812  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4814  .priv_class = &h263_class,
4815 };
4816 
/* Private options of the H.263+ (H.263v2) encoder.  NOTE(review): this
 * extraction appears to elide one initializer line (likely the shared
 * mpegvideo options macro) — verify against the original file. */
static const AVOption h263p_options[] = {
    { "umv",        "Use unlimited motion vectors.",    OFFSET(umvplus),       AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "aiv",        "Use alternative inter VLC.",       OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "obmc",       "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
    { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
    { NULL },
};
/* AVClass exposing h263p_options through the AVOption API. */
static const AVClass h263p_class = {
    .class_name = "H.263p encoder",
    .item_name  = av_default_item_name,
    .option     = h263p_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4831 
4833  .name = "h263p",
4834  .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4835  .type = AVMEDIA_TYPE_VIDEO,
4836  .id = AV_CODEC_ID_H263P,
4837  .priv_data_size = sizeof(MpegEncContext),
4839  .encode2 = ff_mpv_encode_picture,
4840  .close = ff_mpv_encode_end,
4841  .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4842  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4844  .priv_class = &h263p_class,
4845 };
4846 
/* AVClass for the MSMPEG4v2 encoder; uses only the generic mpegvideo options. */
static const AVClass msmpeg4v2_class = {
    .class_name = "msmpeg4v2 encoder",
    .item_name  = av_default_item_name,
    .option     = ff_mpv_generic_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4853 
4855  .name = "msmpeg4v2",
4856  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4857  .type = AVMEDIA_TYPE_VIDEO,
4858  .id = AV_CODEC_ID_MSMPEG4V2,
4859  .priv_data_size = sizeof(MpegEncContext),
4861  .encode2 = ff_mpv_encode_picture,
4862  .close = ff_mpv_encode_end,
4863  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4865  .priv_class = &msmpeg4v2_class,
4866 };
4867 
/* AVClass for the MSMPEG4v3 encoder; uses only the generic mpegvideo options. */
static const AVClass msmpeg4v3_class = {
    .class_name = "msmpeg4v3 encoder",
    .item_name  = av_default_item_name,
    .option     = ff_mpv_generic_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4874 
4876  .name = "msmpeg4",
4877  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4878  .type = AVMEDIA_TYPE_VIDEO,
4879  .id = AV_CODEC_ID_MSMPEG4V3,
4880  .priv_data_size = sizeof(MpegEncContext),
4882  .encode2 = ff_mpv_encode_picture,
4883  .close = ff_mpv_encode_end,
4884  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4886  .priv_class = &msmpeg4v3_class,
4887 };
4888 
/* AVClass for the WMV1 (Windows Media Video 7) encoder; generic options only. */
static const AVClass wmv1_class = {
    .class_name = "wmv1 encoder",
    .item_name  = av_default_item_name,
    .option     = ff_mpv_generic_options,
    .version    = LIBAVUTIL_VERSION_INT,
};
4895 
4897  .name = "wmv1",
4898  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4899  .type = AVMEDIA_TYPE_VIDEO,
4900  .id = AV_CODEC_ID_WMV1,
4901  .priv_data_size = sizeof(MpegEncContext),
4903  .encode2 = ff_mpv_encode_picture,
4904  .close = ff_mpv_encode_end,
4905  .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
4907  .priv_class = &wmv1_class,
4908 };
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
AAN (Arai, Agui and Nakajima) (I)DCT tables.
#define av_always_inline
Definition: attributes.h:45
#define av_cold
Definition: attributes.h:88
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\n", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> dc
uint8_t
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
Libavcodec external API header.
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: avcodec.h:1609
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:1610
#define FF_CMP_DCTMAX
Definition: avcodec.h:954
#define FF_CMP_VSSE
Definition: avcodec.h:950
#define FF_CMP_NSSE
Definition: avcodec.h:951
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:1027
#define FF_COMPLIANCE_NORMAL
Definition: avcodec.h:1608
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:1026
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1789
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:1025
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, buffer_size_t size)
Definition: avpacket.c:584
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: avpacket.c:820
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, buffer_size_t size)
Definition: avpacket.c:343
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:31
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:69
#define s(width, name)
Definition: cbs_vp9.c:257
#define fail()
Definition: checkasm.h:133
#define FFMAX3(a, b, c)
Definition: common.h:104
#define FFMIN(a, b)
Definition: common.h:105
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
#define av_clip
Definition: common.h:122
#define ROUNDED_DIV(a, b)
Definition: common.h:56
#define FFMAX(a, b)
Definition: common.h:103
#define av_clip_uint8
Definition: common.h:128
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define CONFIG_H261_ENCODER
Definition: config.h:1323
#define CONFIG_WMV2_ENCODER
Definition: config.h:1374
#define CONFIG_MPEG2VIDEO_ENCODER
Definition: config.h:1334
#define CONFIG_RV20_ENCODER
Definition: config.h:1357
#define CONFIG_MPEG1VIDEO_ENCODER
Definition: config.h:1333
#define CONFIG_SPEEDHQ_ENCODER
Definition: config.h:1361
#define ARCH_X86
Definition: config.h:39
#define CONFIG_FLV_ENCODER
Definition: config.h:1321
#define CONFIG_FAANDCT
Definition: config.h:638
#define CONFIG_RV10_ENCODER
Definition: config.h:1356
#define CONFIG_H263P_ENCODER
Definition: config.h:1325
#define CONFIG_H263_ENCODER
Definition: config.h:1324
#define CONFIG_MJPEG_ENCODER
Definition: config.h:1332
#define CONFIG_MPEG4_ENCODER
Definition: config.h:1335
#define NULL
Definition: coverity.c:32
long long int64_t
Definition: coverity.c:34
#define max(a, b)
Definition: cuda_runtime.h:33
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:208
void ff_jpeg_fdct_islow_10(int16_t *data)
void ff_jpeg_fdct_islow_8(int16_t *data)
static AVFrame * frame
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: encode.c:33
perm
Definition: f_perms.c:74
void ff_faandct(int16_t *data)
Definition: faandct.c:114
Floating point AAN DCT.
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:26
int
@ AV_OPT_TYPE_INT
Definition: opt.h:225
@ AV_OPT_TYPE_BOOL
Definition: opt.h:242
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:287
int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: avcodec.c:144
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:300
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:343
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:321
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:275
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:338
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:112
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:296
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:304
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:342
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: allcodecs.c:941
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:325
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:312
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:279
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:173
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer.
Definition: options.c:188
@ AV_CODEC_ID_MSMPEG4V1
Definition: codec_id.h:63
@ AV_CODEC_ID_H261
Definition: codec_id.h:52
@ AV_CODEC_ID_FLV1
Definition: codec_id.h:70
@ AV_CODEC_ID_WMV2
Definition: codec_id.h:67
@ AV_CODEC_ID_MSMPEG4V2
Definition: codec_id.h:64
@ AV_CODEC_ID_WMV1
Definition: codec_id.h:66
@ AV_CODEC_ID_RV10
Definition: codec_id.h:54
@ AV_CODEC_ID_SPEEDHQ
Definition: codec_id.h:274
@ AV_CODEC_ID_RV20
Definition: codec_id.h:55
@ AV_CODEC_ID_H263
Definition: codec_id.h:53
@ AV_CODEC_ID_MPEG4
Definition: codec_id.h:61
@ AV_CODEC_ID_MJPEG
Definition: codec_id.h:56
@ AV_CODEC_ID_MPEG1VIDEO
Definition: codec_id.h:50
@ AV_CODEC_ID_H263P
Definition: codec_id.h:68
@ AV_CODEC_ID_MSMPEG4V3
Definition: codec_id.h:65
@ AV_CODEC_ID_MPEG2VIDEO
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:51
@ AV_CODEC_ID_AMV
Definition: codec_id.h:156
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:395
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding.
Definition: avcodec.h:215
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:364
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:50
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:1053
void av_packet_free(AVPacket **pkt)
Free the packet, if the packet is reference counted, it will be unreferenced first.
Definition: avpacket.c:75
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:634
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:410
FF_ENABLE_DEPRECATION_WARNINGS int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: avpacket.c:309
AVPacket * av_packet_alloc(void)
Allocate an AVPacket and set its fields to default values.
Definition: avpacket.c:64
@ AV_PKT_DATA_H263_MB_INFO
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:93
@ AV_PKT_DATA_CPB_PROPERTIES
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:145
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:54
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AVERROR_EOF
End of file.
Definition: error.h:55
#define AVERROR(e)
Definition: error.h:43
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:553
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:337
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:443
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:658
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:215
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:200
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:210
#define AV_LOG_INFO
Standard information.
Definition: log.h:205
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:194
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:237
@ AVMEDIA_TYPE_VIDEO
Definition: avutil.h:201
@ AV_PICTURE_TYPE_I
Intra.
Definition: avutil.h:274
@ AV_PICTURE_TYPE_P
Predicted.
Definition: avutil.h:275
@ AV_PICTURE_TYPE_S
S(GMC)-VOP MPEG-4.
Definition: avutil.h:277
@ AV_PICTURE_TYPE_B
Bi-dir predicted.
Definition: avutil.h:276
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
int index
Definition: gxfenc.c:89
H.261 codec.
void ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:373
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:238
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: h261enc.c:54
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:109
int ff_h261_get_picture_format(int width, int height)
Definition: h261enc.c:41
void ff_h263_update_motion_val(MpegEncContext *s)
Definition: h263.c:54
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:147
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:319
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2....
Definition: ituh263enc.c:266
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: ituh263enc.c:103
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:447
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:757
#define H263_GOB_HEIGHT(h)
Definition: h263.h:43
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:240
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:260
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:236
H.263 tables.
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
Definition: h263dsp.c:117
for(j=16;j >0;--j)
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.c:342
cl_device_type type
@ FF_IDCT_PERM_NONE
Definition: idctdsp.h:38
int i
Definition: input.c:407
#define av_log2
Definition: intmath.h:83
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:218
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1561
void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: flvenc.c:27
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: utils.c:1067
#define STRIDE_ALIGN
Definition: internal.h:118
#define FF_CODEC_CAP_INIT_CLEANUP
The codec allows calling the close function for deallocation even if the init function returned a fai...
Definition: internal.h:49
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:906
const char * arg
Definition: jacosubdec.c:66
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:81
static void direct(const float *in, const FFTComplex *ir, int len, float *out)
Definition: af_afir.c:60
FLV common header.
common internal API header
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification.
Definition: internal.h:117
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:83
#define PTRDIFF_SPECIFIER
Definition: internal.h:192
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
#define emms_c()
Definition: internal.h:54
#define FF_ALLOCZ_TYPED_ARRAY(p, nelem)
Definition: internal.h:103
#define AVOnce
Definition: thread.h:172
static int ff_thread_once(char *control, void(*routine)(void))
Definition: thread.h:175
#define AV_ONCE_INIT
Definition: thread.h:173
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:309
#define lrintf(x)
Definition: libm_mips.h:70
const char * desc
Definition: libsvtav1.c:79
uint8_t w
Definition: llviddspenc.c:39
int stride
Definition: mace.c:144
#define FFALIGN(x, a)
Definition: macros.h:48
#define M_PI
Definition: mathematics.h:52
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
#define ff_sqrt
Definition: mathops.h:206
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:34
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:475
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:1015
#define LOCAL_ALIGNED_16(t, v,...)
Definition: mem_internal.h:130
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:472
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:258
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
Definition: mjpegenc.c:184
av_cold void ff_mjpeg_encode_close(MpegEncContext *s)
Definition: mjpegenc.c:313
MJPEG encoder.
@ HUFFMAN_TABLE_OPTIMAL
Compute and use optimal Huffman tables.
Definition: mjpegenc.h:97
void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb, ScanTable *intra_scantable, int pred, uint16_t luma_intra_matrix[64], uint16_t chroma_intra_matrix[64])
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
static const uint8_t mv_bits[2][16][10]
Definition: mobiclip.c:162
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:885
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
Definition: motion_est.c:1650
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:306
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1699
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1061
int ff_get_best_fcode(MpegEncContext *s, int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1598
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1490
#define MAX_MV
Definition: motion_est.h:35
#define MAX_DMV
Definition: motion_est.h:37
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:114
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
void ff_mpeg1_encode_init(MpegEncContext *s)
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:30
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:41
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:335
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:346
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:45
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:117
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
void ff_mpeg4_merge_partitions(MpegEncContext *s)
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
void ff_set_mpeg4_time(MpegEncContext *s)
void ff_mpeg4_init_partitions(MpegEncContext *s)
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:232
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
Definition: mpegpicture.c:295
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:440
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:355
void ff_free_picture_tables(Picture *pic)
Definition: mpegpicture.c:454
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:32
#define EDGE_WIDTH
Definition: mpegpicture.h:33
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegutils.h:104
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegutils.h:111
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegutils.h:115
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegutils.h:114
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegutils.h:117
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegutils.h:116
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegutils.h:107
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegutils.h:105
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegutils.h:109
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegutils.h:112
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegutils.h:110
#define MAX_FCODE
Definition: mpegutils.h:48
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegutils.h:106
#define MAX_MB_BYTES
Definition: mpegutils.h:47
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegutils.h:119
#define INPLACE_OFFSET
Definition: mpegutils.h:121
@ FMT_H261
Definition: mpegutils.h:125
@ FMT_MPEG1
Definition: mpegutils.h:124
@ FMT_SPEEDHQ
Definition: mpegutils.h:128
@ FMT_H263
Definition: mpegutils.h:126
@ FMT_MJPEG
Definition: mpegutils.h:127
#define PICT_FRAME
Definition: mpegutils.h:39
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding).
Definition: mpegvideo.c:676
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1111
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:913
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:499
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:331
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:2248
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:2331
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2267
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:1904
mpegvideo header.
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideo.h:596
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:214
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:263
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideo.h:765
#define MAX_B_FRAMES
Definition: mpegvideo.h:64
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
#define MV_DIR_FORWARD
Definition: mpegvideo.h:262
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:269
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:267
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideo.h:595
#define CHROMA_420
Definition: mpegvideo.h:488
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideo.h:318
#define CHROMA_444
Definition: mpegvideo.h:490
#define FF_MPV_COMMON_OPTS
Definition: mpegvideo.h:629
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4)
Definition: mpegvideo.h:264
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:266
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideo.h:593
#define CHROMA_422
Definition: mpegvideo.h:489
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:750
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideo.h:594
static int estimate_qp(MpegEncContext *s, int dry_run)
static void update_noise_reduction(MpegEncContext *s)
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:75
AVCodec ff_wmv1_encoder
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride)
AVCodec ff_msmpeg4v2_encoder
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:92
static void set_frame_distances(MpegEncContext *s)
const AVOption ff_mpv_generic_options[]
Definition: mpegvideo_enc.c:87
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
#define MERGE(field)
AVCodec ff_h263_encoder
static const AVClass h263p_class
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:84
static int estimate_best_b_count(MpegEncContext *s)
static int mb_var_thread(AVCodecContext *c, void *arg)
static int estimate_motion_thread(AVCodecContext *c, void *arg)
#define VE
static int select_input_picture(MpegEncContext *s)
static const AVOption h263p_options[]
static const AVClass wmv1_class
static const AVClass msmpeg4v3_class
static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
static int get_sae(uint8_t *src, int ref, int stride)
AVCodec ff_msmpeg4v3_encoder
static const AVClass h263_class
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:85
AVCodec ff_h263p_encoder
static void build_basis(uint8_t *perm)
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count)
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:73
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
static void frame_end(MpegEncContext *s)
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
static void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type)
static void mpv_encode_init_static(void)
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
static int sse_mb(MpegEncContext *s)
static int frame_start(MpegEncContext *s)
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
static int16_t basis[64][64]
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
static int encode_picture(MpegEncContext *s, int picture_number)
static const AVOption h263_options[]
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
static void write_mb_info(MpegEncContext *s)
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
static const AVClass msmpeg4v2_class
#define OFFSET(x)
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src)
static void update_qscale(MpegEncContext *s)
#define COPY(a)
static int encode_thread(AVCodecContext *c, void *arg)
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
static void write_slice_end(MpegEncContext *s)
static void update_mb_info(MpegEncContext *s, int startcode)
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
av_cold int ff_dct_encode_init(MpegEncContext *s)
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
static void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type)
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:76
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
const uint8_t *const ff_mpeg2_dc_scale_table[4]
Definition: mpegvideodata.c:77
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
#define BASIS_SHIFT
#define EDGE_BOTTOM
#define RECON_SHIFT
#define EDGE_TOP
void ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:116
#define CONFIG_MSMPEG4_ENCODER
Definition: msmpeg4.h:75
void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: msmpeg4enc.c:217
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:277
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:369
const char data[16]
Definition: mxf.c:142
AVOptions.
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2601
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2573
@ AVCOL_RANGE_JPEG
Full range content.
Definition: pixfmt.h:586
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
@ AV_PIX_FMT_NONE
Definition: pixfmt.h:65
@ AV_PIX_FMT_YUV420P
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
@ AV_PIX_FMT_YUV422P
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
@ AV_PIX_FMT_YUV444P
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
@ AV_PIX_FMT_YUVJ422P
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
@ AV_PIX_FMT_YUVJ444P
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
@ AV_PIX_FMT_YUVJ420P
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:57
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:88
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:376
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:342
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:76
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:110
static const int BUF_BITS
Definition: put_bits.h:42
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:783
quarterpel DSP functions
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:472
av_cold void ff_rate_control_uninit(MpegEncContext *s)
Definition: ratecontrol.c:672
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:857
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:868
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:681
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:38
#define MAX_LEVEL
Definition: rl.h:36
#define MAX_RUN
Definition: rl.h:35
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv10enc.c:32
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv20enc.c:35
#define FF_ARRAY_ELEMS(a)
static int shift(int a, int b)
Definition: sonic.c:82
static const uint8_t sp5x_qscale_five_quant_table[][64]
Definition: sp5x.h:135
void ff_speedhq_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: speedhqenc.c:245
void ff_speedhq_end_slice(MpegEncContext *s)
Definition: speedhqenc.c:150
av_cold int ff_speedhq_encode_init(MpegEncContext *s)
Definition: speedhqenc.c:102
int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice)
Definition: speedhqenc.c:273
void ff_speedhq_encode_picture_header(MpegEncContext *s)
Definition: speedhqenc.c:140
SpeedHQ encoder.
This structure describes the bitrate properties of an encoded bitstream.
Definition: avcodec.h:453
int avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: avcodec.h:477
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: avcodec.h:495
int min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: avcodec.h:468
int buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: avcodec.h:486
int max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: avcodec.h:459
Describe the class of an AVClass context structure.
Definition: log.h:67
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
main external API structure.
Definition: avcodec.h:536
attribute_deprecated int brd_scale
Definition: avcodec.h:1109
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow.
Definition: avcodec.h:1434
int trellis
trellis RD quantization
Definition: avcodec.h:1491
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:746
attribute_deprecated int pre_me
Definition: avcodec.h:976
int width
picture width / height.
Definition: avcodec.h:709
attribute_deprecated int i_count
Definition: avcodec.h:1543
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1561
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1405
attribute_deprecated int header_bits
Definition: avcodec.h:1537
attribute_deprecated int scenechange_threshold
Definition: avcodec.h:1050
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1171
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1605
attribute_deprecated int frame_bits
Definition: avcodec.h:1553
attribute_deprecated int mv_bits
Definition: avcodec.h:1535
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:796
int qmin
minimum quantizer
Definition: avcodec.h:1384
attribute_deprecated int b_sensitivity
Definition: avcodec.h:1142
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel.
Definition: avcodec.h:915
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:1045
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1796
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:594
int mb_decision
macroblock decision mode
Definition: avcodec.h:1024
attribute_deprecated int misc_bits
Definition: avcodec.h:1549
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:826
attribute_deprecated int frame_skip_threshold
Definition: avcodec.h:1471
int64_t bit_rate
the average bitrate
Definition: avcodec.h:586
const struct AVCodec * codec
Definition: avcodec.h:545
attribute_deprecated int frame_skip_cmp
Definition: avcodec.h:1483
attribute_deprecated int p_count
Definition: avcodec.h:1545
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1751
attribute_deprecated int mpeg_quant
Definition: avcodec.h:831
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:862
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:876
int delay
Codec delay.
Definition: avcodec.h:692
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:883
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:731
attribute_deprecated int rtp_payload_size
Definition: avcodec.h:1524
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:940
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1420
attribute_deprecated int me_penalty_compensation
Definition: avcodec.h:1097
int thread_count
thread count is used to decide how many independent tasks should be passed to execute()
Definition: avcodec.h:1777
int qmax
maximum quantizer
Definition: avcodec.h:1391
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions,...
Definition: avcodec.h:1036
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented.
Definition: avcodec.h:659
attribute_deprecated int i_tex_bits
Definition: avcodec.h:1539
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:616
attribute_deprecated int frame_skip_exp
Definition: avcodec.h:1479
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:637
attribute_deprecated int prediction_method
Definition: avcodec.h:895
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1427
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:1062
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1703
attribute_deprecated int p_tex_bits
Definition: avcodec.h:1541
attribute_deprecated int noise_reduction
Definition: avcodec.h:1054
attribute_deprecated int skip_count
Definition: avcodec.h:1547
enum AVCodecID codec_id
Definition: avcodec.h:546
attribute_deprecated int b_frame_strategy
Definition: avcodec.h:810
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:855
attribute_deprecated int frame_skip_factor
Definition: avcodec.h:1475
attribute_deprecated uint64_t vbv_delay
VBV delay coded in the last frame (in periods of a 27 MHz clock).
Definition: avcodec.h:2033
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:571
void * priv_data
Definition: avcodec.h:563
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:869
int slices
Number of slices.
Definition: avcodec.h:1187
unsigned int byte_buffer_size
Definition: internal.h:166
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:165
AVCodec.
Definition: codec.h:197
enum AVCodecID id
Definition: codec.h:211
const char * name
Name of the codec implementation.
Definition: codec.h:204
This structure describes decoded (raw) audio or video data.
Definition: frame.h:318
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:411
int display_picture_number
picture number in display order
Definition: frame.h:436
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:332
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:509
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:349
AVOption.
Definition: opt.h:248
This structure stores compressed data.
Definition: packet.h:346
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:375
int size
Definition: packet.h:370
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:362
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed.
Definition: packet.h:368
uint8_t * data
Definition: packet.h:369
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
int num
Numerator.
Definition: rational.h:59
int den
Denominator.
Definition: rational.h:60
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:27
MpegEncContext.
Definition: mpegvideo.h:81
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:407
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:513
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:410
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:406
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:276
int interlaced_dct
Definition: mpegvideo.h:496
int noise_reduction
Definition: mpegvideo.h:587
int last_bits
temp var used for calculating the above vars
Definition: mpegvideo.h:353
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:411
int qscale
QP.
Definition: mpegvideo.h:204
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:86
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:352
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:185
int esc3_level_length
Definition: mpegvideo.h:442
PutBitContext pb
bit output
Definition: mpegvideo.h:151
int last_mv[2][2][2]
last MV, used for MV prediction in MPEG-1 & B-frame MPEG-4
Definition: mpegvideo.h:278
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:195
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:210
Picture.
Definition: mpegpicture.h:45
int reference
Definition: mpegpicture.h:88
int shared
Definition: mpegpicture.h:89
struct AVFrame * f
Definition: mpegpicture.h:46
uint8_t * buf
Definition: put_bits.h:47
uint8_t * buf_end
Definition: put_bits.h:47
rate control context.
Definition: ratecontrol.h:63
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:66
uint8_t run
Definition: svq3.c:205
uint8_t level
Definition: svq3.c:206
#define av_free(p)
#define ff_dlog(a,...)
#define av_freep(p)
#define av_log(a,...)
#define src1
Definition: h264pred.c:140
#define src
Definition: vp8dsp.c:255
static int16_t block[64]
Definition: dct.c:116
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
AVPacket * pkt
Definition: movenc.c:59
int out_size
Definition: movenc.c:55
#define height
#define width
static int64_t pts
int size
static const struct twinvq_data tab
#define me
const char * b
Definition: vf_curves.c:118
const char * g
Definition: vf_curves.c:117
const char * r
Definition: vf_curves.c:116
else temp
Definition: vf_mcdeint.c:259
if(ret< 0)
Definition: vf_mcdeint.c:282
static float mean(const float *input, int size)
Definition: vf_nnedi.c:864
static const double coeff[2][5]
Definition: vf_owdenoise.c:73
static av_always_inline int diff(const uint32_t a, const uint32_t b)
static const uint8_t offset[127][2]
Definition: vf_spp.c:107
uint8_t bits
Definition: vp3data.h:141
static double c[64]
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: wmv2enc.c:74
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:147
int acc
Definition: yuv2rgb.c:555