FFmpeg  4.3.8
mpegvideo_enc.c
Go to the documentation of this file.
1 /*
2  * The simplest mpeg encoder (well, it was the simplest!)
3  * Copyright (c) 2000,2001 Fabrice Bellard
4  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
5  *
6  * 4MV & hq & B-frame encoding stuff by Michael Niedermayer <michaelni@gmx.at>
7  *
8  * This file is part of FFmpeg.
9  *
10  * FFmpeg is free software; you can redistribute it and/or
11  * modify it under the terms of the GNU Lesser General Public
12  * License as published by the Free Software Foundation; either
13  * version 2.1 of the License, or (at your option) any later version.
14  *
15  * FFmpeg is distributed in the hope that it will be useful,
16  * but WITHOUT ANY WARRANTY; without even the implied warranty of
17  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18  * Lesser General Public License for more details.
19  *
20  * You should have received a copy of the GNU Lesser General Public
21  * License along with FFmpeg; if not, write to the Free Software
22  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
23  */
24 
25 /*
26  * non linear quantizers with large QPs and VBV with restrictive qmin fixes sponsored by NOA GmbH
27  */
28 
29 /**
30  * @file
31  * The simplest mpeg encoder (well, it was the simplest!).
32  */
33 
34 #include <stdint.h>
35 
36 #include "libavutil/internal.h"
37 #include "libavutil/intmath.h"
38 #include "libavutil/mathematics.h"
39 #include "libavutil/pixdesc.h"
40 #include "libavutil/opt.h"
41 #include "avcodec.h"
42 #include "dct.h"
43 #include "idctdsp.h"
44 #include "mpeg12.h"
45 #include "mpegvideo.h"
46 #include "mpegvideodata.h"
47 #include "h261.h"
48 #include "h263.h"
49 #include "h263data.h"
50 #include "mjpegenc_common.h"
51 #include "mathops.h"
52 #include "mpegutils.h"
53 #include "mjpegenc.h"
54 #include "msmpeg4.h"
55 #include "pixblockdsp.h"
56 #include "qpeldsp.h"
57 #include "faandct.h"
58 #include "thread.h"
59 #include "aandcttab.h"
60 #include "flv.h"
61 #include "mpeg4video.h"
62 #include "internal.h"
63 #include "bytestream.h"
64 #include "wmv2.h"
65 #include "rv10.h"
66 #include "packet_internal.h"
67 #include "libxvid.h"
68 #include <limits.h>
69 #include "sp5x.h"
70 
71 #define QUANT_BIAS_SHIFT 8
72 
73 #define QMAT_SHIFT_MMX 16
74 #define QMAT_SHIFT 21
75 
77 static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
78 static int sse_mb(MpegEncContext *s);
79 static void denoise_dct_c(MpegEncContext *s, int16_t *block);
80 static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
81 
84 
87  { NULL },
88 };
89 
/**
 * Precompute quantization multiplier tables for every qscale in [qmin, qmax].
 *
 * For each qscale, fills qmat[qscale][0..63] with fixed-point reciprocals of
 * (qscale2 * quant_matrix[j]) so that quantization can be done by multiply +
 * shift instead of division. The table layout chosen depends on which forward
 * DCT implementation is active (islow/faan vs. ifast vs. the generic path,
 * which additionally fills the 16-bit qmat16 tables used by SIMD code).
 *
 * @param s            encoder context (provides fdsp and idct_permutation)
 * @param qmat         per-qscale 32-bit multiplier table (output)
 * @param qmat16       per-qscale 16-bit multiplier/bias table pair (output,
 *                     only written on the generic code path)
 * @param quant_matrix base 8x8 quantization matrix in natural order
 * @param bias         rounding bias, scaled by QUANT_BIAS_SHIFT
 * @param qmin         first qscale to build tables for (inclusive)
 * @param qmax         last qscale to build tables for (inclusive)
 * @param intra        1 to skip coefficient 0 (DC) in the overflow scan
 */
90 void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
 91  uint16_t (*qmat16)[2][64],
 92  const uint16_t *quant_matrix,
 93  int bias, int qmin, int qmax, int intra)
 94 {
 95  FDCTDSPContext *fdsp = &s->fdsp;
 96  int qscale;
 97  int shift = 0;
 98 
 99  for (qscale = qmin; qscale <= qmax; qscale++) {
 100  int i;
 101  int qscale2;
 102 
 /* NOTE(review): this listing appears corrupted — the `if` line (orig 103)
  * that selects qscale2 for the non-linear quantizer case before this `else`
  * is missing; confirm against the upstream file. */
 104  else qscale2 = qscale << 1;
 105 
 106  if (fdsp->fdct == ff_jpeg_fdct_islow_8 ||
 107 #if CONFIG_FAANDCT
 108  fdsp->fdct == ff_faandct ||
 109 #endif /* CONFIG_FAANDCT */
 110  fdsp->fdct == ff_jpeg_fdct_islow_10) {
 111  for (i = 0; i < 64; i++) {
 /* j: coefficient index after IDCT permutation, so tables match scan order */
 112  const int j = s->idsp.idct_permutation[i];
 113  int64_t den = (int64_t) qscale2 * quant_matrix[j];
 114  /* 16 <= qscale * quant_matrix[i] <= 7905
 115  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
 116  * 19952 <= x <= 249205026
 117  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
 118  * 3444240 >= (1 << 36) / (x) >= 275 */
 119 
 /* reciprocal with one extra bit (2 << SHIFT) for rounding headroom */
 120  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
 121  }
 122  } else if (fdsp->fdct == ff_fdct_ifast) {
 123  for (i = 0; i < 64; i++) {
 124  const int j = s->idsp.idct_permutation[i];
 /* ifast DCT output is pre-scaled by ff_aanscales; fold that into den */
 125  int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
 126  /* 16 <= qscale * quant_matrix[i] <= 7905
 127  * Assume x = ff_aanscales[i] * qscale * quant_matrix[i]
 128  * 19952 <= x <= 249205026
 129  * (1 << 36) / 19952 >= (1 << 36) / (x) >= (1 << 36) / 249205026
 130  * 3444240 >= (1 << 36) / (x) >= 275 */
 131 
 /* +14 compensates the aanscales fixed-point factor folded into den */
 132  qmat[qscale][i] = (int)((UINT64_C(2) << (QMAT_SHIFT + 14)) / den);
 133  }
 134  } else {
 135  for (i = 0; i < 64; i++) {
 136  const int j = s->idsp.idct_permutation[i];
 137  int64_t den = (int64_t) qscale2 * quant_matrix[j];
 138  /* We can safely suppose that 16 <= quant_matrix[i] <= 255
 139  * Assume x = qscale * quant_matrix[i]
 140  * So 16 <= x <= 7905
 141  * so (1 << 19) / 16 >= (1 << 19) / (x) >= (1 << 19) / 7905
 142  * so 32768 >= (1 << 19) / (x) >= 67 */
 143  qmat[qscale][i] = (int)((UINT64_C(2) << QMAT_SHIFT) / den);
 144  //qmat [qscale][i] = (1 << QMAT_SHIFT_MMX) /
 145  // (qscale * quant_matrix[i]);
 /* 16-bit multiplier for SIMD; clamp away 0 and the 0x8000 overflow value */
 146  qmat16[qscale][0][i] = (2 << QMAT_SHIFT_MMX) / den;
 147 
 148  if (qmat16[qscale][0][i] == 0 ||
 149  qmat16[qscale][0][i] == 128 * 256)
 150  qmat16[qscale][0][i] = 128 * 256 - 1;
 /* matching rounding bias for the 16-bit multiplier */
 151  qmat16[qscale][1][i] =
 152  ROUNDED_DIV(bias * (1<<(16 - QUANT_BIAS_SHIFT)),
 153  qmat16[qscale][0][i]);
 154  }
 155  }
 156 
 /* scan for possible 32-bit overflow of max_coeff * multiplier; grow shift
  * until the product fits (DC skipped when intra != 0) */
 157  for (i = intra; i < 64; i++) {
 158  int64_t max = 8191;
 159  if (fdsp->fdct == ff_fdct_ifast) {
 160  max = (8191LL * ff_aanscales[i]) >> 14;
 161  }
 162  while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
 163  shift++;
 164  }
 165  }
 166  }
 167  if (shift) {
 /* NOTE(review): the av_log(...) call opening this message (orig line 168)
  * is missing from this listing; confirm against the upstream file. */
 169  "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
 170  QMAT_SHIFT - shift);
 171  }
 172 }
173 
/**
 * Derive s->qscale (and s->lambda2) from the current s->lambda.
 *
 * The non-linear branch is explicitly disabled ("&& 0"): it would pick the
 * ff_mpeg2_non_linear_qscale entry whose scaled value is closest to lambda.
 * The live path converts lambda to a linear qscale with rounding and clamps
 * it to [qmin, qmax] (qmax relaxed to 31 while VBV forces qmin to be ignored).
 */
174 static inline void update_qscale(MpegEncContext *s)
{
 175 {
 176  if (s->q_scale_type == 1 && 0) {
 177  int i;
 178  int bestdiff=INT_MAX;
 179  int best = 1;
 180 
 181  for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
 /* distance between candidate qscale (lambda-scaled) and current lambda */
 182  int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
 183  if (ff_mpeg2_non_linear_qscale[i] < s->avctx->qmin ||
 /* NOTE(review): the second half of this condition (orig line 184, the qmax
  * check) is missing from this listing; confirm against the upstream file. */
 185  continue;
 186  if (diff < bestdiff) {
 187  bestdiff = diff;
 188  best = i;
 189  }
 190  }
 191  s->qscale = best;
 192  } else {
 /* lambda -> qscale: *139/2^(LAMBDA_SHIFT+7) with +64*SCALE for rounding */
 193  s->qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
 194  (FF_LAMBDA_SHIFT + 7);
 195  s->qscale = av_clip(s->qscale, s->avctx->qmin, s->vbv_ignore_qmax ? 31 : s->avctx->qmax);
 196  }
 197 
 /* keep lambda2 ~= lambda^2 in FF_LAMBDA fixed point, rounded */
 198  s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
 /* NOTE(review): the shift constant line (orig 199) is missing from this
  * listing; confirm against the upstream file. */
 200 }
201 
202 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
203 {
204  int i;
205 
206  if (matrix) {
207  put_bits(pb, 1, 1);
208  for (i = 0; i < 64; i++) {
209  put_bits(pb, 8, matrix[ff_zigzag_direct[i]]);
210  }
211  } else
212  put_bits(pb, 1, 0);
213 }
214 
215 /**
216  * init s->current_picture.qscale_table from s->lambda_table
217  */
219 {
220  int8_t * const qscale_table = s->current_picture.qscale_table;
221  int i;
222 
223  for (i = 0; i < s->mb_num; i++) {
224  unsigned int lam = s->lambda_table[s->mb_index2xy[i]];
225  int qp = (lam * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
226  qscale_table[s->mb_index2xy[i]] = av_clip(qp, s->avctx->qmin,
227  s->avctx->qmax);
228  }
229 }
230 
233 {
234 #define COPY(a) dst->a= src->a
235  COPY(pict_type);
237  COPY(f_code);
238  COPY(b_code);
239  COPY(qscale);
240  COPY(lambda);
241  COPY(lambda2);
244  COPY(frame_pred_frame_dct); // FIXME don't set in encode_header
245  COPY(progressive_frame); // FIXME don't set in encode_header
246  COPY(partitioned_frame); // FIXME don't set in encode_header
247 #undef COPY
248 }
249 
250 /**
251  * Set the given MpegEncContext to defaults for encoding.
252  * the changed fields will not depend upon the prior state of the MpegEncContext.
253  */
255 {
256  int i;
258 
259  for (i = -16; i < 16; i++) {
260  default_fcode_tab[i + MAX_MV] = 1;
261  }
264 
265  s->input_picture_number = 0;
266  s->picture_in_gop_number = 0;
267 }
268 
270 {
271  if (ARCH_X86)
273 
276  if (!s->dct_quantize)
278  if (!s->denoise_dct)
281  if (s->avctx->trellis)
283 
284  return 0;
285 }
286 
287 /* init video encoder */
289 {
290  MpegEncContext *s = avctx->priv_data;
291  AVCPBProperties *cpb_props;
292  int i, ret, format_supported;
293 
295 
296  switch (avctx->codec_id) {
298  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
299  avctx->pix_fmt != AV_PIX_FMT_YUV422P) {
300  av_log(avctx, AV_LOG_ERROR,
301  "only YUV420 and YUV422 are supported\n");
302  return AVERROR(EINVAL);
303  }
304  break;
305  case AV_CODEC_ID_MJPEG:
306  case AV_CODEC_ID_AMV:
307  format_supported = 0;
308  /* JPEG color space */
309  if (avctx->pix_fmt == AV_PIX_FMT_YUVJ420P ||
310  avctx->pix_fmt == AV_PIX_FMT_YUVJ422P ||
311  avctx->pix_fmt == AV_PIX_FMT_YUVJ444P ||
312  (avctx->color_range == AVCOL_RANGE_JPEG &&
313  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
314  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
315  avctx->pix_fmt == AV_PIX_FMT_YUV444P)))
316  format_supported = 1;
317  /* MPEG color space */
318  else if (avctx->strict_std_compliance <= FF_COMPLIANCE_UNOFFICIAL &&
319  (avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
320  avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
321  avctx->pix_fmt == AV_PIX_FMT_YUV444P))
322  format_supported = 1;
323 
324  if (!format_supported) {
325  av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
326  return AVERROR(EINVAL);
327  }
328  break;
329  default:
330  if (avctx->pix_fmt != AV_PIX_FMT_YUV420P) {
331  av_log(avctx, AV_LOG_ERROR, "only YUV420 is supported\n");
332  return AVERROR(EINVAL);
333  }
334  }
335 
336  switch (avctx->pix_fmt) {
337  case AV_PIX_FMT_YUVJ444P:
338  case AV_PIX_FMT_YUV444P:
340  break;
341  case AV_PIX_FMT_YUVJ422P:
342  case AV_PIX_FMT_YUV422P:
344  break;
345  case AV_PIX_FMT_YUVJ420P:
346  case AV_PIX_FMT_YUV420P:
347  default:
349  break;
350  }
351 
352  avctx->bits_per_raw_sample = av_clip(avctx->bits_per_raw_sample, 0, 8);
353 
354 #if FF_API_PRIVATE_OPT
356  if (avctx->rtp_payload_size)
358  if (avctx->me_penalty_compensation)
360  if (avctx->pre_me)
361  s->me_pre = avctx->pre_me;
363 #endif
364 
365  s->bit_rate = avctx->bit_rate;
366  s->width = avctx->width;
367  s->height = avctx->height;
368  if (avctx->gop_size > 600 &&
370  av_log(avctx, AV_LOG_WARNING,
371  "keyframe interval too large!, reducing it from %d to %d\n",
372  avctx->gop_size, 600);
373  avctx->gop_size = 600;
374  }
375  s->gop_size = avctx->gop_size;
376  s->avctx = avctx;
377  if (avctx->max_b_frames > MAX_B_FRAMES) {
378  av_log(avctx, AV_LOG_ERROR, "Too many B-frames requested, maximum "
379  "is %d.\n", MAX_B_FRAMES);
380  avctx->max_b_frames = MAX_B_FRAMES;
381  }
382  s->max_b_frames = avctx->max_b_frames;
383  s->codec_id = avctx->codec->id;
385  s->quarter_sample = (avctx->flags & AV_CODEC_FLAG_QPEL) != 0;
386  s->rtp_mode = !!s->rtp_payload_size;
388 
389  // workaround some differences between how applications specify dc precision
390  if (s->intra_dc_precision < 0) {
391  s->intra_dc_precision += 8;
392  } else if (s->intra_dc_precision >= 8)
393  s->intra_dc_precision -= 8;
394 
395  if (s->intra_dc_precision < 0) {
396  av_log(avctx, AV_LOG_ERROR,
397  "intra dc precision must be positive, note some applications use"
398  " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
399  return AVERROR(EINVAL);
400  }
401 
402  if (avctx->codec_id == AV_CODEC_ID_AMV || (avctx->active_thread_type & FF_THREAD_SLICE))
403  s->huffman = 0;
404 
405  if (s->intra_dc_precision > (avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 3 : 0)) {
406  av_log(avctx, AV_LOG_ERROR, "intra dc precision too large\n");
407  return AVERROR(EINVAL);
408  }
410 
411  if (s->gop_size <= 1) {
412  s->intra_only = 1;
413  s->gop_size = 12;
414  } else {
415  s->intra_only = 0;
416  }
417 
418  /* Fixed QSCALE */
419  s->fixed_qscale = !!(avctx->flags & AV_CODEC_FLAG_QSCALE);
420 
421  s->adaptive_quant = (s->avctx->lumi_masking ||
422  s->avctx->dark_masking ||
425  s->avctx->p_masking ||
426  s->border_masking ||
427  (s->mpv_flags & FF_MPV_FLAG_QP_RD)) &&
428  !s->fixed_qscale;
429 
431 
432  if (avctx->rc_max_rate && !avctx->rc_buffer_size) {
433  switch(avctx->codec_id) {
436  avctx->rc_buffer_size = FFMAX(avctx->rc_max_rate, 15000000) * 112LL / 15000000 * 16384;
437  break;
438  case AV_CODEC_ID_MPEG4:
442  if (avctx->rc_max_rate >= 15000000) {
443  avctx->rc_buffer_size = 320 + (avctx->rc_max_rate - 15000000LL) * (760-320) / (38400000 - 15000000);
444  } else if(avctx->rc_max_rate >= 2000000) {
445  avctx->rc_buffer_size = 80 + (avctx->rc_max_rate - 2000000LL) * (320- 80) / (15000000 - 2000000);
446  } else if(avctx->rc_max_rate >= 384000) {
447  avctx->rc_buffer_size = 40 + (avctx->rc_max_rate - 384000LL) * ( 80- 40) / ( 2000000 - 384000);
448  } else
449  avctx->rc_buffer_size = 40;
450  avctx->rc_buffer_size *= 16384;
451  break;
452  }
453  if (avctx->rc_buffer_size) {
454  av_log(avctx, AV_LOG_INFO, "Automatically choosing VBV buffer size of %d kbyte\n", avctx->rc_buffer_size/8192);
455  }
456  }
457 
458  if ((!avctx->rc_max_rate) != (!avctx->rc_buffer_size)) {
459  av_log(avctx, AV_LOG_ERROR, "Either both buffer size and max rate or neither must be specified\n");
460  return AVERROR(EINVAL);
461  }
462 
463  if (avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate) {
464  av_log(avctx, AV_LOG_INFO,
465  "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
466  }
467 
468  if (avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate) {
469  av_log(avctx, AV_LOG_ERROR, "bitrate below min bitrate\n");
470  return AVERROR(EINVAL);
471  }
472 
473  if (avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate) {
474  av_log(avctx, AV_LOG_ERROR, "bitrate above max bitrate\n");
475  return AVERROR(EINVAL);
476  }
477 
478  if (avctx->rc_max_rate &&
479  avctx->rc_max_rate == avctx->bit_rate &&
480  avctx->rc_max_rate != avctx->rc_min_rate) {
481  av_log(avctx, AV_LOG_INFO,
482  "impossible bitrate constraints, this will fail\n");
483  }
484 
485  if (avctx->rc_buffer_size &&
486  avctx->bit_rate * (int64_t)avctx->time_base.num >
487  avctx->rc_buffer_size * (int64_t)avctx->time_base.den) {
488  av_log(avctx, AV_LOG_ERROR, "VBV buffer too small for bitrate\n");
489  return AVERROR(EINVAL);
490  }
491 
492  if (!s->fixed_qscale &&
493  avctx->bit_rate * av_q2d(avctx->time_base) >
494  avctx->bit_rate_tolerance) {
495  double nbt = avctx->bit_rate * av_q2d(avctx->time_base) * 5;
496  av_log(avctx, AV_LOG_WARNING,
497  "bitrate tolerance %d too small for bitrate %"PRId64", overriding\n", avctx->bit_rate_tolerance, avctx->bit_rate);
498  if (nbt <= INT_MAX) {
499  avctx->bit_rate_tolerance = nbt;
500  } else
501  avctx->bit_rate_tolerance = INT_MAX;
502  }
503 
504  if (s->avctx->rc_max_rate &&
505  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
508  90000LL * (avctx->rc_buffer_size - 1) >
509  s->avctx->rc_max_rate * 0xFFFFLL) {
510  av_log(avctx, AV_LOG_INFO,
511  "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
512  "specified vbv buffer is too large for the given bitrate!\n");
513  }
514 
515  if ((s->avctx->flags & AV_CODEC_FLAG_4MV) && s->codec_id != AV_CODEC_ID_MPEG4 &&
517  s->codec_id != AV_CODEC_ID_FLV1) {
518  av_log(avctx, AV_LOG_ERROR, "4MV not supported by codec\n");
519  return AVERROR(EINVAL);
520  }
521 
522  if (s->obmc && s->avctx->mb_decision != FF_MB_DECISION_SIMPLE) {
523  av_log(avctx, AV_LOG_ERROR,
524  "OBMC is only supported with simple mb decision\n");
525  return AVERROR(EINVAL);
526  }
527 
528  if (s->quarter_sample && s->codec_id != AV_CODEC_ID_MPEG4) {
529  av_log(avctx, AV_LOG_ERROR, "qpel not supported by codec\n");
530  return AVERROR(EINVAL);
531  }
532 
533  if (s->max_b_frames &&
534  s->codec_id != AV_CODEC_ID_MPEG4 &&
537  av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
538  return AVERROR(EINVAL);
539  }
540  if (s->max_b_frames < 0) {
541  av_log(avctx, AV_LOG_ERROR,
542  "max b frames must be 0 or positive for mpegvideo based encoders\n");
543  return AVERROR(EINVAL);
544  }
545 
546  if ((s->codec_id == AV_CODEC_ID_MPEG4 ||
547  s->codec_id == AV_CODEC_ID_H263 ||
548  s->codec_id == AV_CODEC_ID_H263P) &&
549  (avctx->sample_aspect_ratio.num > 255 ||
550  avctx->sample_aspect_ratio.den > 255)) {
551  av_log(avctx, AV_LOG_WARNING,
552  "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
555  avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den, 255);
556  }
557 
558  if ((s->codec_id == AV_CODEC_ID_H263 ||
559  s->codec_id == AV_CODEC_ID_H263P) &&
560  (avctx->width > 2048 ||
561  avctx->height > 1152 )) {
562  av_log(avctx, AV_LOG_ERROR, "H.263 does not support resolutions above 2048x1152\n");
563  return AVERROR(EINVAL);
564  }
565  if ((s->codec_id == AV_CODEC_ID_H263 ||
566  s->codec_id == AV_CODEC_ID_H263P) &&
567  ((avctx->width &3) ||
568  (avctx->height&3) )) {
569  av_log(avctx, AV_LOG_ERROR, "w/h must be a multiple of 4\n");
570  return AVERROR(EINVAL);
571  }
572 
573  if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
574  (avctx->width > 4095 ||
575  avctx->height > 4095 )) {
576  av_log(avctx, AV_LOG_ERROR, "MPEG-1 does not support resolutions above 4095x4095\n");
577  return AVERROR(EINVAL);
578  }
579 
580  if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO &&
581  (avctx->width > 16383 ||
582  avctx->height > 16383 )) {
583  av_log(avctx, AV_LOG_ERROR, "MPEG-2 does not support resolutions above 16383x16383\n");
584  return AVERROR(EINVAL);
585  }
586 
587  if (s->codec_id == AV_CODEC_ID_RV10 &&
588  (avctx->width &15 ||
589  avctx->height&15 )) {
590  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 16\n");
591  return AVERROR(EINVAL);
592  }
593 
594  if (s->codec_id == AV_CODEC_ID_RV20 &&
595  (avctx->width &3 ||
596  avctx->height&3 )) {
597  av_log(avctx, AV_LOG_ERROR, "width and height must be a multiple of 4\n");
598  return AVERROR(EINVAL);
599  }
600 
601  if ((s->codec_id == AV_CODEC_ID_WMV1 ||
602  s->codec_id == AV_CODEC_ID_WMV2) &&
603  avctx->width & 1) {
604  av_log(avctx, AV_LOG_ERROR, "width must be multiple of 2\n");
605  return AVERROR(EINVAL);
606  }
607 
610  av_log(avctx, AV_LOG_ERROR, "interlacing not supported by codec\n");
611  return AVERROR(EINVAL);
612  }
613 
614 #if FF_API_PRIVATE_OPT
616  if (avctx->mpeg_quant)
617  s->mpeg_quant = avctx->mpeg_quant;
619 #endif
620 
621  // FIXME mpeg2 uses that too
622  if (s->mpeg_quant && ( s->codec_id != AV_CODEC_ID_MPEG4
623  && s->codec_id != AV_CODEC_ID_MPEG2VIDEO)) {
624  av_log(avctx, AV_LOG_ERROR,
625  "mpeg2 style quantization not supported by codec\n");
626  return AVERROR(EINVAL);
627  }
628 
629  if ((s->mpv_flags & FF_MPV_FLAG_CBP_RD) && !avctx->trellis) {
630  av_log(avctx, AV_LOG_ERROR, "CBP RD needs trellis quant\n");
631  return AVERROR(EINVAL);
632  }
633 
634  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
636  av_log(avctx, AV_LOG_ERROR, "QP RD needs mbd=2\n");
637  return AVERROR(EINVAL);
638  }
639 
640  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) &&
641  (s->codec_id == AV_CODEC_ID_AMV ||
642  s->codec_id == AV_CODEC_ID_MJPEG)) {
643  // Used to produce garbage with MJPEG.
644  av_log(avctx, AV_LOG_ERROR,
645  "QP RD is no longer compatible with MJPEG or AMV\n");
646  return AVERROR(EINVAL);
647  }
648 
649 #if FF_API_PRIVATE_OPT
651  if (avctx->scenechange_threshold)
654 #endif
655 
656  if (s->scenechange_threshold < 1000000000 &&
658  av_log(avctx, AV_LOG_ERROR,
659  "closed gop with scene change detection are not supported yet, "
660  "set threshold to 1000000000\n");
661  return AVERROR_PATCHWELCOME;
662  }
663 
664  if (s->avctx->flags & AV_CODEC_FLAG_LOW_DELAY) {
665  if (s->codec_id != AV_CODEC_ID_MPEG2VIDEO &&
667  av_log(avctx, AV_LOG_ERROR,
668  "low delay forcing is only available for mpeg2, "
669  "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
670  return AVERROR(EINVAL);
671  }
672  if (s->max_b_frames != 0) {
673  av_log(avctx, AV_LOG_ERROR,
674  "B-frames cannot be used with low delay\n");
675  return AVERROR(EINVAL);
676  }
677  }
678 
679  if (s->q_scale_type == 1) {
680  if (avctx->qmax > 28) {
681  av_log(avctx, AV_LOG_ERROR,
682  "non linear quant only supports qmax <= 28 currently\n");
683  return AVERROR_PATCHWELCOME;
684  }
685  }
686 
687  if (avctx->slices > 1 &&
688  (avctx->codec_id == AV_CODEC_ID_FLV1 || avctx->codec_id == AV_CODEC_ID_H261)) {
689  av_log(avctx, AV_LOG_ERROR, "Multiple slices are not supported by this codec\n");
690  return AVERROR(EINVAL);
691  }
692 
693  if (s->avctx->thread_count > 1 &&
694  s->codec_id != AV_CODEC_ID_MPEG4 &&
697  s->codec_id != AV_CODEC_ID_MJPEG &&
698  (s->codec_id != AV_CODEC_ID_H263P)) {
699  av_log(avctx, AV_LOG_ERROR,
700  "multi threaded encoding not supported by codec\n");
701  return AVERROR_PATCHWELCOME;
702  }
703 
704  if (s->avctx->thread_count < 1) {
705  av_log(avctx, AV_LOG_ERROR,
706  "automatic thread number detection not supported by codec, "
707  "patch welcome\n");
708  return AVERROR_PATCHWELCOME;
709  }
710 
711  if (!avctx->time_base.den || !avctx->time_base.num) {
712  av_log(avctx, AV_LOG_ERROR, "framerate not set\n");
713  return AVERROR(EINVAL);
714  }
715 
716 #if FF_API_PRIVATE_OPT
718  if (avctx->b_frame_strategy)
720  if (avctx->b_sensitivity != 40)
721  s->b_sensitivity = avctx->b_sensitivity;
723 #endif
724 
725  if (s->b_frame_strategy && (avctx->flags & AV_CODEC_FLAG_PASS2)) {
726  av_log(avctx, AV_LOG_INFO,
727  "notice: b_frame_strategy only affects the first pass\n");
728  s->b_frame_strategy = 0;
729  }
730 
731  i = av_gcd(avctx->time_base.den, avctx->time_base.num);
732  if (i > 1) {
733  av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n");
734  avctx->time_base.den /= i;
735  avctx->time_base.num /= i;
736  //return -1;
737  }
738 
740  // (a + x * 3 / 8) / x
741  s->intra_quant_bias = 3 << (QUANT_BIAS_SHIFT - 3);
742  s->inter_quant_bias = 0;
743  } else {
744  s->intra_quant_bias = 0;
745  // (a - x / 4) / x
746  s->inter_quant_bias = -(1 << (QUANT_BIAS_SHIFT - 2));
747  }
748 
749  if (avctx->qmin > avctx->qmax || avctx->qmin <= 0) {
750  av_log(avctx, AV_LOG_ERROR, "qmin and or qmax are invalid, they must be 0 < min <= max\n");
751  return AVERROR(EINVAL);
752  }
753 
754  av_log(avctx, AV_LOG_DEBUG, "intra_quant_bias = %d inter_quant_bias = %d\n",s->intra_quant_bias,s->inter_quant_bias);
755 
756  if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
757  s->avctx->time_base.den > (1 << 16) - 1) {
758  av_log(avctx, AV_LOG_ERROR,
759  "timebase %d/%d not supported by MPEG 4 standard, "
760  "the maximum admitted value for the timebase denominator "
761  "is %d\n", s->avctx->time_base.num, s->avctx->time_base.den,
762  (1 << 16) - 1);
763  return AVERROR(EINVAL);
764  }
765  s->time_increment_bits = av_log2(s->avctx->time_base.den - 1) + 1;
766 
767  switch (avctx->codec->id) {
769  s->out_format = FMT_MPEG1;
771  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
772  break;
774  s->out_format = FMT_MPEG1;
776  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
777  s->rtp_mode = 1;
778  break;
779  case AV_CODEC_ID_MJPEG:
780  case AV_CODEC_ID_AMV:
781  s->out_format = FMT_MJPEG;
782  s->intra_only = 1; /* force intra only for jpeg */
785  if ((ret = ff_mjpeg_encode_init(s)) < 0)
786  return ret;
787  avctx->delay = 0;
788  s->low_delay = 1;
789  break;
790  case AV_CODEC_ID_H261:
791  if (!CONFIG_H261_ENCODER)
793  if (ff_h261_get_picture_format(s->width, s->height) < 0) {
794  av_log(avctx, AV_LOG_ERROR,
795  "The specified picture size of %dx%d is not valid for the "
796  "H.261 codec.\nValid sizes are 176x144, 352x288\n",
797  s->width, s->height);
798  return AVERROR(EINVAL);
799  }
800  s->out_format = FMT_H261;
801  avctx->delay = 0;
802  s->low_delay = 1;
803  s->rtp_mode = 0; /* Sliced encoding not supported */
804  break;
805  case AV_CODEC_ID_H263:
806  if (!CONFIG_H263_ENCODER)
809  s->width, s->height) == 8) {
810  av_log(avctx, AV_LOG_ERROR,
811  "The specified picture size of %dx%d is not valid for "
812  "the H.263 codec.\nValid sizes are 128x96, 176x144, "
813  "352x288, 704x576, and 1408x1152. "
814  "Try H.263+.\n", s->width, s->height);
815  return AVERROR(EINVAL);
816  }
817  s->out_format = FMT_H263;
818  avctx->delay = 0;
819  s->low_delay = 1;
820  break;
821  case AV_CODEC_ID_H263P:
822  s->out_format = FMT_H263;
823  s->h263_plus = 1;
824  /* Fx */
825  s->h263_aic = (avctx->flags & AV_CODEC_FLAG_AC_PRED) ? 1 : 0;
826  s->modified_quant = s->h263_aic;
827  s->loop_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
828  s->unrestricted_mv = s->obmc || s->loop_filter || s->umvplus;
829 
830  /* /Fx */
831  /* These are just to be sure */
832  avctx->delay = 0;
833  s->low_delay = 1;
834  break;
835  case AV_CODEC_ID_FLV1:
836  s->out_format = FMT_H263;
837  s->h263_flv = 2; /* format = 1; 11-bit codes */
838  s->unrestricted_mv = 1;
839  s->rtp_mode = 0; /* don't allow GOB */
840  avctx->delay = 0;
841  s->low_delay = 1;
842  break;
843  case AV_CODEC_ID_RV10:
844  s->out_format = FMT_H263;
845  avctx->delay = 0;
846  s->low_delay = 1;
847  break;
848  case AV_CODEC_ID_RV20:
849  s->out_format = FMT_H263;
850  avctx->delay = 0;
851  s->low_delay = 1;
852  s->modified_quant = 1;
853  s->h263_aic = 1;
854  s->h263_plus = 1;
855  s->loop_filter = 1;
856  s->unrestricted_mv = 0;
857  break;
858  case AV_CODEC_ID_MPEG4:
859  s->out_format = FMT_H263;
860  s->h263_pred = 1;
861  s->unrestricted_mv = 1;
862  s->low_delay = s->max_b_frames ? 0 : 1;
863  avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
864  break;
866  s->out_format = FMT_H263;
867  s->h263_pred = 1;
868  s->unrestricted_mv = 1;
869  s->msmpeg4_version = 2;
870  avctx->delay = 0;
871  s->low_delay = 1;
872  break;
874  s->out_format = FMT_H263;
875  s->h263_pred = 1;
876  s->unrestricted_mv = 1;
877  s->msmpeg4_version = 3;
878  s->flipflop_rounding = 1;
879  avctx->delay = 0;
880  s->low_delay = 1;
881  break;
882  case AV_CODEC_ID_WMV1:
883  s->out_format = FMT_H263;
884  s->h263_pred = 1;
885  s->unrestricted_mv = 1;
886  s->msmpeg4_version = 4;
887  s->flipflop_rounding = 1;
888  avctx->delay = 0;
889  s->low_delay = 1;
890  break;
891  case AV_CODEC_ID_WMV2:
892  s->out_format = FMT_H263;
893  s->h263_pred = 1;
894  s->unrestricted_mv = 1;
895  s->msmpeg4_version = 5;
896  s->flipflop_rounding = 1;
897  avctx->delay = 0;
898  s->low_delay = 1;
899  break;
900  default:
901  return AVERROR(EINVAL);
902  }
903 
904 #if FF_API_PRIVATE_OPT
906  if (avctx->noise_reduction)
907  s->noise_reduction = avctx->noise_reduction;
909 #endif
910 
911  avctx->has_b_frames = !s->low_delay;
912 
913  s->encoding = 1;
914 
915  s->progressive_frame =
918  s->alternate_scan);
919 
920  /* init */
921  ff_mpv_idct_init(s);
922  if ((ret = ff_mpv_common_init(s)) < 0)
923  return ret;
924 
925  ff_fdctdsp_init(&s->fdsp, avctx);
926  ff_me_cmp_init(&s->mecc, avctx);
928  ff_pixblockdsp_init(&s->pdsp, avctx);
929  ff_qpeldsp_init(&s->qdsp);
930 
931  if (s->msmpeg4_version) {
933  2 * 2 * (MAX_LEVEL + 1) *
934  (MAX_RUN + 1) * 2 * sizeof(int), fail);
935  }
936  FF_ALLOCZ_OR_GOTO(s->avctx, s->avctx->stats_out, 256, fail);
937 
938  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix, 64 * 32 * sizeof(int), fail);
939  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix, 64 * 32 * sizeof(int), fail);
940  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix, 64 * 32 * sizeof(int), fail);
941  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
942  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_chroma_intra_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
943  FF_ALLOCZ_OR_GOTO(s->avctx, s->q_inter_matrix16, 64 * 32 * 2 * sizeof(uint16_t), fail);
945  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
947  MAX_PICTURE_COUNT * sizeof(Picture *), fail);
948 
949 
950  if (s->noise_reduction) {
952  2 * 64 * sizeof(uint16_t), fail);
953  }
954 
956 
959 
960  if (s->slice_context_count > 1) {
961  s->rtp_mode = 1;
962 
963  if (avctx->codec_id == AV_CODEC_ID_H263P)
964  s->h263_slice_structured = 1;
965  }
966 
967  s->quant_precision = 5;
968 
969 #if FF_API_PRIVATE_OPT
971  if (avctx->frame_skip_threshold)
973  if (avctx->frame_skip_factor)
975  if (avctx->frame_skip_exp)
976  s->frame_skip_exp = avctx->frame_skip_exp;
977  if (avctx->frame_skip_cmp != FF_CMP_DCTMAX)
978  s->frame_skip_cmp = avctx->frame_skip_cmp;
980 #endif
981 
984 
990  if ((ret = ff_msmpeg4_encode_init(s)) < 0)
991  return ret;
993  && s->out_format == FMT_MPEG1)
995 
996  /* init q matrix */
997  for (i = 0; i < 64; i++) {
998  int j = s->idsp.idct_permutation[i];
1000  s->mpeg_quant) {
1003  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1004  s->intra_matrix[j] =
1006  } else {
1007  /* MPEG-1/2 */
1008  s->chroma_intra_matrix[j] =
1011  }
1012  if (s->avctx->intra_matrix)
1013  s->intra_matrix[j] = s->avctx->intra_matrix[i];
1014  if (s->avctx->inter_matrix)
1015  s->inter_matrix[j] = s->avctx->inter_matrix[i];
1016  }
1017 
1018  /* precompute matrix */
1019  /* for mjpeg, we do include qscale in the matrix */
1020  if (s->out_format != FMT_MJPEG) {
1022  s->intra_matrix, s->intra_quant_bias, avctx->qmin,
1023  31, 1);
1025  s->inter_matrix, s->inter_quant_bias, avctx->qmin,
1026  31, 0);
1027  }
1028 
1029  if ((ret = ff_rate_control_init(s)) < 0)
1030  return ret;
1031 
1032 #if FF_API_PRIVATE_OPT
1034  if (avctx->brd_scale)
1035  s->brd_scale = avctx->brd_scale;
1036 
1037  if (avctx->prediction_method)
1038  s->pred = avctx->prediction_method + 1;
1040 #endif
1041 
1042  if (s->b_frame_strategy == 2) {
1043  for (i = 0; i < s->max_b_frames + 2; i++) {
1044  s->tmp_frames[i] = av_frame_alloc();
1045  if (!s->tmp_frames[i])
1046  return AVERROR(ENOMEM);
1047 
1049  s->tmp_frames[i]->width = s->width >> s->brd_scale;
1050  s->tmp_frames[i]->height = s->height >> s->brd_scale;
1051 
1052  ret = av_frame_get_buffer(s->tmp_frames[i], 0);
1053  if (ret < 0)
1054  return ret;
1055  }
1056  }
1057 
1058  cpb_props = ff_add_cpb_side_data(avctx);
1059  if (!cpb_props)
1060  return AVERROR(ENOMEM);
1061  cpb_props->max_bitrate = avctx->rc_max_rate;
1062  cpb_props->min_bitrate = avctx->rc_min_rate;
1063  cpb_props->avg_bitrate = avctx->bit_rate;
1064  cpb_props->buffer_size = avctx->rc_buffer_size;
1065 
1066  return 0;
1067 fail:
1068  ff_mpv_encode_end(avctx);
1069  return AVERROR_UNKNOWN;
1070 }
1071 
1073 {
1074  MpegEncContext *s = avctx->priv_data;
1075  int i;
1076 
1078 
1079  ff_mpv_common_end(s);
1080  if (CONFIG_MJPEG_ENCODER &&
1081  s->out_format == FMT_MJPEG)
1083 
1084  av_freep(&avctx->extradata);
1085 
1086  for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
1087  av_frame_free(&s->tmp_frames[i]);
1088 
1091 
1092  av_freep(&s->avctx->stats_out);
1093  av_freep(&s->ac_stats);
1094 
1099  av_freep(&s->q_intra_matrix);
1100  av_freep(&s->q_inter_matrix);
1103  av_freep(&s->input_picture);
1105  av_freep(&s->dct_offset);
1106 
1107  return 0;
1108 }
1109 
/**
 * Sum of absolute errors of a 16x16 block against a constant reference value.
 *
 * @param src    top-left of the 16x16 pixel block
 * @param ref    constant value each pixel is compared against
 * @param stride distance in bytes between successive rows of src
 * @return sum over the 256 pixels of |src[x,y] - ref|
 */
static int get_sae(uint8_t *src, int ref, int stride)
{
    int row, col;
    int total = 0;

    for (row = 0; row < 16; row++) {
        for (col = 0; col < 16; col++) {
            int diff = src[col + row * stride] - ref;
            total += diff >= 0 ? diff : -diff;
        }
    }

    return total;
}
1123 
1125  uint8_t *ref, int stride)
1126 {
1127  int x, y, w, h;
1128  int acc = 0;
1129 
1130  w = s->width & ~15;
1131  h = s->height & ~15;
1132 
1133  for (y = 0; y < h; y += 16) {
1134  for (x = 0; x < w; x += 16) {
1135  int offset = x + y * stride;
1136  int sad = s->mecc.sad[0](NULL, src + offset, ref + offset,
1137  stride, 16);
1138  int mean = (s->mpvencdsp.pix_sum(src + offset, stride) + 128) >> 8;
1139  int sae = get_sae(src + offset, mean, stride);
1140 
1141  acc += sae + 500 < sad;
1142  }
1143  }
1144  return acc;
1145 }
1146 
/**
 * Allocate (or wrap, when shared) the buffers of an encoder Picture via
 * ff_alloc_picture, forwarding the context's geometry and receiving the
 * chosen luma/chroma linesizes back into s->linesize / s->uvlinesize.
 *
 * @param s      encoder context supplying geometry and scratch contexts
 * @param pic    picture whose buffers are (re)allocated
 * @param shared nonzero when pic wraps externally owned frame data
 * @return ff_alloc_picture's return value (0 on success, negative on error)
 */
1147 static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
 1148 {
 1149  return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
 /* NOTE(review): a continuation line of this argument list (orig 1150)
  * appears to be missing from this listing; confirm against upstream. */
 1151  s->mb_stride, s->mb_width, s->mb_height, s->b8_stride,
 1152  &s->linesize, &s->uvlinesize);
 1153 }
1154 
/**
 * Queue a user-supplied frame for encoding.
 *
 * Stores pic_arg — by reference when "direct" use is possible, otherwise
 * by copy into an internal Picture — at s->input_picture[encoding_delay]
 * after shifting the existing entries down.  Also validates or generates
 * pts values.  A NULL pic_arg signals flushing.
 *
 * @return 0 on success, a negative AVERROR value on failure
 */
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
{
    Picture *pic = NULL;
    int64_t pts;
    int i, display_picture_number = 0, ret;
    int encoding_delay = s->max_b_frames ? s->max_b_frames
                                         : (s->low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;

    if (pic_arg) {
        pts = pic_arg->pts;
        display_picture_number = s->input_picture_number++;

        if (pts != AV_NOPTS_VALUE) {
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                int64_t last = s->user_specified_pts;

                // pts must be strictly monotonically increasing
                if (pts <= last) {
                       "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
                       pts, last);
                    return AVERROR(EINVAL);
                }

                if (!s->low_delay && display_picture_number == 1)
                    s->dts_delta = pts - last;
            }
            s->user_specified_pts = pts;
        } else {
            // no pts supplied: extrapolate from the previous one, or
            // fall back to the display picture number
            if (s->user_specified_pts != AV_NOPTS_VALUE) {
                s->user_specified_pts =
                pts = s->user_specified_pts + 1;
                av_log(s->avctx, AV_LOG_INFO,
                       "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
                       pts);
            } else {
                pts = display_picture_number;
            }
        }

        /* "direct" mode: reference the caller's buffers instead of
         * copying.  Requires refcounted input with matching strides,
         * 16-aligned dimensions and sufficiently aligned pointers. */
        if (!pic_arg->buf[0] ||
            pic_arg->linesize[0] != s->linesize ||
            pic_arg->linesize[1] != s->uvlinesize ||
            pic_arg->linesize[2] != s->uvlinesize)
            direct = 0;
        if ((s->width & 15) || (s->height & 15))
            direct = 0;
        if (((intptr_t)(pic_arg->data[0])) & (STRIDE_ALIGN-1))
            direct = 0;
        if (s->linesize & (STRIDE_ALIGN-1))
            direct = 0;

        ff_dlog(s->avctx, "%d %d %"PTRDIFF_SPECIFIER" %"PTRDIFF_SPECIFIER"\n", pic_arg->linesize[0],
                pic_arg->linesize[1], s->linesize, s->uvlinesize);

        i = ff_find_unused_picture(s->avctx, s->picture, direct);
        if (i < 0)
            return i;

        pic = &s->picture[i];
        pic->reference = 3;

        if (direct) {
            if ((ret = av_frame_ref(pic->f, pic_arg)) < 0)
                return ret;
        }
        ret = alloc_picture(s, pic, direct);
        if (ret < 0)
            return ret;

        if (!direct) {
            if (pic->f->data[0] + INPLACE_OFFSET == pic_arg->data[0] &&
                pic->f->data[1] + INPLACE_OFFSET == pic_arg->data[1] &&
                pic->f->data[2] + INPLACE_OFFSET == pic_arg->data[2]) {
                // empty
            } else {
                // plane-by-plane copy into the freshly allocated picture
                int h_chroma_shift, v_chroma_shift;
                                                &h_chroma_shift,
                                                &v_chroma_shift);

                for (i = 0; i < 3; i++) {
                    ptrdiff_t src_stride = pic_arg->linesize[i];
                    ptrdiff_t dst_stride = i ? s->uvlinesize : s->linesize;
                    int h_shift = i ? h_chroma_shift : 0;
                    int v_shift = i ? v_chroma_shift : 0;
                    int w = AV_CEIL_RSHIFT(s->width , h_shift);
                    int h = AV_CEIL_RSHIFT(s->height, v_shift);
                    uint8_t *src = pic_arg->data[i];
                    uint8_t *dst = pic->f->data[i];
                    int vpad = 16;

                    // interlaced MPEG-2 with 32-aligned allocation needs
                    // extra bottom padding
                    if (   s->codec_id == AV_CODEC_ID_MPEG2VIDEO
                        && !s->progressive_sequence
                        && FFALIGN(s->height, 32) - s->height > 16)
                        vpad = 32;

                    if (!s->avctx->rc_buffer_size)
                        dst += INPLACE_OFFSET;

                    if (src_stride == dst_stride)
                        memcpy(dst, src, src_stride * h - src_stride + w);
                    else {
                        int h2 = h;
                        uint8_t *dst2 = dst;
                        while (h2--) {
                            memcpy(dst2, src, w);
                            dst2 += dst_stride;
                            src += src_stride;
                        }
                    }
                    // replicate edge pixels into the padding area
                    if ((s->width & 15) || (s->height & (vpad-1))) {
                        s->mpvencdsp.draw_edges(dst, dst_stride,
                                                w, h,
                                                16 >> h_shift,
                                                vpad >> v_shift,
                                                EDGE_BOTTOM);
                    }
                }
                emms_c();
            }
        }
        ret = av_frame_copy_props(pic->f, pic_arg);
        if (ret < 0)
            return ret;

        pic->f->display_picture_number = display_picture_number;
        pic->f->pts = pts; // we set this here to avoid modifying pic_arg
    } else {
        /* Flushing: When we have not received enough input frames,
         * ensure s->input_picture[0] contains the first picture */
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
            if (s->input_picture[flush_offset])
                break;

        if (flush_offset <= 1)
            flush_offset = 1;
        else
            encoding_delay = encoding_delay - flush_offset + 1;
    }

    /* shift buffer entries */
    for (i = flush_offset; i < MAX_PICTURE_COUNT /*s->encoding_delay + 1*/; i++)
        s->input_picture[i - flush_offset] = s->input_picture[i];
    for (int i = MAX_B_FRAMES + 1 - flush_offset; i <= MAX_B_FRAMES; i++)
        s->input_picture[i] = NULL;

    s->input_picture[encoding_delay] = (Picture*) pic;

    return 0;
}
1307 
{
    /* Decide whether candidate picture 'p' may be skipped (treated as a
     * repeat) relative to 'ref'.  An 8x8 comparison function
     * (s->mecc.frame_skip_cmp[1]) is aggregated over all three planes
     * with a metric chosen by |s->frame_skip_exp|; a negative exponent
     * additionally normalizes by the picture size (power mean).
     * Returns 1 to skip the picture, 0 to encode it normally. */
    int x, y, plane;
    int score = 0;       // accumulator for exponents 0 and 1
    int64_t score64 = 0; // accumulator for exponents >= 2

    for (plane = 0; plane < 3; plane++) {
        const int stride = p->f->linesize[plane];
        // luma has two 8x8 blocks per macroblock dimension, chroma one
        const int bw = plane ? 1 : 2;
        for (y = 0; y < s->mb_height * bw; y++) {
            for (x = 0; x < s->mb_width * bw; x++) {
                int off = p->shared ? 0 : 16;
                uint8_t *dptr = p->f->data[plane] + 8 * (x + y * stride) + off;
                uint8_t *rptr = ref->f->data[plane] + 8 * (x + y * stride);
                int v = s->mecc.frame_skip_cmp[1](s, dptr, rptr, stride, 8);

                switch (FFABS(s->frame_skip_exp)) {
                case 0: score = FFMAX(score, v); break;
                case 1: score += FFABS(v); break;
                case 2: score64 += v * (int64_t)v; break;
                case 3: score64 += FFABS(v * (int64_t)v * v); break;
                case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
                }
            }
        }
    }
    emms_c();

    if (score)
        score64 = score;
    if (s->frame_skip_exp < 0)
        score64 = pow(score64 / (double)(s->mb_width * s->mb_height),
                      -1.0/s->frame_skip_exp);

    // skip if below the absolute threshold or the lambda-scaled factor
    if (score64 < s->frame_skip_threshold)
        return 1;
    if (score64 < ((s->frame_skip_factor * (int64_t) s->lambda) >> 8))
        return 1;
    return 0;
}
1348 
{
    /* Encode one frame with the given scratch encoder context and return
     * the total number of packet bytes produced, or a negative AVERROR.
     * A NULL frame drains the encoder's delayed packets (flush). */
    AVPacket pkt = { 0 };
    int ret;
    int size = 0; // running total of produced packet bytes

    av_init_packet(&pkt);

    ret = avcodec_send_frame(c, frame);
    if (ret < 0)
        return ret;

    // drain every packet currently available from the encoder
    do {
        ret = avcodec_receive_packet(c, &pkt);
        if (ret >= 0) {
            size += pkt.size;
            av_packet_unref(&pkt);
        } else if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
            return ret; // EAGAIN/EOF just end the loop; real errors propagate
    } while (ret >= 0);

    return size;
}
1372 
1374 {
1375  const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
1376  const int scale = s->brd_scale;
1377  int width = s->width >> scale;
1378  int height = s->height >> scale;
1379  int i, j, out_size, p_lambda, b_lambda, lambda2;
1380  int64_t best_rd = INT64_MAX;
1381  int best_b_count = -1;
1382  int ret = 0;
1383 
1384  av_assert0(scale >= 0 && scale <= 3);
1385 
1386  //emms_c();
1387  //s->next_picture_ptr->quality;
1388  p_lambda = s->last_lambda_for[AV_PICTURE_TYPE_P];
1389  //p_lambda * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset;
1390  b_lambda = s->last_lambda_for[AV_PICTURE_TYPE_B];
1391  if (!b_lambda) // FIXME we should do this somewhere else
1392  b_lambda = p_lambda;
1393  lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
1395 
1396  for (i = 0; i < s->max_b_frames + 2; i++) {
1397  Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
1398  s->next_picture_ptr;
1399  uint8_t *data[4];
1400 
1401  if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
1402  pre_input = *pre_input_ptr;
1403  memcpy(data, pre_input_ptr->f->data, sizeof(data));
1404 
1405  if (!pre_input.shared && i) {
1406  data[0] += INPLACE_OFFSET;
1407  data[1] += INPLACE_OFFSET;
1408  data[2] += INPLACE_OFFSET;
1409  }
1410 
1411  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[0],
1412  s->tmp_frames[i]->linesize[0],
1413  data[0],
1414  pre_input.f->linesize[0],
1415  width, height);
1416  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[1],
1417  s->tmp_frames[i]->linesize[1],
1418  data[1],
1419  pre_input.f->linesize[1],
1420  width >> 1, height >> 1);
1421  s->mpvencdsp.shrink[scale](s->tmp_frames[i]->data[2],
1422  s->tmp_frames[i]->linesize[2],
1423  data[2],
1424  pre_input.f->linesize[2],
1425  width >> 1, height >> 1);
1426  }
1427  }
1428 
1429  for (j = 0; j < s->max_b_frames + 1; j++) {
1430  AVCodecContext *c;
1431  int64_t rd = 0;
1432 
1433  if (!s->input_picture[j])
1434  break;
1435 
1437  if (!c)
1438  return AVERROR(ENOMEM);
1439 
1440  c->width = width;
1441  c->height = height;
1443  c->flags |= s->avctx->flags & AV_CODEC_FLAG_QPEL;
1444  c->mb_decision = s->avctx->mb_decision;
1445  c->me_cmp = s->avctx->me_cmp;
1446  c->mb_cmp = s->avctx->mb_cmp;
1447  c->me_sub_cmp = s->avctx->me_sub_cmp;
1449  c->time_base = s->avctx->time_base;
1450  c->max_b_frames = s->max_b_frames;
1451 
1452  ret = avcodec_open2(c, codec, NULL);
1453  if (ret < 0)
1454  goto fail;
1455 
1457  s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
1458 
1459  out_size = encode_frame(c, s->tmp_frames[0]);
1460  if (out_size < 0) {
1461  ret = out_size;
1462  goto fail;
1463  }
1464 
1465  //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
1466 
1467  for (i = 0; i < s->max_b_frames + 1; i++) {
1468  int is_p = i % (j + 1) == j || i == s->max_b_frames;
1469 
1470  s->tmp_frames[i + 1]->pict_type = is_p ?
1472  s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
1473 
1474  out_size = encode_frame(c, s->tmp_frames[i + 1]);
1475  if (out_size < 0) {
1476  ret = out_size;
1477  goto fail;
1478  }
1479 
1480  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1481  }
1482 
1483  /* get the delayed frames */
1484  out_size = encode_frame(c, NULL);
1485  if (out_size < 0) {
1486  ret = out_size;
1487  goto fail;
1488  }
1489  rd += (out_size * (uint64_t)lambda2) >> (FF_LAMBDA_SHIFT - 3);
1490 
1491  rd += c->error[0] + c->error[1] + c->error[2];
1492 
1493  if (rd < best_rd) {
1494  best_rd = rd;
1495  best_b_count = j;
1496  }
1497 
1498 fail:
1500  if (ret < 0)
1501  return ret;
1502  }
1503 
1504  return best_b_count;
1505 }
1506 
1508 {
1509  int i, ret;
1510 
1511  for (i = 1; i < MAX_PICTURE_COUNT; i++)
1513  s->reordered_input_picture[MAX_PICTURE_COUNT - 1] = NULL;
1514 
1515  /* set next picture type & ordering */
1516  if (!s->reordered_input_picture[0] && s->input_picture[0]) {
1517  if (s->frame_skip_threshold || s->frame_skip_factor) {
1518  if (s->picture_in_gop_number < s->gop_size &&
1519  s->next_picture_ptr &&
1520  skip_check(s, s->input_picture[0], s->next_picture_ptr)) {
1521  // FIXME check that the gop check above is +-1 correct
1522  av_frame_unref(s->input_picture[0]->f);
1523 
1524  ff_vbv_update(s, 0);
1525 
1526  goto no_output_pic;
1527  }
1528  }
1529 
1530  if (/*s->picture_in_gop_number >= s->gop_size ||*/
1531  !s->next_picture_ptr || s->intra_only) {
1532  s->reordered_input_picture[0] = s->input_picture[0];
1535  s->coded_picture_number++;
1536  } else {
1537  int b_frames = 0;
1538 
1539  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
1540  for (i = 0; i < s->max_b_frames + 1; i++) {
1541  int pict_num = s->input_picture[0]->f->display_picture_number + i;
1542 
1543  if (pict_num >= s->rc_context.num_entries)
1544  break;
1545  if (!s->input_picture[i]) {
1546  s->rc_context.entry[pict_num - 1].new_pict_type = AV_PICTURE_TYPE_P;
1547  break;
1548  }
1549 
1550  s->input_picture[i]->f->pict_type =
1551  s->rc_context.entry[pict_num].new_pict_type;
1552  }
1553  }
1554 
1555  if (s->b_frame_strategy == 0) {
1556  b_frames = s->max_b_frames;
1557  while (b_frames && !s->input_picture[b_frames])
1558  b_frames--;
1559  } else if (s->b_frame_strategy == 1) {
1560  for (i = 1; i < s->max_b_frames + 1; i++) {
1561  if (s->input_picture[i] &&
1562  s->input_picture[i]->b_frame_score == 0) {
1564  get_intra_count(s,
1565  s->input_picture[i ]->f->data[0],
1566  s->input_picture[i - 1]->f->data[0],
1567  s->linesize) + 1;
1568  }
1569  }
1570  for (i = 0; i < s->max_b_frames + 1; i++) {
1571  if (!s->input_picture[i] ||
1572  s->input_picture[i]->b_frame_score - 1 >
1573  s->mb_num / s->b_sensitivity)
1574  break;
1575  }
1576 
1577  b_frames = FFMAX(0, i - 1);
1578 
1579  /* reset scores */
1580  for (i = 0; i < b_frames + 1; i++) {
1581  s->input_picture[i]->b_frame_score = 0;
1582  }
1583  } else if (s->b_frame_strategy == 2) {
1584  b_frames = estimate_best_b_count(s);
1585  if (b_frames < 0)
1586  return b_frames;
1587  }
1588 
1589  emms_c();
1590 
1591  for (i = b_frames - 1; i >= 0; i--) {
1592  int type = s->input_picture[i]->f->pict_type;
1593  if (type && type != AV_PICTURE_TYPE_B)
1594  b_frames = i;
1595  }
1596  if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
1597  b_frames == s->max_b_frames) {
1599  "warning, too many B-frames in a row\n");
1600  }
1601 
1602  if (s->picture_in_gop_number + b_frames >= s->gop_size) {
1603  if ((s->mpv_flags & FF_MPV_FLAG_STRICT_GOP) &&
1604  s->gop_size > s->picture_in_gop_number) {
1605  b_frames = s->gop_size - s->picture_in_gop_number - 1;
1606  } else {
1608  b_frames = 0;
1609  s->input_picture[b_frames]->f->pict_type = AV_PICTURE_TYPE_I;
1610  }
1611  }
1612 
1613  if ((s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) && b_frames &&
1614  s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_I)
1615  b_frames--;
1616 
1617  s->reordered_input_picture[0] = s->input_picture[b_frames];
1621  s->coded_picture_number++;
1622  for (i = 0; i < b_frames; i++) {
1623  s->reordered_input_picture[i + 1] = s->input_picture[i];
1624  s->reordered_input_picture[i + 1]->f->pict_type =
1627  s->coded_picture_number++;
1628  }
1629  }
1630  }
1631 no_output_pic:
1633 
1634  if (s->reordered_input_picture[0]) {
1637  AV_PICTURE_TYPE_B ? 3 : 0;
1638 
1639  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
1640  return ret;
1641 
1642  if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
1643  // input is a shared pix, so we can't modify it -> allocate a new
1644  // one & ensure that the shared one is reuseable
1645 
1646  Picture *pic;
1647  int i = ff_find_unused_picture(s->avctx, s->picture, 0);
1648  if (i < 0)
1649  return i;
1650  pic = &s->picture[i];
1651 
1653  if (alloc_picture(s, pic, 0) < 0) {
1654  return -1;
1655  }
1656 
1657  ret = av_frame_copy_props(pic->f, s->reordered_input_picture[0]->f);
1658  if (ret < 0)
1659  return ret;
1660 
1661  /* mark us unused / free shared pic */
1663  s->reordered_input_picture[0]->shared = 0;
1664 
1665  s->current_picture_ptr = pic;
1666  } else {
1667  // input is not a shared pix -> reuse buffer for current_pix
1669  for (i = 0; i < 4; i++) {
1670  s->new_picture.f->data[i] += INPLACE_OFFSET;
1671  }
1672  }
1674  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1675  s->current_picture_ptr)) < 0)
1676  return ret;
1677 
1679  }
1680  return 0;
1681 }
1682 
1683 static void frame_end(MpegEncContext *s)
1684 {
1685  if (s->unrestricted_mv &&
1687  !s->intra_only) {
1689  int hshift = desc->log2_chroma_w;
1690  int vshift = desc->log2_chroma_h;
1692  s->current_picture.f->linesize[0],
1693  s->h_edge_pos, s->v_edge_pos,
1695  EDGE_TOP | EDGE_BOTTOM);
1697  s->current_picture.f->linesize[1],
1698  s->h_edge_pos >> hshift,
1699  s->v_edge_pos >> vshift,
1700  EDGE_WIDTH >> hshift,
1701  EDGE_WIDTH >> vshift,
1702  EDGE_TOP | EDGE_BOTTOM);
1704  s->current_picture.f->linesize[2],
1705  s->h_edge_pos >> hshift,
1706  s->v_edge_pos >> vshift,
1707  EDGE_WIDTH >> hshift,
1708  EDGE_WIDTH >> vshift,
1709  EDGE_TOP | EDGE_BOTTOM);
1710  }
1711 
1712  emms_c();
1713 
1714  s->last_pict_type = s->pict_type;
1716  if (s->pict_type!= AV_PICTURE_TYPE_B)
1718 
1719 #if FF_API_CODED_FRAME
1724 #endif
1725 #if FF_API_ERROR_FRAME
1728  sizeof(s->current_picture.encoding_error));
1730 #endif
1731 }
1732 
{
    /* Refresh the noise-reduction DCT offset tables from the accumulated
     * per-coefficient error statistics, separately for intra and inter
     * blocks.  Counts and sums are halved once the count exceeds 2^16 so
     * the running averages track recent content without overflowing. */
    int intra, i;

    for (intra = 0; intra < 2; intra++) {
        if (s->dct_count[intra] > (1 << 16)) {
            for (i = 0; i < 64; i++) {
                s->dct_error_sum[intra][i] >>= 1;
            }
            s->dct_count[intra] >>= 1;
        }

        for (i = 0; i < 64; i++) {
            // offset ~= noise_reduction * count / error_sum, rounded;
            // the +1 in the divisor avoids division by zero
            s->dct_offset[intra][i] = (s->noise_reduction *
                                       s->dct_count[intra] +
                                       s->dct_error_sum[intra][i] / 2) /
                                      (s->dct_error_sum[intra][i] + 1);
        }
    }
}
1753 
1755 {
1756  int ret;
1757 
1758  /* mark & release old frames */
1759  if (s->pict_type != AV_PICTURE_TYPE_B && s->last_picture_ptr &&
1761  s->last_picture_ptr->f->buf[0]) {
1763  }
1764 
1767 
1769  if ((ret = ff_mpeg_ref_picture(s->avctx, &s->current_picture,
1770  s->current_picture_ptr)) < 0)
1771  return ret;
1772 
1773  if (s->pict_type != AV_PICTURE_TYPE_B) {
1775  if (!s->droppable)
1777  }
1778 
1779  if (s->last_picture_ptr) {
1781  if (s->last_picture_ptr->f->buf[0] &&
1782  (ret = ff_mpeg_ref_picture(s->avctx, &s->last_picture,
1783  s->last_picture_ptr)) < 0)
1784  return ret;
1785  }
1786  if (s->next_picture_ptr) {
1788  if (s->next_picture_ptr->f->buf[0] &&
1789  (ret = ff_mpeg_ref_picture(s->avctx, &s->next_picture,
1790  s->next_picture_ptr)) < 0)
1791  return ret;
1792  }
1793 
1794  if (s->picture_structure!= PICT_FRAME) {
1795  int i;
1796  for (i = 0; i < 4; i++) {
1798  s->current_picture.f->data[i] +=
1799  s->current_picture.f->linesize[i];
1800  }
1801  s->current_picture.f->linesize[i] *= 2;
1802  s->last_picture.f->linesize[i] *= 2;
1803  s->next_picture.f->linesize[i] *= 2;
1804  }
1805  }
1806 
1807  if (s->mpeg_quant || s->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
1810  } else if (s->out_format == FMT_H263 || s->out_format == FMT_H261) {
1813  } else {
1816  }
1817 
1818  if (s->dct_error_sum) {
1821  }
1822 
1823  return 0;
1824 }
1825 
1827  const AVFrame *pic_arg, int *got_packet)
1828 {
1829  MpegEncContext *s = avctx->priv_data;
1830  int i, stuffing_count, ret;
1831  int context_count = s->slice_context_count;
1832 
1833  s->vbv_ignore_qmax = 0;
1834 
1835  s->picture_in_gop_number++;
1836 
1837  if (load_input_picture(s, pic_arg) < 0)
1838  return -1;
1839 
1840  if (select_input_picture(s) < 0) {
1841  return -1;
1842  }
1843 
1844  /* output? */
1845  if (s->new_picture.f->data[0]) {
1846  int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
1847  int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
1848  :
1849  s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000;
1850  if ((ret = ff_alloc_packet2(avctx, pkt, pkt_size, 0)) < 0)
1851  return ret;
1852  if (s->mb_info) {
1855  s->mb_width*s->mb_height*12);
1856  s->prev_mb_info = s->last_mb_info = s->mb_info_size = 0;
1857  }
1858 
1859  for (i = 0; i < context_count; i++) {
1860  int start_y = s->thread_context[i]->start_mb_y;
1861  int end_y = s->thread_context[i]-> end_mb_y;
1862  int h = s->mb_height;
1863  uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
1864  uint8_t *end = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
1865 
1866  init_put_bits(&s->thread_context[i]->pb, start, end - start);
1867  }
1868 
1869  s->pict_type = s->new_picture.f->pict_type;
1870  //emms_c();
1871  ret = frame_start(s);
1872  if (ret < 0)
1873  return ret;
1874 vbv_retry:
1875  ret = encode_picture(s, s->picture_number);
1876  if (growing_buffer) {
1877  av_assert0(s->pb.buf == avctx->internal->byte_buffer);
1878  pkt->data = s->pb.buf;
1879  pkt->size = avctx->internal->byte_buffer_size;
1880  }
1881  if (ret < 0)
1882  return -1;
1883 
1884 #if FF_API_STAT_BITS
1886  avctx->header_bits = s->header_bits;
1887  avctx->mv_bits = s->mv_bits;
1888  avctx->misc_bits = s->misc_bits;
1889  avctx->i_tex_bits = s->i_tex_bits;
1890  avctx->p_tex_bits = s->p_tex_bits;
1891  avctx->i_count = s->i_count;
1892  // FIXME f/b_count in avctx
1893  avctx->p_count = s->mb_num - s->i_count - s->skip_count;
1894  avctx->skip_count = s->skip_count;
1896 #endif
1897 
1898  frame_end(s);
1899 
1902 
1903  if (avctx->rc_buffer_size) {
1904  RateControlContext *rcc = &s->rc_context;
1905  int max_size = FFMAX(rcc->buffer_index * avctx->rc_max_available_vbv_use, rcc->buffer_index - 500);
1906  int hq = (s->avctx->mb_decision == FF_MB_DECISION_RD || s->avctx->trellis);
1907  int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
1908 
1909  if (put_bits_count(&s->pb) > max_size &&
1910  s->lambda < s->lmax) {
1911  s->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
1912  (s->qscale + 1) / s->qscale);
1913  if (s->adaptive_quant) {
1914  int i;
1915  for (i = 0; i < s->mb_height * s->mb_stride; i++)
1916  s->lambda_table[i] =
1917  FFMAX(s->lambda_table[i] + min_step,
1918  s->lambda_table[i] * (s->qscale + 1) /
1919  s->qscale);
1920  }
1921  s->mb_skipped = 0; // done in frame_start()
1922  // done in encode_picture() so we must undo it
1923  if (s->pict_type == AV_PICTURE_TYPE_P) {
1924  if (s->flipflop_rounding ||
1925  s->codec_id == AV_CODEC_ID_H263P ||
1927  s->no_rounding ^= 1;
1928  }
1929  if (s->pict_type != AV_PICTURE_TYPE_B) {
1930  s->time_base = s->last_time_base;
1931  s->last_non_b_time = s->time - s->pp_time;
1932  }
1933  for (i = 0; i < context_count; i++) {
1934  PutBitContext *pb = &s->thread_context[i]->pb;
1935  init_put_bits(pb, pb->buf, pb->buf_end - pb->buf);
1936  }
1937  s->vbv_ignore_qmax = 1;
1938  av_log(s->avctx, AV_LOG_VERBOSE, "reencoding frame due to VBV\n");
1939  goto vbv_retry;
1940  }
1941 
1943  }
1944 
1945  if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1947 
1948  for (i = 0; i < 4; i++) {
1950  avctx->error[i] += s->current_picture_ptr->encoding_error[i];
1951  }
1954  (s->avctx->flags&AV_CODEC_FLAG_PSNR) ? 4 : 0,
1955  s->pict_type);
1956 
1957  if (s->avctx->flags & AV_CODEC_FLAG_PASS1)
1958  assert(put_bits_count(&s->pb) == s->header_bits + s->mv_bits +
1959  s->misc_bits + s->i_tex_bits +
1960  s->p_tex_bits);
1961  flush_put_bits(&s->pb);
1962  s->frame_bits = put_bits_count(&s->pb);
1963 
1964  stuffing_count = ff_vbv_update(s, s->frame_bits);
1965  s->stuffing_bits = 8*stuffing_count;
1966  if (stuffing_count) {
1967  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
1968  stuffing_count + 50) {
1969  av_log(s->avctx, AV_LOG_ERROR, "stuffing too large\n");
1970  return -1;
1971  }
1972 
1973  switch (s->codec_id) {
1976  while (stuffing_count--) {
1977  put_bits(&s->pb, 8, 0);
1978  }
1979  break;
1980  case AV_CODEC_ID_MPEG4:
1981  put_bits(&s->pb, 16, 0);
1982  put_bits(&s->pb, 16, 0x1C3);
1983  stuffing_count -= 4;
1984  while (stuffing_count--) {
1985  put_bits(&s->pb, 8, 0xFF);
1986  }
1987  break;
1988  default:
1989  av_log(s->avctx, AV_LOG_ERROR, "vbv buffer overflow\n");
1990  }
1991  flush_put_bits(&s->pb);
1992  s->frame_bits = put_bits_count(&s->pb);
1993  }
1994 
1995  /* update MPEG-1/2 vbv_delay for CBR */
1996  if (s->avctx->rc_max_rate &&
1997  s->avctx->rc_min_rate == s->avctx->rc_max_rate &&
1998  s->out_format == FMT_MPEG1 &&
1999  90000LL * (avctx->rc_buffer_size - 1) <=
2000  s->avctx->rc_max_rate * 0xFFFFLL) {
2001  AVCPBProperties *props;
2002  size_t props_size;
2003 
2004  int vbv_delay, min_delay;
2005  double inbits = s->avctx->rc_max_rate *
2006  av_q2d(s->avctx->time_base);
2007  int minbits = s->frame_bits - 8 *
2008  (s->vbv_delay_ptr - s->pb.buf - 1);
2009  double bits = s->rc_context.buffer_index + minbits - inbits;
2010 
2011  if (bits < 0)
2013  "Internal error, negative bits\n");
2014 
2015  av_assert1(s->repeat_first_field == 0);
2016 
2017  vbv_delay = bits * 90000 / s->avctx->rc_max_rate;
2018  min_delay = (minbits * 90000LL + s->avctx->rc_max_rate - 1) /
2019  s->avctx->rc_max_rate;
2020 
2021  vbv_delay = FFMAX(vbv_delay, min_delay);
2022 
2023  av_assert0(vbv_delay < 0xFFFF);
2024 
2025  s->vbv_delay_ptr[0] &= 0xF8;
2026  s->vbv_delay_ptr[0] |= vbv_delay >> 13;
2027  s->vbv_delay_ptr[1] = vbv_delay >> 5;
2028  s->vbv_delay_ptr[2] &= 0x07;
2029  s->vbv_delay_ptr[2] |= vbv_delay << 3;
2030 
2031  props = av_cpb_properties_alloc(&props_size);
2032  if (!props)
2033  return AVERROR(ENOMEM);
2034  props->vbv_delay = vbv_delay * 300;
2035 
2037  (uint8_t*)props, props_size);
2038  if (ret < 0) {
2039  av_freep(&props);
2040  return ret;
2041  }
2042 
2043 #if FF_API_VBV_DELAY
2045  avctx->vbv_delay = vbv_delay * 300;
2047 #endif
2048  }
2049  s->total_bits += s->frame_bits;
2050 #if FF_API_STAT_BITS
2052  avctx->frame_bits = s->frame_bits;
2054 #endif
2055 
2056 
2057  pkt->pts = s->current_picture.f->pts;
2058  if (!s->low_delay && s->pict_type != AV_PICTURE_TYPE_B) {
2060  pkt->dts = pkt->pts - s->dts_delta;
2061  else
2062  pkt->dts = s->reordered_pts;
2063  s->reordered_pts = pkt->pts;
2064  } else
2065  pkt->dts = pkt->pts;
2066  if (s->current_picture.f->key_frame)
2067  pkt->flags |= AV_PKT_FLAG_KEY;
2068  if (s->mb_info)
2070  } else {
2071  s->frame_bits = 0;
2072  }
2073 
2074  /* release non-reference frames */
2075  for (i = 0; i < MAX_PICTURE_COUNT; i++) {
2076  if (!s->picture[i].reference)
2077  ff_mpeg_unref_picture(s->avctx, &s->picture[i]);
2078  }
2079 
2080  av_assert1((s->frame_bits & 7) == 0);
2081 
2082  pkt->size = s->frame_bits / 8;
2083  *got_packet = !!pkt->size;
2084  return 0;
2085 }
2086 
                                          int n, int threshold)
{
    /* Zero out block 'n' when it contains only a few scattered +-1
     * coefficients whose position-weighted score stays below 'threshold'.
     * A negative threshold additionally allows the DC coefficient to be
     * eliminated (skip_dc = 0). */
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    };
    int score = 0; // significance score of the block
    int run = 0;   // zeros seen since the last +-1 coefficient
    int i;
    int16_t *block = s->block[n];
    const int last_index = s->block_last_index[n];
    int skip_dc;

    if (threshold < 0) {
        skip_dc = 0;
        threshold = -threshold;
    } else
        skip_dc = 1;

    /* Are all we could set to zero already zero? */
    if (last_index <= skip_dc - 1)
        return;

    for (i = 0; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        const int level = FFABS(block[j]);
        if (level == 1) {
            if (skip_dc && i == 0)
                continue;
            score += tab[run]; // short runs (tab[run] > 0) add significance
            run = 0;
        } else if (level > 1) {
            return; // any |coeff| > 1: block is significant, keep it
        } else {
            run++;
        }
    }
    if (score >= threshold)
        return;
    // insignificant: clear every eliminable coefficient
    for (i = skip_dc; i <= last_index; i++) {
        const int j = s->intra_scantable.permutated[i];
        block[j] = 0;
    }
    // update last_index: 0 if a DC coefficient survives, -1 if empty
    if (block[0])
        s->block_last_index[n] = 0;
    else
        s->block_last_index[n] = -1;
}
2142 
2143 static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
2144  int last_index)
2145 {
2146  int i;
2147  const int maxlevel = s->max_qcoeff;
2148  const int minlevel = s->min_qcoeff;
2149  int overflow = 0;
2150 
2151  if (s->mb_intra) {
2152  i = 1; // skip clipping of intra dc
2153  } else
2154  i = 0;
2155 
2156  for (; i <= last_index; i++) {
2157  const int j = s->intra_scantable.permutated[i];
2158  int level = block[j];
2159 
2160  if (level > maxlevel) {
2161  level = maxlevel;
2162  overflow++;
2163  } else if (level < minlevel) {
2164  level = minlevel;
2165  overflow++;
2166  }
2167 
2168  block[j] = level;
2169  }
2170 
2171  if (overflow && s->avctx->mb_decision == FF_MB_DECISION_SIMPLE)
2172  av_log(s->avctx, AV_LOG_INFO,
2173  "warning, clipping %d dct coefficients to %d..%d\n",
2174  overflow, minlevel, maxlevel);
2175 }
2176 
2177 static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
2178 {
2179  int x, y;
2180  // FIXME optimize
2181  for (y = 0; y < 8; y++) {
2182  for (x = 0; x < 8; x++) {
2183  int x2, y2;
2184  int sum = 0;
2185  int sqr = 0;
2186  int count = 0;
2187 
2188  for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
2189  for (x2= FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
2190  int v = ptr[x2 + y2 * stride];
2191  sum += v;
2192  sqr += v * v;
2193  count++;
2194  }
2195  }
2196  weight[x + 8 * y]= (36 * ff_sqrt(count * sqr - sum * sum)) / count;
2197  }
2198  }
2199 }
2200 
2202  int motion_x, int motion_y,
2203  int mb_block_height,
2204  int mb_block_width,
2205  int mb_block_count)
2206 {
2207  int16_t weight[12][64];
2208  int16_t orig[12][64];
2209  const int mb_x = s->mb_x;
2210  const int mb_y = s->mb_y;
2211  int i;
2212  int skip_dct[12];
2213  int dct_offset = s->linesize * 8; // default for progressive frames
2214  int uv_dct_offset = s->uvlinesize * 8;
2215  uint8_t *ptr_y, *ptr_cb, *ptr_cr;
2216  ptrdiff_t wrap_y, wrap_c;
2217 
2218  for (i = 0; i < mb_block_count; i++)
2219  skip_dct[i] = s->skipdct;
2220 
2221  if (s->adaptive_quant) {
2222  const int last_qp = s->qscale;
2223  const int mb_xy = mb_x + mb_y * s->mb_stride;
2224 
2225  s->lambda = s->lambda_table[mb_xy];
2226  update_qscale(s);
2227 
2228  if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
2229  s->qscale = s->current_picture_ptr->qscale_table[mb_xy];
2230  s->dquant = s->qscale - last_qp;
2231 
2232  if (s->out_format == FMT_H263) {
2233  s->dquant = av_clip(s->dquant, -2, 2);
2234 
2235  if (s->codec_id == AV_CODEC_ID_MPEG4) {
2236  if (!s->mb_intra) {
2237  if (s->pict_type == AV_PICTURE_TYPE_B) {
2238  if (s->dquant & 1 || s->mv_dir & MV_DIRECT)
2239  s->dquant = 0;
2240  }
2241  if (s->mv_type == MV_TYPE_8X8)
2242  s->dquant = 0;
2243  }
2244  }
2245  }
2246  }
2247  ff_set_qscale(s, last_qp + s->dquant);
2248  } else if (s->mpv_flags & FF_MPV_FLAG_QP_RD)
2249  ff_set_qscale(s, s->qscale + s->dquant);
2250 
2251  wrap_y = s->linesize;
2252  wrap_c = s->uvlinesize;
2253  ptr_y = s->new_picture.f->data[0] +
2254  (mb_y * 16 * wrap_y) + mb_x * 16;
2255  ptr_cb = s->new_picture.f->data[1] +
2256  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2257  ptr_cr = s->new_picture.f->data[2] +
2258  (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
2259 
2260  if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
2261  uint8_t *ebuf = s->sc.edge_emu_buffer + 38 * wrap_y;
2262  int cw = (s->width + s->chroma_x_shift) >> s->chroma_x_shift;
2263  int ch = (s->height + s->chroma_y_shift) >> s->chroma_y_shift;
2264  s->vdsp.emulated_edge_mc(ebuf, ptr_y,
2265  wrap_y, wrap_y,
2266  16, 16, mb_x * 16, mb_y * 16,
2267  s->width, s->height);
2268  ptr_y = ebuf;
2269  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y, ptr_cb,
2270  wrap_c, wrap_c,
2271  mb_block_width, mb_block_height,
2272  mb_x * mb_block_width, mb_y * mb_block_height,
2273  cw, ch);
2274  ptr_cb = ebuf + 16 * wrap_y;
2275  s->vdsp.emulated_edge_mc(ebuf + 16 * wrap_y + 16, ptr_cr,
2276  wrap_c, wrap_c,
2277  mb_block_width, mb_block_height,
2278  mb_x * mb_block_width, mb_y * mb_block_height,
2279  cw, ch);
2280  ptr_cr = ebuf + 16 * wrap_y + 16;
2281  }
2282 
2283  if (s->mb_intra) {
2285  int progressive_score, interlaced_score;
2286 
2287  s->interlaced_dct = 0;
2288  progressive_score = s->mecc.ildct_cmp[4](s, ptr_y, NULL, wrap_y, 8) +
2289  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y * 8,
2290  NULL, wrap_y, 8) - 400;
2291 
2292  if (progressive_score > 0) {
2293  interlaced_score = s->mecc.ildct_cmp[4](s, ptr_y,
2294  NULL, wrap_y * 2, 8) +
2295  s->mecc.ildct_cmp[4](s, ptr_y + wrap_y,
2296  NULL, wrap_y * 2, 8);
2297  if (progressive_score > interlaced_score) {
2298  s->interlaced_dct = 1;
2299 
2300  dct_offset = wrap_y;
2301  uv_dct_offset = wrap_c;
2302  wrap_y <<= 1;
2303  if (s->chroma_format == CHROMA_422 ||
2304  s->chroma_format == CHROMA_444)
2305  wrap_c <<= 1;
2306  }
2307  }
2308  }
2309 
2310  s->pdsp.get_pixels(s->block[0], ptr_y, wrap_y);
2311  s->pdsp.get_pixels(s->block[1], ptr_y + 8, wrap_y);
2312  s->pdsp.get_pixels(s->block[2], ptr_y + dct_offset, wrap_y);
2313  s->pdsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y);
2314 
2315  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2316  skip_dct[4] = 1;
2317  skip_dct[5] = 1;
2318  } else {
2319  s->pdsp.get_pixels(s->block[4], ptr_cb, wrap_c);
2320  s->pdsp.get_pixels(s->block[5], ptr_cr, wrap_c);
2321  if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
2322  s->pdsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
2323  s->pdsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
2324  } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
2325  s->pdsp.get_pixels(s->block[ 6], ptr_cb + 8, wrap_c);
2326  s->pdsp.get_pixels(s->block[ 7], ptr_cr + 8, wrap_c);
2327  s->pdsp.get_pixels(s->block[ 8], ptr_cb + uv_dct_offset, wrap_c);
2328  s->pdsp.get_pixels(s->block[ 9], ptr_cr + uv_dct_offset, wrap_c);
2329  s->pdsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
2330  s->pdsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
2331  }
2332  }
2333  } else {
2334  op_pixels_func (*op_pix)[4];
2335  qpel_mc_func (*op_qpix)[16];
2336  uint8_t *dest_y, *dest_cb, *dest_cr;
2337 
2338  dest_y = s->dest[0];
2339  dest_cb = s->dest[1];
2340  dest_cr = s->dest[2];
2341 
2342  if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
2343  op_pix = s->hdsp.put_pixels_tab;
2344  op_qpix = s->qdsp.put_qpel_pixels_tab;
2345  } else {
2346  op_pix = s->hdsp.put_no_rnd_pixels_tab;
2347  op_qpix = s->qdsp.put_no_rnd_qpel_pixels_tab;
2348  }
2349 
2350  if (s->mv_dir & MV_DIR_FORWARD) {
2351  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0,
2352  s->last_picture.f->data,
2353  op_pix, op_qpix);
2354  op_pix = s->hdsp.avg_pixels_tab;
2355  op_qpix = s->qdsp.avg_qpel_pixels_tab;
2356  }
2357  if (s->mv_dir & MV_DIR_BACKWARD) {
2358  ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1,
2359  s->next_picture.f->data,
2360  op_pix, op_qpix);
2361  }
2362 
2364  int progressive_score, interlaced_score;
2365 
2366  s->interlaced_dct = 0;
2367  progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
2368  s->mecc.ildct_cmp[0](s, dest_y + wrap_y * 8,
2369  ptr_y + wrap_y * 8,
2370  wrap_y, 8) - 400;
2371 
2372  if (s->avctx->ildct_cmp == FF_CMP_VSSE)
2373  progressive_score -= 400;
2374 
2375  if (progressive_score > 0) {
2376  interlaced_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y,
2377  wrap_y * 2, 8) +
2378  s->mecc.ildct_cmp[0](s, dest_y + wrap_y,
2379  ptr_y + wrap_y,
2380  wrap_y * 2, 8);
2381 
2382  if (progressive_score > interlaced_score) {
2383  s->interlaced_dct = 1;
2384 
2385  dct_offset = wrap_y;
2386  uv_dct_offset = wrap_c;
2387  wrap_y <<= 1;
2388  if (s->chroma_format == CHROMA_422)
2389  wrap_c <<= 1;
2390  }
2391  }
2392  }
2393 
2394  s->pdsp.diff_pixels(s->block[0], ptr_y, dest_y, wrap_y);
2395  s->pdsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y);
2396  s->pdsp.diff_pixels(s->block[2], ptr_y + dct_offset,
2397  dest_y + dct_offset, wrap_y);
2398  s->pdsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8,
2399  dest_y + dct_offset + 8, wrap_y);
2400 
2401  if (s->avctx->flags & AV_CODEC_FLAG_GRAY) {
2402  skip_dct[4] = 1;
2403  skip_dct[5] = 1;
2404  } else {
2405  s->pdsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
2406  s->pdsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
2407  if (!s->chroma_y_shift) { /* 422 */
2408  s->pdsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
2409  dest_cb + uv_dct_offset, wrap_c);
2410  s->pdsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
2411  dest_cr + uv_dct_offset, wrap_c);
2412  }
2413  }
2414  /* pre quantization */
2415  if (s->current_picture.mc_mb_var[s->mb_stride * mb_y + mb_x] <
2416  2 * s->qscale * s->qscale) {
2417  // FIXME optimize
2418  if (s->mecc.sad[1](NULL, ptr_y, dest_y, wrap_y, 8) < 20 * s->qscale)
2419  skip_dct[0] = 1;
2420  if (s->mecc.sad[1](NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20 * s->qscale)
2421  skip_dct[1] = 1;
2422  if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
2423  wrap_y, 8) < 20 * s->qscale)
2424  skip_dct[2] = 1;
2425  if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
2426  wrap_y, 8) < 20 * s->qscale)
2427  skip_dct[3] = 1;
2428  if (s->mecc.sad[1](NULL, ptr_cb, dest_cb, wrap_c, 8) < 20 * s->qscale)
2429  skip_dct[4] = 1;
2430  if (s->mecc.sad[1](NULL, ptr_cr, dest_cr, wrap_c, 8) < 20 * s->qscale)
2431  skip_dct[5] = 1;
2432  if (!s->chroma_y_shift) { /* 422 */
2433  if (s->mecc.sad[1](NULL, ptr_cb + uv_dct_offset,
2434  dest_cb + uv_dct_offset,
2435  wrap_c, 8) < 20 * s->qscale)
2436  skip_dct[6] = 1;
2437  if (s->mecc.sad[1](NULL, ptr_cr + uv_dct_offset,
2438  dest_cr + uv_dct_offset,
2439  wrap_c, 8) < 20 * s->qscale)
2440  skip_dct[7] = 1;
2441  }
2442  }
2443  }
2444 
2445  if (s->quantizer_noise_shaping) {
2446  if (!skip_dct[0])
2447  get_visual_weight(weight[0], ptr_y , wrap_y);
2448  if (!skip_dct[1])
2449  get_visual_weight(weight[1], ptr_y + 8, wrap_y);
2450  if (!skip_dct[2])
2451  get_visual_weight(weight[2], ptr_y + dct_offset , wrap_y);
2452  if (!skip_dct[3])
2453  get_visual_weight(weight[3], ptr_y + dct_offset + 8, wrap_y);
2454  if (!skip_dct[4])
2455  get_visual_weight(weight[4], ptr_cb , wrap_c);
2456  if (!skip_dct[5])
2457  get_visual_weight(weight[5], ptr_cr , wrap_c);
2458  if (!s->chroma_y_shift) { /* 422 */
2459  if (!skip_dct[6])
2460  get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
2461  wrap_c);
2462  if (!skip_dct[7])
2463  get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
2464  wrap_c);
2465  }
2466  memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
2467  }
2468 
2469  /* DCT & quantize */
2470  av_assert2(s->out_format != FMT_MJPEG || s->qscale == 8);
2471  {
2472  for (i = 0; i < mb_block_count; i++) {
2473  if (!skip_dct[i]) {
2474  int overflow;
2475  s->block_last_index[i] = s->dct_quantize(s, s->block[i], i, s->qscale, &overflow);
2476  // FIXME we could decide to change to quantizer instead of
2477  // clipping
2478  // JS: I don't think that would be a good idea it could lower
2479  // quality instead of improve it. Just INTRADC clipping
2480  // deserves changes in quantizer
2481  if (overflow)
2482  clip_coeffs(s, s->block[i], s->block_last_index[i]);
2483  } else
2484  s->block_last_index[i] = -1;
2485  }
2486  if (s->quantizer_noise_shaping) {
2487  for (i = 0; i < mb_block_count; i++) {
2488  if (!skip_dct[i]) {
2489  s->block_last_index[i] =
2490  dct_quantize_refine(s, s->block[i], weight[i],
2491  orig[i], i, s->qscale);
2492  }
2493  }
2494  }
2495 
2496  if (s->luma_elim_threshold && !s->mb_intra)
2497  for (i = 0; i < 4; i++)
2499  if (s->chroma_elim_threshold && !s->mb_intra)
2500  for (i = 4; i < mb_block_count; i++)
2502 
2503  if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
2504  for (i = 0; i < mb_block_count; i++) {
2505  if (s->block_last_index[i] == -1)
2506  s->coded_score[i] = INT_MAX / 256;
2507  }
2508  }
2509  }
2510 
2511  if ((s->avctx->flags & AV_CODEC_FLAG_GRAY) && s->mb_intra) {
2512  s->block_last_index[4] =
2513  s->block_last_index[5] = 0;
2514  s->block[4][0] =
2515  s->block[5][0] = (1024 + s->c_dc_scale / 2) / s->c_dc_scale;
2516  if (!s->chroma_y_shift) { /* 422 / 444 */
2517  for (i=6; i<12; i++) {
2518  s->block_last_index[i] = 0;
2519  s->block[i][0] = s->block[4][0];
2520  }
2521  }
2522  }
2523 
2524  // non c quantize code returns incorrect block_last_index FIXME
2525  if (s->alternate_scan && s->dct_quantize != ff_dct_quantize_c) {
2526  for (i = 0; i < mb_block_count; i++) {
2527  int j;
2528  if (s->block_last_index[i] > 0) {
2529  for (j = 63; j > 0; j--) {
2530  if (s->block[i][s->intra_scantable.permutated[j]])
2531  break;
2532  }
2533  s->block_last_index[i] = j;
2534  }
2535  }
2536  }
2537 
2538  /* huffman encode */
2539  switch(s->codec_id){ //FIXME funct ptr could be slightly faster
2543  ff_mpeg1_encode_mb(s, s->block, motion_x, motion_y);
2544  break;
2545  case AV_CODEC_ID_MPEG4:
2547  ff_mpeg4_encode_mb(s, s->block, motion_x, motion_y);
2548  break;
2549  case AV_CODEC_ID_MSMPEG4V2:
2550  case AV_CODEC_ID_MSMPEG4V3:
2551  case AV_CODEC_ID_WMV1:
2553  ff_msmpeg4_encode_mb(s, s->block, motion_x, motion_y);
2554  break;
2555  case AV_CODEC_ID_WMV2:
2556  if (CONFIG_WMV2_ENCODER)
2557  ff_wmv2_encode_mb(s, s->block, motion_x, motion_y);
2558  break;
2559  case AV_CODEC_ID_H261:
2560  if (CONFIG_H261_ENCODER)
2561  ff_h261_encode_mb(s, s->block, motion_x, motion_y);
2562  break;
2563  case AV_CODEC_ID_H263:
2564  case AV_CODEC_ID_H263P:
2565  case AV_CODEC_ID_FLV1:
2566  case AV_CODEC_ID_RV10:
2567  case AV_CODEC_ID_RV20:
2568  if (CONFIG_H263_ENCODER)
2569  ff_h263_encode_mb(s, s->block, motion_x, motion_y);
2570  break;
2571  case AV_CODEC_ID_MJPEG:
2572  case AV_CODEC_ID_AMV:
2574  ff_mjpeg_encode_mb(s, s->block);
2575  break;
2576  default:
2577  av_assert1(0);
2578  }
2579 }
2580 
2581 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
2582 {
2583  if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
2584  else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
2585  else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
2586 }
2587 
/* NOTE(review): the doxygen extraction dropped the signature line(s) before
 * 2589 — presumably "static void copy_context_before_encode(MpegEncContext *d,
 * MpegEncContext *s, int type)" — and line 2613 near the end. Verify against
 * the real mpegvideo_enc.c before editing this listing. */
/* Snapshot the encoder state that a trial encode (encode_mb_hq) will clobber:
 * last motion vectors, MPEG-1 skip-run and DC predictors, the bit-usage
 * statistics, and the current quantizer state. */
2589  int i;
2590 
2591  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2592 
2593  /* MPEG-1 */
2594  d->mb_skip_run= s->mb_skip_run;
2595  for(i=0; i<3; i++)
2596  d->last_dc[i] = s->last_dc[i];
2597 
2598  /* statistics */
2599  d->mv_bits= s->mv_bits;
2600  d->i_tex_bits= s->i_tex_bits;
2601  d->p_tex_bits= s->p_tex_bits;
2602  d->i_count= s->i_count;
2603  d->f_count= s->f_count;
2604  d->b_count= s->b_count;
2605  d->skip_count= s->skip_count;
2606  d->misc_bits= s->misc_bits;
2607  d->last_bits= 0;
2608 
2609  d->mb_skipped= 0;
2610  d->qscale= s->qscale;
2611  d->dquant= s->dquant;
2612 
2614 }
2615 
/* NOTE(review): the doxygen extraction dropped the signature line before
 * 2617 — presumably "static void copy_context_after_encode(MpegEncContext *d,
 * MpegEncContext *s, int type)" — and lines 2649 and 2652. Verify against the
 * real source. */
/* Copy the winning candidate's state back into the destination context:
 * motion vectors, skip-run and DC predictors, bit statistics, the MB coding
 * decision (intra/skipped/mv_type/mv_dir), the PutBitContexts, the block
 * pointer and the per-block last indexes. */
2617  int i;
2618 
2619  memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
2620  memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
2621 
2622  /* MPEG-1 */
2623  d->mb_skip_run= s->mb_skip_run;
2624  for(i=0; i<3; i++)
2625  d->last_dc[i] = s->last_dc[i];
2626 
2627  /* statistics */
2628  d->mv_bits= s->mv_bits;
2629  d->i_tex_bits= s->i_tex_bits;
2630  d->p_tex_bits= s->p_tex_bits;
2631  d->i_count= s->i_count;
2632  d->f_count= s->f_count;
2633  d->b_count= s->b_count;
2634  d->skip_count= s->skip_count;
2635  d->misc_bits= s->misc_bits;
2636 
2637  d->mb_intra= s->mb_intra;
2638  d->mb_skipped= s->mb_skipped;
2639  d->mv_type= s->mv_type;
2640  d->mv_dir= s->mv_dir;
2641  d->pb= s->pb;
2642  if(s->data_partitioning){
2643  d->pb2= s->pb2;
2644  d->tex_pb= s->tex_pb;
2645  }
2646  d->block= s->block;
2647  for(i=0; i<8; i++)
2648  d->block_last_index[i]= s->block_last_index[i];
2650  d->qscale= s->qscale;
2651 
2653 }
2654 
/* NOTE(review): the doxygen extraction dropped line 2656 (the PutBitContext
 * pb[2], pb2[2], tex_pb[2] parameters of this signature) and line 2688 inside
 * the FF_MB_DECISION_RD branch — verify against the real source. */
/* Trial-encode one macroblock candidate mode into the scratch bit buffer
 * selected by *next_block, score it by bit count (plus a distortion term via
 * sse_mb() under FF_MB_DECISION_RD), and if the score beats *dmin adopt it:
 * update *dmin, flip *next_block, and snapshot the context into 'best'. */
2655 static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
2657  int *dmin, int *next_block, int motion_x, int motion_y)
2658 {
2659  int score;
2660  uint8_t *dest_backup[3];
2661 
2662  copy_context_before_encode(s, backup, type);
2663 
2664  s->block= s->blocks[*next_block];
2665  s->pb= pb[*next_block];
2666  if(s->data_partitioning){
2667  s->pb2 = pb2 [*next_block];
2668  s->tex_pb= tex_pb[*next_block];
2669  }
2670 
     /* For the second buffer, redirect reconstruction output into the RD
      * scratchpad so s->dest keeps the currently-best reconstruction. */
2671  if(*next_block){
2672  memcpy(dest_backup, s->dest, sizeof(s->dest));
2673  s->dest[0] = s->sc.rd_scratchpad;
2674  s->dest[1] = s->sc.rd_scratchpad + 16*s->linesize;
2675  s->dest[2] = s->sc.rd_scratchpad + 16*s->linesize + 8;
2676  av_assert0(s->linesize >= 32); //FIXME
2677  }
2678 
2679  encode_mb(s, motion_x, motion_y);
2680 
2681  score= put_bits_count(&s->pb);
2682  if(s->data_partitioning){
2683  score+= put_bits_count(&s->pb2);
2684  score+= put_bits_count(&s->tex_pb);
2685  }
2686 
2687  if(s->avctx->mb_decision == FF_MB_DECISION_RD){
2689 
2690  score *= s->lambda2;
2691  score += sse_mb(s) << FF_LAMBDA_SHIFT;
2692  }
2693 
2694  if(*next_block){
2695  memcpy(s->dest, dest_backup, sizeof(s->dest));
2696  }
2697 
2698  if(score<*dmin){
2699  *dmin= score;
2700  *next_block^=1;
2701 
2702  copy_context_after_encode(best, s, type);
2703  }
2704 }
2705 
2706 static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
2707  const uint32_t *sq = ff_square_tab + 256;
2708  int acc=0;
2709  int x,y;
2710 
2711  if(w==16 && h==16)
2712  return s->mecc.sse[0](NULL, src1, src2, stride, 16);
2713  else if(w==8 && h==8)
2714  return s->mecc.sse[1](NULL, src1, src2, stride, 8);
2715 
2716  for(y=0; y<h; y++){
2717  for(x=0; x<w; x++){
2718  acc+= sq[src1[x + y*stride] - src2[x + y*stride]];
2719  }
2720  }
2721 
2722  av_assert2(acc>=0);
2723 
2724  return acc;
2725 }
2726 
2727 static int sse_mb(MpegEncContext *s){
2728  int w= 16;
2729  int h= 16;
2730 
2731  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
2732  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
2733 
2734  if(w==16 && h==16)
2735  if(s->avctx->mb_cmp == FF_CMP_NSSE){
2736  return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2737  s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2738  s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2739  }else{
2740  return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
2741  s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
2742  s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
2743  }
2744  else
2745  return sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
2746  +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
2747  +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
2748 }
2749 
/* NOTE(review): the doxygen extraction dropped the signature before 2751 —
 * presumably "static int pre_estimate_motion_thread(AVCodecContext *c,
 * void *arg){" — and line 2759, the per-MB pre-estimation call that belongs
 * inside the (otherwise empty) double loop. Verify against the real source. */
/* Slice-thread worker: runs the motion-estimation pre-pass bottom-up,
 * right-to-left over this slice context's macroblocks, using the
 * pre_dia_size search diamond. Always returns 0. */
2751  MpegEncContext *s= *(void**)arg;
2752 
2753 
2754  s->me.pre_pass=1;
2755  s->me.dia_size= s->avctx->pre_dia_size;
2756  s->first_slice_line=1;
2757  for(s->mb_y= s->end_mb_y-1; s->mb_y >= s->start_mb_y; s->mb_y--) {
2758  for(s->mb_x=s->mb_width-1; s->mb_x >=0 ;s->mb_x--) {
2760  }
2761  s->first_slice_line=0;
2762  }
2763 
2764  s->me.pre_pass=0;
2765 
2766  return 0;
2767 }
2768 
/* NOTE(review): the doxygen extraction dropped the signature before 2770 —
 * presumably "static int estimate_motion_thread(AVCodecContext *c,
 * void *arg){" — plus lines 2772, 2778 (block-index initialization for the
 * row) and 2786-2787/2789 (the B-frame vs P-frame estimation calls around
 * the bare "else" below). Verify against the real source. */
/* Slice-thread worker: main motion-estimation pass. Walks the slice's MB
 * rows top-down, advancing the four luma block indexes per MB, and stores
 * the chosen motion vector and mb_type in the context. Always returns 0. */
2770  MpegEncContext *s= *(void**)arg;
2771 
2773 
2774  s->me.dia_size= s->avctx->dia_size;
2775  s->first_slice_line=1;
2776  for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) {
2777  s->mb_x=0; //for block init below
2779  for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) {
2780  s->block_index[0]+=2;
2781  s->block_index[1]+=2;
2782  s->block_index[2]+=2;
2783  s->block_index[3]+=2;
2784 
2785  /* compute motion vector & mb_type and store in context */
2788  else
2790  }
2791  s->first_slice_line=0;
2792  }
2793  return 0;
2794 }
2795 
/* Slice-thread worker: computes a per-macroblock luma activity measure
 * (mb_var) and the MB mean (mb_mean) over the source picture, and
 * accumulates the total into s->me.mb_var_sum_temp. Always returns 0. */
/* NOTE(review): line 2800 is absent from this extraction — confirm nothing
 * was dropped there before relying on this listing. */
2796 static int mb_var_thread(AVCodecContext *c, void *arg){
2797  MpegEncContext *s= *(void**)arg;
2798  int mb_x, mb_y;
2799 
2801 
2802  for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2803  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2804  int xx = mb_x * 16;
2805  int yy = mb_y * 16;
2806  uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
2807  int varc;
2808  int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
2809 
      /* pix_norm1 minus (sum^2 >> 8), biased by +500 and rounded (+128)
       * before the final >> 8 — a variance-style measure of the 16x16 block */
2810  varc = (s->mpvencdsp.pix_norm1(pix, s->linesize) -
2811  (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
2812 
2813  s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc;
2814  s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8;
2815  s->me.mb_var_sum_temp += varc;
2816  }
2817  }
2818  return 0;
2819 }
2820 
/* NOTE(review): the doxygen extraction dropped the signature (2821-2822,
 * presumably "static void write_slice_end(MpegEncContext *s){" plus the
 * opening MPEG-4 format check) and the linked calls on lines 2824 (MPEG-4
 * partition merge), 2829 (MJPEG stuffing) and 2832 (byte alignment) —
 * verify against the real source; the braces below do not balance as shown. */
/* Finish the current slice: codec-specific stuffing/alignment, flush the
 * bit writer, and (for pass-1 stats on non-partitioned frames) account the
 * flushed bits as misc_bits. */
2823  if(s->partitioned_frame){
2825  }
2826 
2827  ff_mpeg4_stuffing(&s->pb);
2828  }else if(CONFIG_MJPEG_ENCODER && s->out_format == FMT_MJPEG){
2830  }
2831 
2833  flush_put_bits(&s->pb);
2834 
2835  if ((s->avctx->flags & AV_CODEC_FLAG_PASS1) && !s->partitioned_frame)
2836  s->misc_bits+= get_bits_diff(s);
2837 }
2838 
/* NOTE(review): the signature line 2839 — presumably
 * "static void write_mb_info(MpegEncContext *s)" — was dropped by the doc
 * extraction. */
/* Fill the newest 12-byte slot of the mb_info side buffer: bit offset of
 * the current MB, qscale, GOB number, MB address within the GOB, and the
 * H.263 motion-vector predictors. The 4MV second-vector fields are written
 * as 0 (not implemented). */
2840 {
2841  uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
2842  int offset = put_bits_count(&s->pb);
2843  int mba = s->mb_x + s->mb_width * (s->mb_y % s->gob_index);
2844  int gobn = s->mb_y / s->gob_index;
2845  int pred_x, pred_y;
2846  if (CONFIG_H263_ENCODER)
2847  ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
2848  bytestream_put_le32(&ptr, offset);
2849  bytestream_put_byte(&ptr, s->qscale);
2850  bytestream_put_byte(&ptr, gobn);
2851  bytestream_put_le16(&ptr, mba);
2852  bytestream_put_byte(&ptr, pred_x); /* hmv1 */
2853  bytestream_put_byte(&ptr, pred_y); /* vmv1 */
2854  /* 4MV not implemented */
2855  bytestream_put_byte(&ptr, 0); /* hmv2 */
2856  bytestream_put_byte(&ptr, 0); /* vmv2 */
2857 }
2858 
2859 static void update_mb_info(MpegEncContext *s, int startcode)
2860 {
2861  if (!s->mb_info)
2862  return;
2863  if (put_bits_count(&s->pb) - s->prev_mb_info*8 >= s->mb_info*8) {
2864  s->mb_info_size += 12;
2865  s->prev_mb_info = s->last_mb_info;
2866  }
2867  if (startcode) {
2868  s->prev_mb_info = put_bits_count(&s->pb)/8;
2869  /* This might have incremented mb_info_size above, and we return without
2870  * actually writing any info into that slot yet. But in that case,
2871  * this will be called again at the start of the after writing the
2872  * start code, actually writing the mb info. */
2873  return;
2874  }
2875 
2876  s->last_mb_info = put_bits_count(&s->pb)/8;
2877  if (!s->mb_info_size)
2878  s->mb_info_size += 12;
2879  write_mb_info(s);
2880 }
2881 
/**
 * Grow the shared PutBit buffer when fewer than 'threshold' bytes remain,
 * but only in the single-slice case where s->pb writes directly into the
 * codec-internal byte buffer. Pointers into the old buffer (ptr_lastgob,
 * vbv_delay_ptr) are rebased onto the new one.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation/overflow failure,
 * AVERROR(EINVAL) if space is still below the threshold afterwards.
 */
2882 int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
2883 {
2884  if ( s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold
2885  && s->slice_context_count == 1
2886  && s->pb.buf == s->avctx->internal->byte_buffer) {
2887  int lastgob_pos = s->ptr_lastgob - s->pb.buf;
2888  int vbv_pos = s->vbv_delay_ptr - s->pb.buf;
2889 
2890  uint8_t *new_buffer = NULL;
2891  int new_buffer_size = 0;
2892 
      /* guard the size arithmetic against overflow before allocating */
2893  if ((s->avctx->internal->byte_buffer_size + size_increase) >= INT_MAX/8) {
2894  av_log(s->avctx, AV_LOG_ERROR, "Cannot reallocate putbit buffer\n");
2895  return AVERROR(ENOMEM);
2896  }
2897 
2898  emms_c();
2899 
2900  av_fast_padded_malloc(&new_buffer, &new_buffer_size,
2901  s->avctx->internal->byte_buffer_size + size_increase);
2902  if (!new_buffer)
2903  return AVERROR(ENOMEM);
2904 
2905  memcpy(new_buffer, s->avctx->internal->byte_buffer, s->avctx->internal->byte_buffer_size);
      /* NOTE(review): line 2906 is missing from this extraction; the upstream
       * source frees the old internal byte buffer here before installing the
       * new one — confirm against the real mpegvideo_enc.c (as shown this
       * would read as a leak). */
2907  s->avctx->internal->byte_buffer = new_buffer;
2908  s->avctx->internal->byte_buffer_size = new_buffer_size;
2909  rebase_put_bits(&s->pb, new_buffer, new_buffer_size);
2910  s->ptr_lastgob = s->pb.buf + lastgob_pos;
2911  s->vbv_delay_ptr = s->pb.buf + vbv_pos;
2912  }
2913  if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < threshold)
2914  return AVERROR(EINVAL);
2915  return 0;
2916 }
2917 
2918 static int encode_thread(AVCodecContext *c, void *arg){
2919  MpegEncContext *s= *(void**)arg;
2920  int mb_x, mb_y;
2921  int chr_h= 16>>s->chroma_y_shift;
2922  int i, j;
2923  MpegEncContext best_s = { 0 }, backup_s;
2924  uint8_t bit_buf[2][MAX_MB_BYTES];
2925  uint8_t bit_buf2[2][MAX_MB_BYTES];
2926  uint8_t bit_buf_tex[2][MAX_MB_BYTES];
2927  PutBitContext pb[2], pb2[2], tex_pb[2];
2928 
2930 
2931  for(i=0; i<2; i++){
2932  init_put_bits(&pb [i], bit_buf [i], MAX_MB_BYTES);
2933  init_put_bits(&pb2 [i], bit_buf2 [i], MAX_MB_BYTES);
2934  init_put_bits(&tex_pb[i], bit_buf_tex[i], MAX_MB_BYTES);
2935  }
2936 
2937  s->last_bits= put_bits_count(&s->pb);
2938  s->mv_bits=0;
2939  s->misc_bits=0;
2940  s->i_tex_bits=0;
2941  s->p_tex_bits=0;
2942  s->i_count=0;
2943  s->f_count=0;
2944  s->b_count=0;
2945  s->skip_count=0;
2946 
2947  for(i=0; i<3; i++){
2948  /* init last dc values */
2949  /* note: quant matrix value (8) is implied here */
2950  s->last_dc[i] = 128 << s->intra_dc_precision;
2951 
2953  }
2954  if(s->codec_id==AV_CODEC_ID_AMV){
2955  s->last_dc[0] = 128*8/13;
2956  s->last_dc[1] = 128*8/14;
2957  s->last_dc[2] = 128*8/14;
2958  }
2959  s->mb_skip_run = 0;
2960  memset(s->last_mv, 0, sizeof(s->last_mv));
2961 
2962  s->last_mv_dir = 0;
2963 
2964  switch(s->codec_id){
2965  case AV_CODEC_ID_H263:
2966  case AV_CODEC_ID_H263P:
2967  case AV_CODEC_ID_FLV1:
2968  if (CONFIG_H263_ENCODER)
2969  s->gob_index = H263_GOB_HEIGHT(s->height);
2970  break;
2971  case AV_CODEC_ID_MPEG4:
2974  break;
2975  }
2976 
2977  s->resync_mb_x=0;
2978  s->resync_mb_y=0;
2979  s->first_slice_line = 1;
2980  s->ptr_lastgob = s->pb.buf;
2981  for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
2982  s->mb_x=0;
2983  s->mb_y= mb_y;
2984 
2985  ff_set_qscale(s, s->qscale);
2987 
2988  for(mb_x=0; mb_x < s->mb_width; mb_x++) {
2989  int xy= mb_y*s->mb_stride + mb_x; // removed const, H261 needs to adjust this
2990  int mb_type= s->mb_type[xy];
2991 // int d;
2992  int dmin= INT_MAX;
2993  int dir;
2994  int size_increase = s->avctx->internal->byte_buffer_size/4
2995  + s->mb_width*MAX_MB_BYTES;
2996 
2997  ff_mpv_reallocate_putbitbuffer(s, MAX_MB_BYTES, size_increase);
2998  if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < MAX_MB_BYTES){
2999  av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
3000  return -1;
3001  }
3002  if(s->data_partitioning){
3003  if( s->pb2 .buf_end - s->pb2 .buf - (put_bits_count(&s-> pb2)>>3) < MAX_MB_BYTES
3004  || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
3005  av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
3006  return -1;
3007  }
3008  }
3009 
3010  s->mb_x = mb_x;
3011  s->mb_y = mb_y; // moved into loop, can get changed by H.261
3013 
3016  xy= s->mb_y*s->mb_stride + s->mb_x;
3017  mb_type= s->mb_type[xy];
3018  }
3019 
3020  /* write gob / video packet header */
3021  if(s->rtp_mode){
3022  int current_packet_size, is_gob_start;
3023 
3024  current_packet_size= ((put_bits_count(&s->pb)+7)>>3) - (s->ptr_lastgob - s->pb.buf);
3025 
3026  is_gob_start = s->rtp_payload_size &&
3027  current_packet_size >= s->rtp_payload_size &&
3028  mb_y + mb_x > 0;
3029 
3030  if(s->start_mb_y == mb_y && mb_y > 0 && mb_x==0) is_gob_start=1;
3031 
3032  switch(s->codec_id){
3033  case AV_CODEC_ID_H263:
3034  case AV_CODEC_ID_H263P:
3035  if(!s->h263_slice_structured)
3036  if(s->mb_x || s->mb_y%s->gob_index) is_gob_start=0;
3037  break;
3039  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3041  if(s->mb_skip_run) is_gob_start=0;
3042  break;
3043  case AV_CODEC_ID_MJPEG:
3044  if(s->mb_x==0 && s->mb_y!=0) is_gob_start=1;
3045  break;
3046  }
3047 
3048  if(is_gob_start){
3049  if(s->start_mb_y != mb_y || mb_x!=0){
3050  write_slice_end(s);
3051 
3054  }
3055  }
3056 
3057  av_assert2((put_bits_count(&s->pb)&7) == 0);
3058  current_packet_size= put_bits_ptr(&s->pb) - s->ptr_lastgob;
3059 
3060  if (s->error_rate && s->resync_mb_x + s->resync_mb_y > 0) {
3061  int r= put_bits_count(&s->pb)/8 + s->picture_number + 16 + s->mb_x + s->mb_y;
3062  int d = 100 / s->error_rate;
3063  if(r % d == 0){
3064  current_packet_size=0;
3065  s->pb.buf_ptr= s->ptr_lastgob;
3066  av_assert1(put_bits_ptr(&s->pb) == s->ptr_lastgob);
3067  }
3068  }
3069 
3070 #if FF_API_RTP_CALLBACK
3072  if (s->avctx->rtp_callback){
3073  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width + mb_x - s->resync_mb_x;
3074  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, current_packet_size, number_mb);
3075  }
3077 #endif
3078  update_mb_info(s, 1);
3079 
3080  switch(s->codec_id){
3081  case AV_CODEC_ID_MPEG4:
3082  if (CONFIG_MPEG4_ENCODER) {
3085  }
3086  break;
3092  }
3093  break;
3094  case AV_CODEC_ID_H263:
3095  case AV_CODEC_ID_H263P:
3096  if (CONFIG_H263_ENCODER)
3097  ff_h263_encode_gob_header(s, mb_y);
3098  break;
3099  }
3100 
3101  if (s->avctx->flags & AV_CODEC_FLAG_PASS1) {
3102  int bits= put_bits_count(&s->pb);
3103  s->misc_bits+= bits - s->last_bits;
3104  s->last_bits= bits;
3105  }
3106 
3107  s->ptr_lastgob += current_packet_size;
3108  s->first_slice_line=1;
3109  s->resync_mb_x=mb_x;
3110  s->resync_mb_y=mb_y;
3111  }
3112  }
3113 
3114  if( (s->resync_mb_x == s->mb_x)
3115  && s->resync_mb_y+1 == s->mb_y){
3116  s->first_slice_line=0;
3117  }
3118 
3119  s->mb_skipped=0;
3120  s->dquant=0; //only for QP_RD
3121 
3122  update_mb_info(s, 0);
3123 
3124  if (mb_type & (mb_type-1) || (s->mpv_flags & FF_MPV_FLAG_QP_RD)) { // more than 1 MB type possible or FF_MPV_FLAG_QP_RD
3125  int next_block=0;
3126  int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
3127 
3128  copy_context_before_encode(&backup_s, s, -1);
3129  backup_s.pb= s->pb;
3132  if(s->data_partitioning){
3133  backup_s.pb2= s->pb2;
3134  backup_s.tex_pb= s->tex_pb;
3135  }
3136 
3137  if(mb_type&CANDIDATE_MB_TYPE_INTER){
3138  s->mv_dir = MV_DIR_FORWARD;
3139  s->mv_type = MV_TYPE_16X16;
3140  s->mb_intra= 0;
3141  s->mv[0][0][0] = s->p_mv_table[xy][0];
3142  s->mv[0][0][1] = s->p_mv_table[xy][1];
3143  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
3144  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3145  }
3146  if(mb_type&CANDIDATE_MB_TYPE_INTER_I){
3147  s->mv_dir = MV_DIR_FORWARD;
3148  s->mv_type = MV_TYPE_FIELD;
3149  s->mb_intra= 0;
3150  for(i=0; i<2; i++){
3151  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3152  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3153  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3154  }
3155  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
3156  &dmin, &next_block, 0, 0);
3157  }
3158  if(mb_type&CANDIDATE_MB_TYPE_SKIPPED){
3159  s->mv_dir = MV_DIR_FORWARD;
3160  s->mv_type = MV_TYPE_16X16;
3161  s->mb_intra= 0;
3162  s->mv[0][0][0] = 0;
3163  s->mv[0][0][1] = 0;
3164  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
3165  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3166  }
3167  if(mb_type&CANDIDATE_MB_TYPE_INTER4V){
3168  s->mv_dir = MV_DIR_FORWARD;
3169  s->mv_type = MV_TYPE_8X8;
3170  s->mb_intra= 0;
3171  for(i=0; i<4; i++){
3172  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3173  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3174  }
3175  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
3176  &dmin, &next_block, 0, 0);
3177  }
3178  if(mb_type&CANDIDATE_MB_TYPE_FORWARD){
3179  s->mv_dir = MV_DIR_FORWARD;
3180  s->mv_type = MV_TYPE_16X16;
3181  s->mb_intra= 0;
3182  s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3183  s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3184  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
3185  &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
3186  }
3187  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD){
3188  s->mv_dir = MV_DIR_BACKWARD;
3189  s->mv_type = MV_TYPE_16X16;
3190  s->mb_intra= 0;
3191  s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3192  s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3193  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
3194  &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
3195  }
3196  if(mb_type&CANDIDATE_MB_TYPE_BIDIR){
3198  s->mv_type = MV_TYPE_16X16;
3199  s->mb_intra= 0;
3200  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3201  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3202  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3203  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3204  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
3205  &dmin, &next_block, 0, 0);
3206  }
3207  if(mb_type&CANDIDATE_MB_TYPE_FORWARD_I){
3208  s->mv_dir = MV_DIR_FORWARD;
3209  s->mv_type = MV_TYPE_FIELD;
3210  s->mb_intra= 0;
3211  for(i=0; i<2; i++){
3212  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3213  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3214  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3215  }
3216  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
3217  &dmin, &next_block, 0, 0);
3218  }
3219  if(mb_type&CANDIDATE_MB_TYPE_BACKWARD_I){
3220  s->mv_dir = MV_DIR_BACKWARD;
3221  s->mv_type = MV_TYPE_FIELD;
3222  s->mb_intra= 0;
3223  for(i=0; i<2; i++){
3224  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3225  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3226  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3227  }
3228  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
3229  &dmin, &next_block, 0, 0);
3230  }
3231  if(mb_type&CANDIDATE_MB_TYPE_BIDIR_I){
3233  s->mv_type = MV_TYPE_FIELD;
3234  s->mb_intra= 0;
3235  for(dir=0; dir<2; dir++){
3236  for(i=0; i<2; i++){
3237  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3238  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3239  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3240  }
3241  }
3242  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
3243  &dmin, &next_block, 0, 0);
3244  }
3245  if(mb_type&CANDIDATE_MB_TYPE_INTRA){
3246  s->mv_dir = 0;
3247  s->mv_type = MV_TYPE_16X16;
3248  s->mb_intra= 1;
3249  s->mv[0][0][0] = 0;
3250  s->mv[0][0][1] = 0;
3251  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
3252  &dmin, &next_block, 0, 0);
3253  if(s->h263_pred || s->h263_aic){
3254  if(best_s.mb_intra)
3255  s->mbintra_table[mb_x + mb_y*s->mb_stride]=1;
3256  else
3257  ff_clean_intra_table_entries(s); //old mode?
3258  }
3259  }
3260 
3261  if ((s->mpv_flags & FF_MPV_FLAG_QP_RD) && dmin < INT_MAX) {
3262  if(best_s.mv_type==MV_TYPE_16X16){ //FIXME move 4mv after QPRD
3263  const int last_qp= backup_s.qscale;
3264  int qpi, qp, dc[6];
3265  int16_t ac[6][16];
3266  const int mvdir= (best_s.mv_dir&MV_DIR_BACKWARD) ? 1 : 0;
3267  static const int dquant_tab[4]={-1,1,-2,2};
3268  int storecoefs = s->mb_intra && s->dc_val[0];
3269 
3270  av_assert2(backup_s.dquant == 0);
3271 
3272  //FIXME intra
3273  s->mv_dir= best_s.mv_dir;
3274  s->mv_type = MV_TYPE_16X16;
3275  s->mb_intra= best_s.mb_intra;
3276  s->mv[0][0][0] = best_s.mv[0][0][0];
3277  s->mv[0][0][1] = best_s.mv[0][0][1];
3278  s->mv[1][0][0] = best_s.mv[1][0][0];
3279  s->mv[1][0][1] = best_s.mv[1][0][1];
3280 
3281  qpi = s->pict_type == AV_PICTURE_TYPE_B ? 2 : 0;
3282  for(; qpi<4; qpi++){
3283  int dquant= dquant_tab[qpi];
3284  qp= last_qp + dquant;
3285  if(qp < s->avctx->qmin || qp > s->avctx->qmax)
3286  continue;
3287  backup_s.dquant= dquant;
3288  if(storecoefs){
3289  for(i=0; i<6; i++){
3290  dc[i]= s->dc_val[0][ s->block_index[i] ];
3291  memcpy(ac[i], s->ac_val[0][s->block_index[i]], sizeof(int16_t)*16);
3292  }
3293  }
3294 
3295  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3296  &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
3297  if(best_s.qscale != qp){
3298  if(storecoefs){
3299  for(i=0; i<6; i++){
3300  s->dc_val[0][ s->block_index[i] ]= dc[i];
3301  memcpy(s->ac_val[0][s->block_index[i]], ac[i], sizeof(int16_t)*16);
3302  }
3303  }
3304  }
3305  }
3306  }
3307  }
3309  int mx= s->b_direct_mv_table[xy][0];
3310  int my= s->b_direct_mv_table[xy][1];
3311 
3312  backup_s.dquant = 0;
3314  s->mb_intra= 0;
3315  ff_mpeg4_set_direct_mv(s, mx, my);
3316  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3317  &dmin, &next_block, mx, my);
3318  }
3320  backup_s.dquant = 0;
3322  s->mb_intra= 0;
3323  ff_mpeg4_set_direct_mv(s, 0, 0);
3324  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
3325  &dmin, &next_block, 0, 0);
3326  }
3327  if (!best_s.mb_intra && s->mpv_flags & FF_MPV_FLAG_SKIP_RD) {
3328  int coded=0;
3329  for(i=0; i<6; i++)
3330  coded |= s->block_last_index[i];
3331  if(coded){
3332  int mx,my;
3333  memcpy(s->mv, best_s.mv, sizeof(s->mv));
3334  if(CONFIG_MPEG4_ENCODER && best_s.mv_dir & MV_DIRECT){
3335  mx=my=0; //FIXME find the one we actually used
3336  ff_mpeg4_set_direct_mv(s, mx, my);
3337  }else if(best_s.mv_dir&MV_DIR_BACKWARD){
3338  mx= s->mv[1][0][0];
3339  my= s->mv[1][0][1];
3340  }else{
3341  mx= s->mv[0][0][0];
3342  my= s->mv[0][0][1];
3343  }
3344 
3345  s->mv_dir= best_s.mv_dir;
3346  s->mv_type = best_s.mv_type;
3347  s->mb_intra= 0;
3348 /* s->mv[0][0][0] = best_s.mv[0][0][0];
3349  s->mv[0][0][1] = best_s.mv[0][0][1];
3350  s->mv[1][0][0] = best_s.mv[1][0][0];
3351  s->mv[1][0][1] = best_s.mv[1][0][1];*/
3352  backup_s.dquant= 0;
3353  s->skipdct=1;
3354  encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER /* wrong but unused */, pb, pb2, tex_pb,
3355  &dmin, &next_block, mx, my);
3356  s->skipdct=0;
3357  }
3358  }
3359 
3360  s->current_picture.qscale_table[xy] = best_s.qscale;
3361 
3362  copy_context_after_encode(s, &best_s, -1);
3363 
3364  pb_bits_count= put_bits_count(&s->pb);
3365  flush_put_bits(&s->pb);
3366  avpriv_copy_bits(&backup_s.pb, bit_buf[next_block^1], pb_bits_count);
3367  s->pb= backup_s.pb;
3368 
3369  if(s->data_partitioning){
3370  pb2_bits_count= put_bits_count(&s->pb2);
3371  flush_put_bits(&s->pb2);
3372  avpriv_copy_bits(&backup_s.pb2, bit_buf2[next_block^1], pb2_bits_count);
3373  s->pb2= backup_s.pb2;
3374 
3375  tex_pb_bits_count= put_bits_count(&s->tex_pb);
3376  flush_put_bits(&s->tex_pb);
3377  avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block^1], tex_pb_bits_count);
3378  s->tex_pb= backup_s.tex_pb;
3379  }
3380  s->last_bits= put_bits_count(&s->pb);
3381 
3382  if (CONFIG_H263_ENCODER &&
3385 
3386  if(next_block==0){ //FIXME 16 vs linesize16
3387  s->hdsp.put_pixels_tab[0][0](s->dest[0], s->sc.rd_scratchpad , s->linesize ,16);
3388  s->hdsp.put_pixels_tab[1][0](s->dest[1], s->sc.rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
3389  s->hdsp.put_pixels_tab[1][0](s->dest[2], s->sc.rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
3390  }
3391 
3394  } else {
3395  int motion_x = 0, motion_y = 0;
3397  // only one MB-Type possible
3398 
3399  switch(mb_type){
3401  s->mv_dir = 0;
3402  s->mb_intra= 1;
3403  motion_x= s->mv[0][0][0] = 0;
3404  motion_y= s->mv[0][0][1] = 0;
3405  break;
3407  s->mv_dir = MV_DIR_FORWARD;
3408  s->mb_intra= 0;
3409  motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0];
3410  motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1];
3411  break;
3413  s->mv_dir = MV_DIR_FORWARD;
3414  s->mv_type = MV_TYPE_FIELD;
3415  s->mb_intra= 0;
3416  for(i=0; i<2; i++){
3417  j= s->field_select[0][i] = s->p_field_select_table[i][xy];
3418  s->mv[0][i][0] = s->p_field_mv_table[i][j][xy][0];
3419  s->mv[0][i][1] = s->p_field_mv_table[i][j][xy][1];
3420  }
3421  break;
3423  s->mv_dir = MV_DIR_FORWARD;
3424  s->mv_type = MV_TYPE_8X8;
3425  s->mb_intra= 0;
3426  for(i=0; i<4; i++){
3427  s->mv[0][i][0] = s->current_picture.motion_val[0][s->block_index[i]][0];
3428  s->mv[0][i][1] = s->current_picture.motion_val[0][s->block_index[i]][1];
3429  }
3430  break;
3432  if (CONFIG_MPEG4_ENCODER) {
3434  s->mb_intra= 0;
3435  motion_x=s->b_direct_mv_table[xy][0];
3436  motion_y=s->b_direct_mv_table[xy][1];
3437  ff_mpeg4_set_direct_mv(s, motion_x, motion_y);
3438  }
3439  break;
3441  if (CONFIG_MPEG4_ENCODER) {
3443  s->mb_intra= 0;
3444  ff_mpeg4_set_direct_mv(s, 0, 0);
3445  }
3446  break;
3449  s->mb_intra= 0;
3450  s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0];
3451  s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1];
3452  s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0];
3453  s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1];
3454  break;
3456  s->mv_dir = MV_DIR_BACKWARD;
3457  s->mb_intra= 0;
3458  motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0];
3459  motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1];
3460  break;
3462  s->mv_dir = MV_DIR_FORWARD;
3463  s->mb_intra= 0;
3464  motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0];
3465  motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1];
3466  break;
3468  s->mv_dir = MV_DIR_FORWARD;
3469  s->mv_type = MV_TYPE_FIELD;
3470  s->mb_intra= 0;
3471  for(i=0; i<2; i++){
3472  j= s->field_select[0][i] = s->b_field_select_table[0][i][xy];
3473  s->mv[0][i][0] = s->b_field_mv_table[0][i][j][xy][0];
3474  s->mv[0][i][1] = s->b_field_mv_table[0][i][j][xy][1];
3475  }
3476  break;
3478  s->mv_dir = MV_DIR_BACKWARD;
3479  s->mv_type = MV_TYPE_FIELD;
3480  s->mb_intra= 0;
3481  for(i=0; i<2; i++){
3482  j= s->field_select[1][i] = s->b_field_select_table[1][i][xy];
3483  s->mv[1][i][0] = s->b_field_mv_table[1][i][j][xy][0];
3484  s->mv[1][i][1] = s->b_field_mv_table[1][i][j][xy][1];
3485  }
3486  break;
3489  s->mv_type = MV_TYPE_FIELD;
3490  s->mb_intra= 0;
3491  for(dir=0; dir<2; dir++){
3492  for(i=0; i<2; i++){
3493  j= s->field_select[dir][i] = s->b_field_select_table[dir][i][xy];
3494  s->mv[dir][i][0] = s->b_field_mv_table[dir][i][j][xy][0];
3495  s->mv[dir][i][1] = s->b_field_mv_table[dir][i][j][xy][1];
3496  }
3497  }
3498  break;
3499  default:
3500  av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n");
3501  }
3502 
3503  encode_mb(s, motion_x, motion_y);
3504 
3505  // RAL: Update last macroblock type
3506  s->last_mv_dir = s->mv_dir;
3507 
3508  if (CONFIG_H263_ENCODER &&
3511 
3513  }
3514 
3515  /* clean the MV table in IPS frames for direct mode in B-frames */
3516  if(s->mb_intra /* && I,P,S_TYPE */){
3517  s->p_mv_table[xy][0]=0;
3518  s->p_mv_table[xy][1]=0;
3519  }
3520 
3521  if (s->avctx->flags & AV_CODEC_FLAG_PSNR) {
3522  int w= 16;
3523  int h= 16;
3524 
3525  if(s->mb_x*16 + 16 > s->width ) w= s->width - s->mb_x*16;
3526  if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
3527 
3529  s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
3530  s->dest[0], w, h, s->linesize);
3532  s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3533  s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3535  s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
3536  s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
3537  }
3538  if(s->loop_filter){
3541  }
3542  ff_dlog(s->avctx, "MB %d %d bits\n",
3543  s->mb_x + s->mb_y * s->mb_stride, put_bits_count(&s->pb));
3544  }
3545  }
3546 
3547  //not beautiful here but we must write it before flushing so it has to be here
3550 
3551  write_slice_end(s);
3552 
3553 #if FF_API_RTP_CALLBACK
3555  /* Send the last GOB if RTP */
3556  if (s->avctx->rtp_callback) {
3557  int number_mb = (mb_y - s->resync_mb_y)*s->mb_width - s->resync_mb_x;
3558  int pdif = put_bits_ptr(&s->pb) - s->ptr_lastgob;
3559  /* Call the RTP callback to send the last GOB */
3560  emms_c();
3561  s->avctx->rtp_callback(s->avctx, s->ptr_lastgob, pdif, number_mb);
3562  }
3564 #endif
3565 
3566  return 0;
3567 }
3568 
/* Accumulate a per-slice statistic from a worker context (src) into the main
 * context (dst) and zero the source field so a later merge cannot double
 * count.  Wrapped in do { } while (0) so the two statements expand safely as
 * a single statement (e.g. inside an unbraced if). */
#define MERGE(field) do { dst->field += src->field; src->field = 0; } while (0)
    /* NOTE(review): the signature line of this helper is elided in this view;
     * from the body it merges motion-estimation statistics from a slice
     * context (src) into the main context (dst) via the MERGE macro, which
     * also zeroes the source field. */
    MERGE(me.scene_change_score);
    MERGE(me.mc_mb_var_sum_temp);
    MERGE(me.mb_var_sum_temp);
}
3575 
    /* NOTE(review): the signature line of this helper is elided in this view;
     * from the body it merges per-slice encoder statistics and the slice
     * bitstream from a worker context (src) into the main context (dst). */
    int i;

    MERGE(dct_count[0]); //note, the other dct vars are not part of the context
    MERGE(dct_count[1]);
    /* bit-accounting counters used by rate control / statistics */
    MERGE(mv_bits);
    MERGE(i_tex_bits);
    MERGE(p_tex_bits);
    MERGE(i_count);
    MERGE(f_count);
    MERGE(b_count);
    MERGE(skip_count);
    MERGE(misc_bits);
    MERGE(er.error_count);
    /* NOTE(review): a few additional MERGE lines appear to be elided from
     * this view between the counters above and the noise-reduction merge
     * below — verify against the full source. */

    if (dst->noise_reduction){
        /* per-coefficient DCT error accumulators, intra ([0]) and inter ([1]) */
        for(i=0; i<64; i++){
            MERGE(dct_error_sum[0][i]);
            MERGE(dct_error_sum[1][i]);
        }
    }

    /* both slice bitstreams must be byte aligned before concatenation */
    av_assert1(put_bits_count(&src->pb) % 8 ==0);
    av_assert1(put_bits_count(&dst->pb) % 8 ==0);
    /* append the worker's bits onto the main bitstream and flush */
    avpriv_copy_bits(&dst->pb, src->pb.buf, put_bits_count(&src->pb));
    flush_put_bits(&dst->pb);
}
3607 
/**
 * Choose the lambda / qscale for the picture about to be encoded.
 *
 * @param s       encoder context
 * @param dry_run nonzero when only probing (state such as next_lambda is
 *                not consumed on a dry run)
 * @return 0 on success, -1 if rate control produced a negative quality
 */
static int estimate_qp(MpegEncContext *s, int dry_run){
    if (s->next_lambda){
        /* an explicit lambda was queued for this picture; consume it unless
         * this is only a dry run.
         * NOTE(review): the assignment of next_lambda into the picture
         * quality fields appears to be elided from this view — verify
         * against the full source. */
        if(!dry_run) s->next_lambda= 0;
    } else if (!s->fixed_qscale) {
        /* let rate control pick the quality for this picture */
        int quality = ff_rate_estimate_qscale(s, dry_run);
        s->current_picture.f->quality = quality;
        if (s->current_picture.f->quality < 0)
            return -1;  /* rate control failed */
    }

    if(s->adaptive_quant){
        /* per-MB quantization: codec-specific fixup of the qscale table */
        switch(s->codec_id){
        case AV_CODEC_ID_MPEG4:
            /* NOTE(review): the MPEG-4 qscale cleanup call appears to be
             * elided here — only the break is visible. */
            break;
        case AV_CODEC_ID_H263:
        case AV_CODEC_ID_H263P:
        case AV_CODEC_ID_FLV1:
            if (CONFIG_H263_ENCODER)
            /* NOTE(review): the H.263 cleanup call appears to be elided, so
             * this `if` currently guards only the break — an unconfigured
             * H.263 encoder would fall through into the default arm.
             * Confirm against the upstream source. */
            break;
        default:
            ff_init_qscale_tab(s);
        }

        s->lambda= s->lambda_table[0];
        //FIXME broken
    }else
        s->lambda = s->current_picture.f->quality;
    update_qscale(s);
    return 0;
}
3644 
/* must be called before writing the header */
/* NOTE(review): the signature line of this helper is elided in this view;
 * the body computes the temporal distances between pictures that the
 * encoder uses for prediction. */
    /* picture time in time_base numerator units, derived from the pts */
    s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;

    if(s->pict_type==AV_PICTURE_TYPE_B){
        /* distance from the previous non-B picture to this B picture */
        s->pb_time= s->pp_time - (s->last_non_b_time - s->time);
        av_assert1(s->pb_time > 0 && s->pb_time < s->pp_time);
    }else{
        /* distance between the two most recent non-B pictures */
        s->pp_time= s->time - s->last_non_b_time;
        s->last_non_b_time= s->time;
        av_assert1(s->picture_number==0 || s->pp_time > 0);
    }
}
3659 
3661 {
3662  int i, ret;
3663  int bits;
3664  int context_count = s->slice_context_count;
3665 
3667 
3668  /* Reset the average MB variance */
3669  s->me.mb_var_sum_temp =
3670  s->me.mc_mb_var_sum_temp = 0;
3671 
3672  /* we need to initialize some time vars before we can encode B-frames */
3673  // RAL: Condition added for MPEG1VIDEO
3677  ff_set_mpeg4_time(s);
3678 
3679  s->me.scene_change_score=0;
3680 
3681 // s->lambda= s->current_picture_ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
3682 
3683  if(s->pict_type==AV_PICTURE_TYPE_I){
3684  if(s->msmpeg4_version >= 3) s->no_rounding=1;
3685  else s->no_rounding=0;
3686  }else if(s->pict_type!=AV_PICTURE_TYPE_B){
3688  s->no_rounding ^= 1;
3689  }
3690 
3691  if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
3692  if (estimate_qp(s,1) < 0)
3693  return -1;
3694  ff_get_2pass_fcode(s);
3695  } else if (!(s->avctx->flags & AV_CODEC_FLAG_QSCALE)) {
3697  s->lambda= s->last_lambda_for[s->pict_type];
3698  else
3700  update_qscale(s);
3701  }
3702 
3708  }
3709 
3710  s->mb_intra=0; //for the rate distortion & bit compare functions
3711  for(i=1; i<context_count; i++){
3713  if (ret < 0)
3714  return ret;
3715  }
3716 
3717  if(ff_init_me(s)<0)
3718  return -1;
3719 
3720  /* Estimate motion for every MB */
3721  if(s->pict_type != AV_PICTURE_TYPE_I){
3722  s->lambda = (s->lambda * s->me_penalty_compensation + 128) >> 8;
3723  s->lambda2 = (s->lambda2 * (int64_t) s->me_penalty_compensation + 128) >> 8;
3724  if (s->pict_type != AV_PICTURE_TYPE_B) {
3725  if ((s->me_pre && s->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
3726  s->me_pre == 2) {
3727  s->avctx->execute(s->avctx, pre_estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3728  }
3729  }
3730 
3731  s->avctx->execute(s->avctx, estimate_motion_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3732  }else /* if(s->pict_type == AV_PICTURE_TYPE_I) */{
3733  /* I-Frame */
3734  for(i=0; i<s->mb_stride*s->mb_height; i++)
3736 
3737  if(!s->fixed_qscale){
3738  /* finding spatial complexity for I-frame rate control */
3739  s->avctx->execute(s->avctx, mb_var_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3740  }
3741  }
3742  for(i=1; i<context_count; i++){
3744  }
3746  s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp;
3747  emms_c();
3748 
3750  s->pict_type == AV_PICTURE_TYPE_P) {
3752  for(i=0; i<s->mb_stride*s->mb_height; i++)
3754  if(s->msmpeg4_version >= 3)
3755  s->no_rounding=1;
3756  ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
3758  }
3759 
3760  if(!s->umvplus){
3763 
3765  int a,b;
3766  a= ff_get_best_fcode(s, s->p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
3768  s->f_code= FFMAX3(s->f_code, a, b);
3769  }
3770 
3774  int j;
3775  for(i=0; i<2; i++){
3776  for(j=0; j<2; j++)
3779  }
3780  }
3781  }
3782 
3783  if(s->pict_type==AV_PICTURE_TYPE_B){
3784  int a, b;
3785 
3788  s->f_code = FFMAX(a, b);
3789 
3792  s->b_code = FFMAX(a, b);
3793 
3799  int dir, j;
3800  for(dir=0; dir<2; dir++){
3801  for(i=0; i<2; i++){
3802  for(j=0; j<2; j++){
3805  ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
3806  s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
3807  }
3808  }
3809  }
3810  }
3811  }
3812  }
3813 
3814  if (estimate_qp(s, 0) < 0)
3815  return -1;
3816 
3817  if (s->qscale < 3 && s->max_qcoeff <= 128 &&
3818  s->pict_type == AV_PICTURE_TYPE_I &&
3819  !(s->avctx->flags & AV_CODEC_FLAG_QSCALE))
3820  s->qscale= 3; //reduce clipping problems
3821 
3822  if (s->out_format == FMT_MJPEG) {
3823  const uint16_t * luma_matrix = ff_mpeg1_default_intra_matrix;
3824  const uint16_t *chroma_matrix = ff_mpeg1_default_intra_matrix;
3825 
3826  if (s->avctx->intra_matrix) {
3827  chroma_matrix =
3828  luma_matrix = s->avctx->intra_matrix;
3829  }
3830  if (s->avctx->chroma_intra_matrix)
3831  chroma_matrix = s->avctx->chroma_intra_matrix;
3832 
3833  /* for mjpeg, we do include qscale in the matrix */
3834  for(i=1;i<64;i++){
3835  int j = s->idsp.idct_permutation[i];
3836 
3837  s->chroma_intra_matrix[j] = av_clip_uint8((chroma_matrix[i] * s->qscale) >> 3);
3838  s-> intra_matrix[j] = av_clip_uint8(( luma_matrix[i] * s->qscale) >> 3);
3839  }
3840  s->y_dc_scale_table=
3842  s->chroma_intra_matrix[0] =
3845  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3847  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3848  s->qscale= 8;
3849  }
3850  if(s->codec_id == AV_CODEC_ID_AMV){
3851  static const uint8_t y[32]={13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13,13};
3852  static const uint8_t c[32]={14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14,14};
3853  for(i=1;i<64;i++){
3855 
3856  s->intra_matrix[j] = sp5x_quant_table[5*2+0][i];
3857  s->chroma_intra_matrix[j] = sp5x_quant_table[5*2+1][i];
3858  }
3859  s->y_dc_scale_table= y;
3860  s->c_dc_scale_table= c;
3861  s->intra_matrix[0] = 13;
3862  s->chroma_intra_matrix[0] = 14;
3864  s->intra_matrix, s->intra_quant_bias, 8, 8, 1);
3866  s->chroma_intra_matrix, s->intra_quant_bias, 8, 8, 1);
3867  s->qscale= 8;
3868  }
3869 
3870  //FIXME var duplication
3872  s->current_picture.f->key_frame = s->pict_type == AV_PICTURE_TYPE_I; //FIXME pic_ptr
3875 
3876  if (s->current_picture.f->key_frame)
3877  s->picture_in_gop_number=0;
3878 
3879  s->mb_x = s->mb_y = 0;
3880  s->last_bits= put_bits_count(&s->pb);
3881  switch(s->out_format) {
3882  case FMT_MJPEG:
3886  break;
3887  case FMT_H261:
3888  if (CONFIG_H261_ENCODER)
3889  ff_h261_encode_picture_header(s, picture_number);
3890  break;
3891  case FMT_H263:
3893  ff_wmv2_encode_picture_header(s, picture_number);
3894  else if (CONFIG_MSMPEG4_ENCODER && s->msmpeg4_version)
3895  ff_msmpeg4_encode_picture_header(s, picture_number);
3896  else if (CONFIG_MPEG4_ENCODER && s->h263_pred) {
3897  ret = ff_mpeg4_encode_picture_header(s, picture_number);
3898  if (ret < 0)
3899  return ret;
3900  } else if (CONFIG_RV10_ENCODER && s->codec_id == AV_CODEC_ID_RV10) {
3901  ret = ff_rv10_encode_picture_header(s, picture_number);
3902  if (ret < 0)
3903  return ret;
3904  }
3905  else if (CONFIG_RV20_ENCODER && s->codec_id == AV_CODEC_ID_RV20)
3906  ff_rv20_encode_picture_header(s, picture_number);
3907  else if (CONFIG_FLV_ENCODER && s->codec_id == AV_CODEC_ID_FLV1)
3908  ff_flv_encode_picture_header(s, picture_number);
3909  else if (CONFIG_H263_ENCODER)
3910  ff_h263_encode_picture_header(s, picture_number);
3911  break;
3912  case FMT_MPEG1:
3914  ff_mpeg1_encode_picture_header(s, picture_number);
3915  break;
3916  default:
3917  av_assert0(0);
3918  }
3919  bits= put_bits_count(&s->pb);
3920  s->header_bits= bits - s->last_bits;
3921 
3922  for(i=1; i<context_count; i++){
3924  }
3925  s->avctx->execute(s->avctx, encode_thread, &s->thread_context[0], NULL, context_count, sizeof(void*));
3926  for(i=1; i<context_count; i++){
3927  if (s->pb.buf_end == s->thread_context[i]->pb.buf)
3928  set_put_bits_buffer_size(&s->pb, FFMIN(s->thread_context[i]->pb.buf_end - s->pb.buf, INT_MAX/8-32));
3930  }
3931  emms_c();
3932  return 0;
3933 }
3934 
3935 static void denoise_dct_c(MpegEncContext *s, int16_t *block){
3936  const int intra= s->mb_intra;
3937  int i;
3938 
3939  s->dct_count[intra]++;
3940 
3941  for(i=0; i<64; i++){
3942  int level= block[i];
3943 
3944  if(level){
3945  if(level>0){
3946  s->dct_error_sum[intra][i] += level;
3947  level -= s->dct_offset[intra][i];
3948  if(level<0) level=0;
3949  }else{
3950  s->dct_error_sum[intra][i] -= level;
3951  level += s->dct_offset[intra][i];
3952  if(level>0) level=0;
3953  }
3954  block[i]= level;
3955  }
3956  }
3957 }
3958 
/* NOTE(review): the first line of this function's signature (its name and
 * the MpegEncContext *s parameter) is elided in this view.  From the body,
 * it performs a forward DCT followed by trellis (dynamic-programming)
 * quantization of one 8x8 block, returning the index of the last nonzero
 * coefficient (or -1 if the block quantizes to all zero). */
                               int16_t *block, int n,
                               int qscale, int *overflow){
    const int *qmat;
    const uint16_t *matrix;
    const uint8_t *scantable;
    const uint8_t *perm_scantable;
    int max=0;
    unsigned int threshold1, threshold2;
    int bias=0;
    /* per-position DP state: best run/level/score ending at each scan index */
    int run_tab[65];
    int level_tab[65];
    int score_tab[65];
    int survivor[65];
    int survivor_count;
    int last_run=0;
    int last_level=0;
    int last_score= 0;
    int last_i;
    /* up to two candidate quantized levels per coefficient */
    int coeff[2][64];
    int coeff_count[64];
    int qmul, qadd, start_i, last_non_zero, i, dc;
    const int esc_length= s->ac_esc_length;
    uint8_t * length;
    uint8_t * last_length;
    const int lambda= s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
    int mpeg2_qscale;

    s->fdsp.fdct(block);

    if(s->dct_error_sum)
        s->denoise_dct(s, block);
    qmul= qscale*16;
    qadd= ((qscale-1)|1)*8;

    if (s->q_scale_type) mpeg2_qscale = ff_mpeg2_non_linear_qscale[qscale];
    else mpeg2_qscale = qscale << 1;

    if (s->mb_intra) {
        /* intra: DC is quantized separately with the DC scale */
        int q;
        scantable= s->intra_scantable.scantable;
        perm_scantable= s->intra_scantable.permutated;
        if (!s->h263_aic) {
            if (n < 4)
                q = s->y_dc_scale;
            else
                q = s->c_dc_scale;
            q = q << 3;
        } else{
            /* For AIC we skip quant/dequant of INTRADC */
            q = 1 << 3;
            qadd=0;
        }

        /* note: block[0] is assumed to be positive */
        block[0] = (block[0] + (q >> 1)) / q;
        start_i = 1;
        last_non_zero = 0;
        qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
        matrix = n < 4 ? s->intra_matrix : s->chroma_intra_matrix;
        if(s->mpeg_quant || s->out_format == FMT_MPEG1 || s->out_format == FMT_MJPEG)
            bias= 1<<(QMAT_SHIFT-1);

        if (n > 3 && s->intra_chroma_ac_vlc_length) {
            length = s->intra_chroma_ac_vlc_length;
            last_length= s->intra_chroma_ac_vlc_last_length;
        } else {
            length = s->intra_ac_vlc_length;
            last_length= s->intra_ac_vlc_last_length;
        }
    } else {
        scantable= s->inter_scantable.scantable;
        perm_scantable= s->inter_scantable.permutated;
        start_i = 0;
        last_non_zero = -1;
        qmat = s->q_inter_matrix[qscale];
        matrix = s->inter_matrix;
        length = s->inter_ac_vlc_length;
        last_length= s->inter_ac_vlc_last_length;
    }
    last_i= start_i;

    threshold1= (1<<QMAT_SHIFT) - bias - 1;
    threshold2= (threshold1<<1);

    /* scan backwards for the last coefficient that survives quantization */
    for(i=63; i>=start_i; i--) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];

        if(((unsigned)(level+threshold1))>threshold2){
            last_non_zero = i;
            break;
        }
    }

    /* build candidate quantized levels for each coefficient */
    for(i=start_i; i<=last_non_zero; i++) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];

//        if(   bias+level >= (1<<(QMAT_SHIFT - 3))
//           || bias-level >= (1<<(QMAT_SHIFT - 3))){
        if(((unsigned)(level+threshold1))>threshold2){
            if(level>0){
                level= (bias + level)>>QMAT_SHIFT;
                coeff[0][i]= level;
                coeff[1][i]= level-1;
//                coeff[2][k]= level-2;
            }else{
                level= (bias - level)>>QMAT_SHIFT;
                coeff[0][i]= -level;
                coeff[1][i]= -level+1;
//                coeff[2][k]= -level+2;
            }
            coeff_count[i]= FFMIN(level, 2);
            av_assert2(coeff_count[i]);
            max |=level;
        }else{
            /* below threshold: keep a single +-1 candidate */
            coeff[0][i]= (level>>31)|1;
            coeff_count[i]= 1;
        }
    }

    *overflow= s->max_qcoeff < max; //overflow might have happened

    if(last_non_zero < start_i){
        memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));
        return last_non_zero;
    }

    score_tab[start_i]= 0;
    survivor[0]= start_i;
    survivor_count= 1;

    /* dynamic programming over scan positions: for each coefficient try
     * all candidate levels and all surviving predecessor positions,
     * scoring distortion + lambda * rate */
    for(i=start_i; i<=last_non_zero; i++){
        int level_index, j, zero_distortion;
        int dct_coeff= FFABS(block[ scantable[i] ]);
        int best_score=256*256*256*120;

        if (s->fdsp.fdct == ff_fdct_ifast)
            dct_coeff= (dct_coeff*ff_inv_aanscales[ scantable[i] ]) >> 12;
        zero_distortion= dct_coeff*dct_coeff;

        for(level_index=0; level_index < coeff_count[i]; level_index++){
            int distortion;
            int level= coeff[level_index][i];
            const int alevel= FFABS(level);
            int unquant_coeff;

            av_assert2(level);

            /* reconstruct the dequantized value per output format */
            if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                unquant_coeff= alevel*qmul + qadd;
            } else if(s->out_format == FMT_MJPEG) {
                j = s->idsp.idct_permutation[scantable[i]];
                unquant_coeff = alevel * matrix[j] * 8;
            }else{ // MPEG-1
                j = s->idsp.idct_permutation[scantable[i]]; // FIXME: optimize
                if(s->mb_intra){
                    unquant_coeff = (int)( alevel * mpeg2_qscale * matrix[j]) >> 4;
                    unquant_coeff = (unquant_coeff - 1) | 1;
                }else{
                    unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
                    unquant_coeff = (unquant_coeff - 1) | 1;
                }
                unquant_coeff<<= 3;
            }

            distortion= (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
            level+=64;
            if((level&(~127)) == 0){
                /* level fits in the VLC table */
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                    score += score_tab[i-run];

                    if(score < best_score){
                        best_score= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                    /* also track the best "last coefficient" candidate */
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + last_length[UNI_AC_ENC_INDEX(run, level)]*lambda;
                        score += score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }else{
                /* out-of-table level: costed with the escape code length */
                distortion += esc_length*lambda;
                for(j=survivor_count-1; j>=0; j--){
                    int run= i - survivor[j];
                    int score= distortion + score_tab[i-run];

                    if(score < best_score){
                        best_score= score;
                        run_tab[i+1]= run;
                        level_tab[i+1]= level-64;
                    }
                }

                if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                    for(j=survivor_count-1; j>=0; j--){
                        int run= i - survivor[j];
                        int score= distortion + score_tab[i-run];
                        if(score < last_score){
                            last_score= score;
                            last_run= run;
                            last_level= level-64;
                            last_i= i+1;
                        }
                    }
                }
            }
        }

        score_tab[i+1]= best_score;

        // Note: there is a vlc code in MPEG-4 which is 1 bit shorter then another one with a shorter run and the same level
        /* prune the survivor list: drop predecessors that can no longer win */
        if(last_non_zero <= 27){
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score)
                    break;
            }
        }else{
            for(; survivor_count; survivor_count--){
                if(score_tab[ survivor[survivor_count-1] ] <= best_score + lambda)
                    break;
            }
        }

        survivor[ survivor_count++ ]= i+1;
    }

    if(s->out_format != FMT_H263 && s->out_format != FMT_H261){
        /* formats without a dedicated "last" VLC: pick the best end position */
        last_score= 256*256*256*120;
        for(i= survivor[0]; i<=last_non_zero + 1; i++){
            int score= score_tab[i];
            if (i)
                score += lambda * 2; // FIXME more exact?

            if(score < last_score){
                last_score= score;
                last_i= i;
                last_level= level_tab[i];
                last_run= run_tab[i];
            }
        }
    }

    s->coded_score[n] = last_score;

    dc= FFABS(block[0]);
    last_non_zero= last_i - 1;
    memset(block + start_i, 0, (64-start_i)*sizeof(int16_t));

    if(last_non_zero < start_i)
        return last_non_zero;

    if(last_non_zero == 0 && start_i == 0){
        /* only the first (inter DC) coefficient survived: re-score it alone */
        int best_level= 0;
        int best_score= dc * dc;

        for(i=0; i<coeff_count[0]; i++){
            int level= coeff[i][0];
            int alevel= FFABS(level);
            int unquant_coeff, score, distortion;

            if(s->out_format == FMT_H263 || s->out_format == FMT_H261){
                unquant_coeff= (alevel*qmul + qadd)>>3;
            } else{ // MPEG-1
                unquant_coeff = ((( alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
                unquant_coeff = (unquant_coeff - 1) | 1;
            }
            unquant_coeff = (unquant_coeff + 4) >> 3;
            unquant_coeff<<= 3 + 3;

            distortion= (unquant_coeff - dc) * (unquant_coeff - dc);
            level+=64;
            if((level&(~127)) == 0) score= distortion + last_length[UNI_AC_ENC_INDEX(0, level)]*lambda;
            else score= distortion + esc_length*lambda;

            if(score < best_score){
                best_score= score;
                best_level= level - 64;
            }
        }
        block[0]= best_level;
        s->coded_score[n] = best_score - dc*dc;
        if(best_level == 0) return -1;
        else return last_non_zero;
    }

    /* back-track the DP decisions and write the chosen levels back */
    i= last_i;
    av_assert2(last_level);

    block[ perm_scantable[last_non_zero] ]= last_level;
    i -= last_run + 1;

    for(; i>start_i; i -= run_tab[i] + 1){
        block[ perm_scantable[i-1] ]= level_tab[i];
    }

    return last_non_zero;
}
4271 
4272 static int16_t basis[64][64];
4273 
4274 static void build_basis(uint8_t *perm){
4275  int i, j, x, y;
4276  emms_c();
4277  for(i=0; i<8; i++){
4278  for(j=0; j<8; j++){
4279  for(y=0; y<8; y++){
4280  for(x=0; x<8; x++){
4281  double s= 0.25*(1<<BASIS_SHIFT);
4282  int index= 8*i + j;
4283  int perm_index= perm[index];
4284  if(i==0) s*= sqrt(0.5);
4285  if(j==0) s*= sqrt(0.5);
4286  basis[perm_index][8*x + y]= lrintf(s * cos((M_PI/8.0)*i*(x+0.5)) * cos((M_PI/8.0)*j*(y+0.5)));
4287  }
4288  }
4289  }
4290  }
4291 }
4292 
4293 static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
4294  int16_t *block, int16_t *weight, int16_t *orig,
4295  int n, int qscale){
4296  int16_t rem[64];
4297  LOCAL_ALIGNED_16(int16_t, d1, [64]);
4298  const uint8_t *scantable;
4299  const uint8_t *perm_scantable;
4300 // unsigned int threshold1, threshold2;
4301 // int bias=0;
4302  int run_tab[65];
4303  int prev_run=0;
4304  int prev_level=0;
4305  int qmul, qadd, start_i, last_non_zero, i, dc;
4306  uint8_t * length;
4307  uint8_t * last_length;
4308  int lambda;
4309  int rle_index, run, q = 1, sum; //q is only used when s->mb_intra is true
4310 
4311  if(basis[0][0] == 0)
4313 
4314  qmul= qscale*2;
4315  qadd= (qscale-1)|1;
4316  if (s->mb_intra) {
4317  scantable= s->intra_scantable.scantable;
4318  perm_scantable= s->intra_scantable.permutated;
4319  if (!s->h263_aic) {
4320  if (n < 4)
4321  q = s->y_dc_scale;
4322  else
4323  q = s->c_dc_scale;
4324  } else{
4325  /* For AIC we skip quant/dequant of INTRADC */
4326  q = 1;
4327  qadd=0;
4328  }
4329  q <<= RECON_SHIFT-3;
4330  /* note: block[0] is assumed to be positive */
4331  dc= block[0]*q;
4332 // block[0] = (block[0] + (q >> 1)) / q;
4333  start_i = 1;
4334 // if(s->mpeg_quant || s->out_format == FMT_MPEG1)
4335 // bias= 1<<(QMAT_SHIFT-1);
4336  if (n > 3 && s->intra_chroma_ac_vlc_length) {
4337  length = s->intra_chroma_ac_vlc_length;
4338  last_length= s->intra_chroma_ac_vlc_last_length;
4339  } else {
4340  length = s->intra_ac_vlc_length;
4341  last_length= s->intra_ac_vlc_last_length;
4342  }
4343  } else {
4344  scantable= s->inter_scantable.scantable;
4345  perm_scantable= s->inter_scantable.permutated;
4346  dc= 0;
4347  start_i = 0;
4348  length = s->inter_ac_vlc_length;
4349  last_length= s->inter_ac_vlc_last_length;
4350  }
4351  last_non_zero = s->block_last_index[n];
4352 
4353  dc += (1<<(RECON_SHIFT-1));
4354  for(i=0; i<64; i++){
4355  rem[i] = dc - (orig[i] << RECON_SHIFT); // FIXME use orig directly instead of copying to rem[]
4356  }
4357 
4358  sum=0;
4359  for(i=0; i<64; i++){
4360  int one= 36;
4361  int qns=4;
4362  int w;
4363 
4364  w= FFABS(weight[i]) + qns*one;
4365  w= 15 + (48*qns*one + w/2)/w; // 16 .. 63
4366 
4367  weight[i] = w;
4368 // w=weight[i] = (63*qns + (w/2)) / w;
4369 
4370  av_assert2(w>0);
4371  av_assert2(w<(1<<6));
4372  sum += w*w;
4373  }
4374  lambda= sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
4375 
4376  run=0;
4377  rle_index=0;
4378  for(i=start_i; i<=last_non_zero; i++){
4379  int j= perm_scantable[i];
4380  const int level= block[j];
4381  int coeff;
4382 
4383  if(level){
4384  if(level<0) coeff= qmul*level - qadd;
4385  else coeff= qmul*level + qadd;
4386  run_tab[rle_index++]=run;
4387  run=0;
4388 
4389  s->mpvencdsp.add_8x8basis(rem, basis[j], coeff);
4390  }else{
4391  run++;
4392  }
4393  }
4394 
4395  for(;;){
4396  int best_score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0], 0);
4397  int best_coeff=0;
4398  int best_change=0;
4399  int run2, best_unquant_change=0, analyze_gradient;
4400  analyze_gradient = last_non_zero > 2 || s->quantizer_noise_shaping >= 3;
4401 
4402  if(analyze_gradient){
4403  for(i=0; i<64; i++){
4404  int w= weight[i];
4405 
4406  d1[i] = (rem[i]*w*w + (1<<(RECON_SHIFT+12-1)))>>(RECON_SHIFT+12);
4407  }
4408  s->fdsp.fdct(d1);
4409  }
4410 
4411  if(start_i){
4412  const int level= block[0];
4413  int change, old_coeff;
4414 
4415  av_assert2(s->mb_intra);
4416 
4417  old_coeff= q*level;
4418 
4419  for(change=-1; change<=1; change+=2){
4420  int new_level= level + change;
4421  int score, new_coeff;
4422 
4423  new_coeff= q*new_level;
4424  if(new_coeff >= 2048 || new_coeff < 0)
4425  continue;
4426 
4427  score = s->mpvencdsp.try_8x8basis(rem, weight, basis[0],
4428  new_coeff - old_coeff);
4429  if(score<best_score){
4430  best_score= score;
4431  best_coeff= 0;
4432  best_change= change;
4433  best_unquant_change= new_coeff - old_coeff;
4434  }
4435  }
4436  }
4437 
4438  run=0;
4439  rle_index=0;
4440  run2= run_tab[rle_index++];
4441  prev_level=0;
4442  prev_run=0;
4443 
4444  for(i=start_i; i<64; i++){
4445  int j= perm_scantable[i];
4446  const int level= block[j];
4447  int change, old_coeff;
4448 
4449  if(s->quantizer_noise_shaping < 3 && i > last_non_zero + 1)
4450  break;
4451 
4452  if(level){
4453  if(level<0) old_coeff= qmul*level - qadd;
4454  else old_coeff= qmul*level + qadd;
4455  run2= run_tab[rle_index++]; //FIXME ! maybe after last
4456  }else{
4457  old_coeff=0;
4458  run2--;
4459  av_assert2(run2>=0 || i >= last_non_zero );
4460  }
4461 
4462  for(change=-1; change<=1; change+=2){
4463  int new_level= level + change;
4464  int score, new_coeff, unquant_change;
4465 
4466  score=0;
4467  if(s->quantizer_noise_shaping < 2 && FFABS(new_level) > FFABS(level))
4468  continue;
4469 
4470  if(new_level){
4471  if(new_level<0) new_coeff= qmul*new_level - qadd;
4472  else new_coeff= qmul*new_level + qadd;
4473  if(new_coeff >= 2048 || new_coeff <= -2048)
4474  continue;
4475  //FIXME check for overflow
4476 
4477  if(level){
4478  if(level < 63 && level > -63){
4479  if(i < last_non_zero)
4480  score += length[UNI_AC_ENC_INDEX(run, new_level+64)]
4481  - length[UNI_AC_ENC_INDEX(run, level+64)];
4482  else
4483  score += last_length[UNI_AC_ENC_INDEX(run, new_level+64)]
4484  - last_length[UNI_AC_ENC_INDEX(run, level+64)];
4485  }
4486  }else{
4487  av_assert2(FFABS(new_level)==1);
4488 
4489  if(analyze_gradient){
4490  int g= d1[ scantable[i] ];
4491  if(g && (g^new_level) >= 0)
4492  continue;
4493  }
4494 
4495  if(i < last_non_zero){
4496  int next_i= i + run2 + 1;
4497  int next_level= block[ perm_scantable[next_i] ] + 64;
4498 
4499  if(next_level&(~127))
4500  next_level= 0;
4501 
4502  if(next_i < last_non_zero)
4503  score += length[UNI_AC_ENC_INDEX(run, 65)]
4504  + length[UNI_AC_ENC_INDEX(run2, next_level)]
4505  - length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4506  else
4507  score += length[UNI_AC_ENC_INDEX(run, 65)]
4508  + last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4509  - last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)];
4510  }else{
4511  score += last_length[UNI_AC_ENC_INDEX(run, 65)];
4512  if(prev_level){
4513  score += length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4514  - last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4515  }
4516  }
4517  }
4518  }else{
4519  new_coeff=0;
4520  av_assert2(FFABS(level)==1);
4521 
4522  if(i < last_non_zero){
4523  int next_i= i + run2 + 1;
4524  int next_level= block[ perm_scantable[next_i] ] + 64;
4525 
4526  if(next_level&(~127))
4527  next_level= 0;
4528 
4529  if(next_i < last_non_zero)
4530  score += length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4531  - length[UNI_AC_ENC_INDEX(run2, next_level)]
4532  - length[UNI_AC_ENC_INDEX(run, 65)];
4533  else
4534  score += last_length[UNI_AC_ENC_INDEX(run + run2 + 1, next_level)]
4535  - last_length[UNI_AC_ENC_INDEX(run2, next_level)]
4536  - length[UNI_AC_ENC_INDEX(run, 65)];
4537  }else{
4538  score += -last_length[UNI_AC_ENC_INDEX(run, 65)];
4539  if(prev_level){
4540  score += last_length[UNI_AC_ENC_INDEX(prev_run, prev_level)]
4541  - length[UNI_AC_ENC_INDEX(prev_run, prev_level)];
4542  }
4543  }
4544  }
4545 
4546  score *= lambda;
4547 
4548  unquant_change= new_coeff - old_coeff;
4549  av_assert2((score < 100*lambda && score > -100*lambda) || lambda==0);
4550 
4551  score += s->mpvencdsp.try_8x8basis(rem, weight, basis[j],
4552  unquant_change);
4553  if(score<best_score){
4554  best_score= score;
4555  best_coeff= i;
4556  best_change= change;
4557  best_unquant_change= unquant_change;
4558  }
4559  }
4560  if(level){
4561  prev_level= level + 64;
4562  if(prev_level&(~127))
4563  prev_level= 0;
4564  prev_run= run;
4565  run=0;
4566  }else{
4567  run++;
4568  }
4569  }
4570 
4571  if(best_change){
4572  int j= perm_scantable[ best_coeff ];
4573 
4574  block[j] += best_change;
4575 
4576  if(best_coeff > last_non_zero){
4577  last_non_zero= best_coeff;
4578  av_assert2(block[j]);
4579  }else{
4580  for(; last_non_zero>=start_i; last_non_zero--){
4581  if(block[perm_scantable[last_non_zero]])
4582  break;
4583  }
4584  }
4585 
4586  run=0;
4587  rle_index=0;
4588  for(i=start_i; i<=last_non_zero; i++){
4589  int j= perm_scantable[i];
4590  const int level= block[j];
4591 
4592  if(level){
4593  run_tab[rle_index++]=run;
4594  run=0;
4595  }else{
4596  run++;
4597  }
4598  }
4599 
4600  s->mpvencdsp.add_8x8basis(rem, basis[j], best_unquant_change);
4601  }else{
4602  break;
4603  }
4604  }
4605 
4606  return last_non_zero;
4607 }
4608 
/**
 * Permute an 8x8 block according to permutation.
 * @param block       coefficients, permuted in place
 * @param permutation maps each raster index to its permuted raster index
 * @param scantable   scan order, used only to restrict the work to the
 *                    coded coefficients; the block is NOT (inverse)
 *                    permutated to scantable order!
 * @param last        the last non zero coefficient in scantable order,
 *                    used to speed the permutation up
 */
void ff_block_permute(int16_t *block, uint8_t *permutation,
                      const uint8_t *scantable, int last)
{
    int16_t scratch[64];

    if (last <= 0)
        return;
    //FIXME it is ok but not clean and might fail for some permutations
    // if (permutation[1] == 1)
    //     return;

    /* Gather the coded coefficients into scratch and clear their slots,
     * so that source and destination positions cannot collide below. */
    for (int i = 0; i <= last; i++) {
        const int idx = scantable[i];
        scratch[idx]  = block[idx];
        block[idx]    = 0;
    }

    /* Scatter each coefficient back through the permutation table. */
    for (int i = 0; i <= last; i++) {
        const int idx = scantable[i];
        block[permutation[idx]] = scratch[idx];
    }
}
4644 
4646  int16_t *block, int n,
4647  int qscale, int *overflow)
4648 {
4649  int i, j, level, last_non_zero, q, start_i;
4650  const int *qmat;
4651  const uint8_t *scantable;
4652  int bias;
4653  int max=0;
4654  unsigned int threshold1, threshold2;
4655 
4656  s->fdsp.fdct(block);
4657 
4658  if(s->dct_error_sum)
4659  s->denoise_dct(s, block);
4660 
4661  if (s->mb_intra) {
4662  scantable= s->intra_scantable.scantable;
4663  if (!s->h263_aic) {
4664  if (n < 4)
4665  q = s->y_dc_scale;
4666  else
4667  q = s->c_dc_scale;
4668  q = q << 3;
4669  } else
4670  /* For AIC we skip quant/dequant of INTRADC */
4671  q = 1 << 3;
4672 
4673  /* note: block[0] is assumed to be positive */
4674  block[0] = (block[0] + (q >> 1)) / q;
4675  start_i = 1;
4676  last_non_zero = 0;
4677  qmat = n < 4 ? s->q_intra_matrix[qscale] : s->q_chroma_intra_matrix[qscale];
4678  bias= s->intra_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4679  } else {
4680  scantable= s->inter_scantable.scantable;
4681  start_i = 0;
4682  last_non_zero = -1;
4683  qmat = s->q_inter_matrix[qscale];
4684  bias= s->inter_quant_bias*(1<<(QMAT_SHIFT - QUANT_BIAS_SHIFT));
4685  }
4686  threshold1= (1<<QMAT_SHIFT) - bias - 1;
4687  threshold2= (threshold1<<1);
4688  for(i=63;i>=start_i;i--) {
4689  j = scantable[i];
4690  level = block[j] * qmat[j];
4691 
4692  if(((unsigned)(level+threshold1))>threshold2){
4693  last_non_zero = i;
4694  break;
4695  }else{
4696  block[j]=0;
4697  }
4698  }
4699  for(i=start_i; i<=last_non_zero; i++) {
4700  j = scantable[i];
4701  level = block[j] * qmat[j];
4702 
4703 // if( bias+level >= (1<<QMAT_SHIFT)
4704 // || bias-level >= (1<<QMAT_SHIFT)){
4705  if(((unsigned)(level+threshold1))>threshold2){
4706  if(level>0){
4707  level= (bias + level)>>QMAT_SHIFT;
4708  block[j]= level;
4709  }else{
4710  level= (bias - level)>>QMAT_SHIFT;
4711  block[j]= -level;
4712  }
4713  max |=level;
4714  }else{
4715  block[j]=0;
4716  }
4717  }
4718  *overflow= s->max_qcoeff < max; //overflow might have happened
4719 
4720  /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
4721  if (s->idsp.perm_type != FF_IDCT_PERM_NONE)
4723  scantable, last_non_zero);
4724 
4725  return last_non_zero;
4726 }
4727 
4728 #define OFFSET(x) offsetof(MpegEncContext, x)
4729 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
4730 static const AVOption h263_options[] = {
4731  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4732  { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
4734  { NULL },
4735 };
4736 
4737 static const AVClass h263_class = {
4738  .class_name = "H.263 encoder",
4739  .item_name = av_default_item_name,
4740  .option = h263_options,
4741  .version = LIBAVUTIL_VERSION_INT,
4742 };
4743 
4745  .name = "h263",
4746  .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
4747  .type = AVMEDIA_TYPE_VIDEO,
4748  .id = AV_CODEC_ID_H263,
4749  .priv_data_size = sizeof(MpegEncContext),
4751  .encode2 = ff_mpv_encode_picture,
4752  .close = ff_mpv_encode_end,
4754  .priv_class = &h263_class,
4755 };
4756 
4757 static const AVOption h263p_options[] = {
4758  { "umv", "Use unlimited motion vectors.", OFFSET(umvplus), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4759  { "aiv", "Use alternative inter VLC.", OFFSET(alt_inter_vlc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4760  { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
4761  { "structured_slices", "Write slice start position at every GOB header instead of just GOB number.", OFFSET(h263_slice_structured), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE},
4763  { NULL },
4764 };
4765 static const AVClass h263p_class = {
4766  .class_name = "H.263p encoder",
4767  .item_name = av_default_item_name,
4768  .option = h263p_options,
4769  .version = LIBAVUTIL_VERSION_INT,
4770 };
4771 
4773  .name = "h263p",
4774  .long_name = NULL_IF_CONFIG_SMALL("H.263+ / H.263-1998 / H.263 version 2"),
4775  .type = AVMEDIA_TYPE_VIDEO,
4776  .id = AV_CODEC_ID_H263P,
4777  .priv_data_size = sizeof(MpegEncContext),
4779  .encode2 = ff_mpv_encode_picture,
4780  .close = ff_mpv_encode_end,
4781  .capabilities = AV_CODEC_CAP_SLICE_THREADS,
4783  .priv_class = &h263p_class,
4784 };
4785 
4786 static const AVClass msmpeg4v2_class = {
4787  .class_name = "msmpeg4v2 encoder",
4788  .item_name = av_default_item_name,
4789  .option = ff_mpv_generic_options,
4790  .version = LIBAVUTIL_VERSION_INT,
4791 };
4792 
4794  .name = "msmpeg4v2",
4795  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
4796  .type = AVMEDIA_TYPE_VIDEO,
4797  .id = AV_CODEC_ID_MSMPEG4V2,
4798  .priv_data_size = sizeof(MpegEncContext),
4800  .encode2 = ff_mpv_encode_picture,
4801  .close = ff_mpv_encode_end,
4803  .priv_class = &msmpeg4v2_class,
4804 };
4805 
4806 static const AVClass msmpeg4v3_class = {
4807  .class_name = "msmpeg4v3 encoder",
4808  .item_name = av_default_item_name,
4809  .option = ff_mpv_generic_options,
4810  .version = LIBAVUTIL_VERSION_INT,
4811 };
4812 
4814  .name = "msmpeg4",
4815  .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
4816  .type = AVMEDIA_TYPE_VIDEO,
4817  .id = AV_CODEC_ID_MSMPEG4V3,
4818  .priv_data_size = sizeof(MpegEncContext),
4820  .encode2 = ff_mpv_encode_picture,
4821  .close = ff_mpv_encode_end,
4823  .priv_class = &msmpeg4v3_class,
4824 };
4825 
4826 static const AVClass wmv1_class = {
4827  .class_name = "wmv1 encoder",
4828  .item_name = av_default_item_name,
4829  .option = ff_mpv_generic_options,
4830  .version = LIBAVUTIL_VERSION_INT,
4831 };
4832 
4834  .name = "wmv1",
4835  .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
4836  .type = AVMEDIA_TYPE_VIDEO,
4837  .id = AV_CODEC_ID_WMV1,
4838  .priv_data_size = sizeof(MpegEncContext),
4840  .encode2 = ff_mpv_encode_picture,
4841  .close = ff_mpv_encode_end,
4843  .priv_class = &wmv1_class,
4844 };
int last_time_base
Definition: mpegvideo.h:388
const uint16_t ff_mpeg1_default_non_intra_matrix[64]
Definition: mpeg12data.c:41
#define AVERROR_ENCODER_NOT_FOUND
Encoder not found.
Definition: error.h:54
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
Definition: avcodec.h:342
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:1594
av_cold void ff_me_cmp_init(MECmpContext *c, AVCodecContext *avctx)
Definition: me_cmp.c:1035
static const AVClass wmv1_class
void ff_h261_reorder_mb_index(MpegEncContext *s)
Definition: h261enc.c:108
int chroma_elim_threshold
Definition: mpegvideo.h:117
#define INPLACE_OFFSET
Definition: mpegutils.h:121
void ff_jpeg_fdct_islow_10(int16_t *data)
static const AVOption h263_options[]
int frame_bits
bits used for the current frame
Definition: mpegvideo.h:338
IDCTDSPContext idsp
Definition: mpegvideo.h:230
av_cold int ff_dct_encode_init(MpegEncContext *s)
#define NULL
Definition: coverity.c:32
RateControlContext rc_context
contains stuff only accessed in ratecontrol.c
Definition: mpegvideo.h:341
const struct AVCodec * codec
Definition: avcodec.h:535
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
av_cold void ff_rate_control_uninit(MpegEncContext *s)
Definition: ratecontrol.c:672
#define FF_MPV_FLAG_STRICT_GOP
Definition: mpegvideo.h:589
void ff_init_block_index(MpegEncContext *s)
Definition: mpegvideo.c:2279
void ff_estimate_b_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1490
qpel_mc_func avg_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:74
int picture_number
Definition: mpegvideo.h:127
#define RECON_SHIFT
me_cmp_func frame_skip_cmp[6]
Definition: me_cmp.h:76
#define CANDIDATE_MB_TYPE_SKIPPED
Definition: mpegutils.h:107
rate control context.
Definition: ratecontrol.h:63
static int shift(int a, int b)
Definition: sonic.c:82
S(GMC)-VOP MPEG-4.
Definition: avutil.h:277
#define CONFIG_WMV2_ENCODER
Definition: config.h:1340
int size
void ff_mpeg1_encode_init(MpegEncContext *s)
Definition: mpeg12enc.c:1037
int esc3_level_length
Definition: mpegvideo.h:440
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2549
static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
int time_increment_bits
Definition: mpegvideo.h:387
void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: ituh263enc.c:103
This structure describes decoded (raw) audio or video data.
Definition: frame.h:300
int16_t(* p_mv_table)[2]
MV table (1MV per MB) P-frame encoding.
Definition: mpegvideo.h:248
#define FF_CMP_DCTMAX
Definition: avcodec.h:944
void ff_fdct_ifast(int16_t *data)
Definition: jfdctfst.c:208
#define FF_MB_DECISION_BITS
chooses the one which needs the fewest bits
Definition: avcodec.h:1016
AVOption.
Definition: opt.h:246
int ff_side_data_set_encoder_stats(AVPacket *pkt, int quality, int64_t *error, int error_count, int pict_type)
Definition: avpacket.c:728
uint8_t * fcode_tab
smallest fcode needed for each MV
Definition: mpegvideo.h:279
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:153
#define MV_TYPE_FIELD
2 vectors, one per field
Definition: mpegvideo.h:269
void ff_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:885
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
Definition: mpegvideo.h:188
uint8_t * mb_mean
Table for MB luminance.
Definition: mpegpicture.h:74
uint64_t error[AV_NUM_DATA_POINTERS]
error
Definition: avcodec.h:1709
#define AV_CODEC_FLAG_INTERLACED_DCT
Use interlaced DCT.
Definition: avcodec.h:321
int last_mv[2][2][2]
last MV, used for MV prediction in MPEG-1 & B-frame MPEG-4
Definition: mpegvideo.h:278
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
Definition: mpegpicture.h:36
int pre_pass
= 1 for the pre pass
Definition: motion_est.h:72
#define CONFIG_RV10_ENCODER
Definition: config.h:1323
#define AV_CODEC_FLAG_LOOP_FILTER
loop filter.
Definition: avcodec.h:304
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:68
#define FF_MPV_FLAG_SKIP_RD
Definition: mpegvideo.h:588
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:531
AVFrame * tmp_frames[MAX_B_FRAMES+2]
Definition: mpegvideo.h:571
planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
Definition: pixfmt.h:71
static void put_bits(Jpeg2000EncoderContext *s, int val, int n)
put n times val bit
Definition: j2kenc.c:208
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
RateControlEntry * entry
Definition: ratecontrol.h:65
qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:75
void(* shrink[4])(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height)
#define CANDIDATE_MB_TYPE_INTER_I
Definition: mpegutils.h:114
int64_t bit_rate
the average bitrate
Definition: avcodec.h:576
#define LIBAVUTIL_VERSION_INT
Definition: version.h:85
else temp
Definition: vf_mcdeint.c:256
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:491
const char * g
Definition: vf_curves.c:115
void ff_h263_encode_init(MpegEncContext *s)
Definition: ituh263enc.c:761
const char * desc
Definition: nvenc.c:79
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
Definition: mpegvideo.h:154
static av_cold int init(AVCodecContext *avctx)
Definition: avrndec.c:35
void ff_init_qscale_tab(MpegEncContext *s)
init s->current_picture.qscale_table from s->lambda_table
#define OFFSET(x)
uint16_t * mb_var
Table for MB variances.
Definition: mpegpicture.h:65
uint16_t(* q_chroma_intra_matrix16)[2][64]
Definition: mpegvideo.h:328
uint16_t chroma_intra_matrix[64]
Definition: mpegvideo.h:301
static int estimate_qp(MpegEncContext *s, int dry_run)
#define MAX_MV
Definition: motion_est.h:35
int acc
Definition: yuv2rgb.c:555
int max_bitrate
Maximum bitrate of the stream, in bits per second.
Definition: avcodec.h:454
int max_b_frames
maximum number of B-frames between non-B-frames Note: The output will be delayed by max_b_frames+1 re...
Definition: avcodec.h:786
int16_t(*[3] ac_val)[16]
used for MPEG-4 AC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:194
MJPEG encoder.
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
Definition: mpegvideo.h:132
int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
Read encoded data from the encoder.
Definition: encode.c:422
attribute_deprecated int frame_skip_cmp
Definition: avcodec.h:1467
#define FF_MPV_COMMON_OPTS
Definition: mpegvideo.h:616
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:1161
#define me
int frame_skip_cmp
Definition: mpegvideo.h:579
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
Definition: mpegvideo.h:438
int b_frame_strategy
Definition: mpegvideo.h:572
#define CANDIDATE_MB_TYPE_BIDIR
Definition: mpegutils.h:112
int num
Numerator.
Definition: rational.h:59
av_cold void ff_h263dsp_init(H263DSPContext *ctx)
Definition: h263dsp.c:117
int size
Definition: packet.h:356
enum AVCodecID codec_id
Definition: mpegvideo.h:112
const char * b
Definition: vf_curves.c:116
void ff_get_2pass_fcode(MpegEncContext *s)
Definition: ratecontrol.c:857
const char * av_default_item_name(void *ptr)
Return the context name.
Definition: log.c:235
#define CONFIG_MJPEG_ENCODER
Definition: config.h:1301
void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
Definition: bitstream.c:64
int obmc
overlapped block motion compensation
Definition: mpegvideo.h:366
void avpriv_align_put_bits(PutBitContext *s)
Pad the bitstream with zeros up to the next byte boundary.
Definition: bitstream.c:48
void ff_mpeg1_clean_buffers(MpegEncContext *s)
Definition: mpeg12.c:115
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:905
int frame_skip_exp
Definition: mpegvideo.h:578
int ff_h261_get_picture_format(int width, int height)
Definition: h261enc.c:40
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:736
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced P-frame encoding.
Definition: mpegvideo.h:254
static int select_input_picture(MpegEncContext *s)
static const AVClass msmpeg4v3_class
int min_qcoeff
minimum encodable coefficient
Definition: mpegvideo.h:308
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
Definition: utils.c:70
int out_size
Definition: movenc.c:55
int ildct_cmp
interlaced DCT comparison function
Definition: avcodec.h:930
int coded_score[12]
Definition: mpegvideo.h:320
mpegvideo header.
const uint16_t ff_h263_format[8][2]
Definition: h263data.c:238
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
Definition: mjpegenc.c:71
int scene_change_score
Definition: motion_est.h:87
int mpv_flags
flags set by private options
Definition: mpegvideo.h:541
uint8_t permutated[64]
Definition: idctdsp.h:33
static const AVClass h263_class
uint8_t run
Definition: svq3.c:209
static AVPacket pkt
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
Definition: avcodec.h:1757
uint8_t * intra_ac_vlc_length
Definition: mpegvideo.h:311
#define EDGE_TOP
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
Definition: mpegvideo.h:411
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
Definition: mpegpicture.c:366
#define UNI_AC_ENC_INDEX(run, level)
Definition: mpegvideo.h:318
int mb_num
number of MBs of a picture
Definition: mpegvideo.h:133
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
Definition: fdctdsp.c:26
#define FF_LAMBDA_SHIFT
Definition: avutil.h:225
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
QpelDSPContext qdsp
Definition: mpegvideo.h:235
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
Definition: packet.h:93
int stride
Definition: mace.c:144
AVCodec.
Definition: codec.h:190
#define MAX_FCODE
Definition: mpegutils.h:48
static void write_mb_info(MpegEncContext *s)
int time_base
time in seconds of last I,P,S Frame
Definition: mpegvideo.h:389
uint8_t(* mv_penalty)[MAX_DMV *2+1]
bit amount needed to encode a MV
Definition: motion_est.h:93
int qscale
QP.
Definition: mpegvideo.h:204
int h263_aic
Advanced INTRA Coding (AIC)
Definition: mpegvideo.h:87
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode B-frame encoding.
Definition: mpegvideo.h:250
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
Definition: pixdesc.h:92
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
Definition: h263.c:307
int min_bitrate
Minimum bitrate of the stream, in bits per second.
Definition: avcodec.h:463
int chroma_x_shift
Definition: mpegvideo.h:486
int encoding
true if we are encoding (vs decoding)
Definition: mpegvideo.h:114
int field_select[2][2]
Definition: mpegvideo.h:277
attribute_deprecated int frame_skip_exp
Definition: avcodec.h:1463
#define CONFIG_RV20_ENCODER
Definition: config.h:1324
int quant_precision
Definition: mpegvideo.h:400
void ff_mpeg4_merge_partitions(MpegEncContext *s)
static int mb_var_thread(AVCodecContext *c, void *arg)
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
Definition: mpegvideo.c:1916
common functions for use with the Xvid wrappers
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:649
int modified_quant
Definition: mpegvideo.h:379
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
#define FF_MPV_FLAG_CBP_RD
Definition: mpegvideo.h:591
int skipdct
skip dct and code zero residual
Definition: mpegvideo.h:220
int b_frame_score
Definition: mpegpicture.h:84
const uint8_t ff_mpeg2_non_linear_qscale[32]
Definition: mpegvideodata.c:26
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
Definition: avcodec.h:490
static int16_t block[64]
Definition: dct.c:115
void ff_mpeg4_clean_buffers(MpegEncContext *s)
Definition: mpeg4video.c:45
void(* draw_edges)(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
attribute_deprecated int mv_bits
Definition: avcodec.h:1519
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
Definition: log.h:72
#define CANDIDATE_MB_TYPE_INTER
Definition: mpegutils.h:105
float p_masking
p block masking (0-> disabled)
Definition: avcodec.h:866
int picture_in_gop_number
0-> first pic in gop, ...
Definition: mpegvideo.h:128
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
Definition: videodsp.h:63
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
int alt_inter_vlc
alternative inter vlc
Definition: mpegvideo.h:378
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
Definition: mpeg12enc.c:412
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
uint8_t * ptr_lastgob
Definition: mpegvideo.h:503
int64_t time
time of current frame
Definition: mpegvideo.h:390
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
Definition: encode.c:32
static int encode_picture(MpegEncContext *s, int picture_number)
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
Definition: avcodec.h:584
static const AVClass msmpeg4v2_class
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4) ...
Definition: mpegvideo.h:264
ScratchpadContext sc
Definition: mpegvideo.h:202
uint8_t
#define av_cold
Definition: attributes.h:88
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:190
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Definition: avassert.h:64
Picture ** input_picture
next pictures on display order for encoding
Definition: mpegvideo.h:137
#define CANDIDATE_MB_TYPE_INTER4V
Definition: mpegutils.h:106
AVOptions.
PutBitContext pb2
used for data partitioned VOPs
Definition: mpegvideo.h:409
enum OutputFormat out_format
output format
Definition: mpegvideo.h:104
attribute_deprecated int i_count
Definition: avcodec.h:1527
#define CANDIDATE_MB_TYPE_FORWARD_I
Definition: mpegutils.h:115
#define CONFIG_FAANDCT
Definition: config.h:626
uint16_t(* dct_offset)[64]
Definition: mpegvideo.h:334
int noise_reduction
Definition: mpegvideo.h:582
void ff_dct_encode_init_x86(MpegEncContext *s)
Definition: mpegvideoenc.c:213
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:92
uint16_t * chroma_intra_matrix
custom intra quantization matrix
Definition: avcodec.h:2168
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
void(* diff_pixels)(int16_t *av_restrict block, const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride)
Definition: pixblockdsp.h:35
void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: msmpeg4enc.c:224
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
Multithreading support functions.
const uint32_t ff_square_tab[512]
Definition: me_cmp.c:33
int pre_dia_size
ME prepass diamond size & shape.
Definition: avcodec.h:981
AVCodec ff_h263_encoder
int frame_skip_threshold
Definition: mpegvideo.h:576
static const AVOption h263p_options[]
static int get_sae(uint8_t *src, int ref, int stride)
#define FF_CMP_VSSE
Definition: avcodec.h:940
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
Definition: avcodec.h:325
void ff_free_picture_tables(Picture *pic)
Definition: mpegpicture.c:465
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
Definition: frame.c:444
#define emms_c()
Definition: internal.h:55
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:393
int misc_bits
cbp, mb_type
Definition: mpegvideo.h:352
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
Definition: avcodec.h:627
int no_rounding
apply no rounding to motion compensation (MPEG-4, msmpeg4, ...) for B-frames rounding mode is always ...
Definition: mpegvideo.h:284
H.263 tables.
#define CANDIDATE_MB_TYPE_BACKWARD_I
Definition: mpegutils.h:116
int interlaced_dct
Definition: mpegvideo.h:491
int(* q_chroma_intra_matrix)[64]
Definition: mpegvideo.h:324
int me_cmp
motion estimation comparison function
Definition: avcodec.h:912
#define QUANT_BIAS_SHIFT
Definition: mpegvideo_enc.c:71
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Picture current_picture
copy of the current picture structure.
Definition: mpegvideo.h:180
#define CHROMA_420
Definition: mpegvideo.h:483
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
Definition: mpegpicture.c:451
int intra_dc_precision
Definition: mpegvideo.h:464
int repeat_first_field
Definition: mpegvideo.h:480
static AVFrame * frame
quarterpel DSP functions
const char data[16]
Definition: mxf.c:91
#define CONFIG_MPEG1VIDEO_ENCODER
Definition: config.h:1302
#define PICT_BOTTOM_FIELD
Definition: mpegutils.h:38
#define height
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
Definition: mpegvideo.h:251
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
uint8_t * data
Definition: packet.h:355
const uint16_t ff_aanscales[64]
Definition: aandcttab.c:26
static double av_q2d(AVRational a)
Convert an AVRational to a double.
Definition: rational.h:104
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: wmv2enc.c:74
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
Definition: pixfmt.h:79
#define ff_dlog(a,...)
#define AVERROR_EOF
End of file.
Definition: error.h:55
uint16_t pp_time
time distance between the last 2 p,s,i frames
Definition: mpegvideo.h:392
me_cmp_func nsse[6]
Definition: me_cmp.h:65
#define AV_LOG_VERBOSE
Detailed information.
Definition: log.h:192
#define lrintf(x)
Definition: libm_mips.h:70
const uint8_t * scantable
Definition: idctdsp.h:32
av_cold void ff_mpv_idct_init(MpegEncContext *s)
Definition: mpegvideo.c:329
int mb_height
number of MBs horizontally & vertically
Definition: mpegvideo.h:129
#define max(a, b)
Definition: cuda_runtime.h:33
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
Definition: put_bits.h:79
int buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
Definition: avcodec.h:481
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
Definition: rational.c:35
float lumi_masking
luminance masking (0-> disabled)
Definition: avcodec.h:845
char * stats_out
pass1 encoding statistics output buffer
Definition: avcodec.h:1545
int max_qcoeff
maximum encodable coefficient
Definition: mpegvideo.h:309
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
Definition: avcodec.h:308
static void update_noise_reduction(MpegEncContext *s)
#define FF_MPV_FLAG_QP_RD
Definition: mpegvideo.h:590
int scenechange_threshold
Definition: mpegvideo.h:581
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
Definition: utils.c:2032
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
Definition: mpeg12enc.c:1028
#define FFALIGN(x, a)
Definition: macros.h:48
attribute_deprecated uint64_t error[AV_NUM_DATA_POINTERS]
Definition: frame.h:435
#define MAX_LEVEL
Definition: rl.h:36
attribute_deprecated int frame_skip_threshold
Definition: avcodec.h:1455
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: h261enc.c:53
int dquant
qscale difference to prev qscale
Definition: mpegvideo.h:210
int flipflop_rounding
Definition: mpegvideo.h:437
#define CHROMA_444
Definition: mpegvideo.h:485
int num_entries
number of RateControlEntries
Definition: ratecontrol.h:64
int gop_picture_number
index of the first picture of a GOP based on fake_pic_num & MPEG-1 specific
Definition: mpegvideo.h:451
uint8_t * mb_info_ptr
Definition: mpegvideo.h:369
#define av_log(a,...)
static void ff_update_block_index(MpegEncContext *s)
Definition: mpegvideo.h:740
#define ff_sqrt
Definition: mathops.h:206
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
Definition: mpegvideo.c:2343
#define ROUNDED_DIV(a, b)
Definition: common.h:56
int(* q_inter_matrix)[64]
Definition: mpegvideo.h:325
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
Definition: avcodec.h:1593
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
Definition: packet.h:388
static int get_bits_diff(MpegEncContext *s)
Definition: mpegvideo.h:755
attribute_deprecated int skip_count
Definition: avcodec.h:1531
#define EDGE_WIDTH
Definition: mpegpicture.h:33
int(* q_intra_matrix)[64]
precomputed matrix (combine qscale and DCT renorm)
Definition: mpegvideo.h:323
int intra_only
if true, only intra pictures are generated
Definition: mpegvideo.h:102
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
#define src
Definition: vp8dsp.c:254
int16_t * dc_val[3]
used for MPEG-4 DC prediction, all 3 arrays must be continuous
Definition: mpegvideo.h:187
enum AVCodecID id
Definition: codec.h:204
int h263_plus
H.263+ headers.
Definition: mpegvideo.h:109
H263DSPContext h263dsp
Definition: mpegvideo.h:237
int slice_context_count
number of used thread_contexts
Definition: mpegvideo.h:156
#define MAX_DMV
Definition: motion_est.h:37
int last_non_b_pict_type
used for MPEG-4 gmc B-frames & ratecontrol
Definition: mpegvideo.h:215
#define i(width, name, range_min, range_max)
Definition: cbs_h2645.c:269
int width
Definition: frame.h:358
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
Definition: put_bits.h:324
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:816
int last_dc[3]
last DC values for MPEG-1
Definition: mpegvideo.h:185
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
Definition: pixdesc.h:101
uint8_t * inter_ac_vlc_last_length
Definition: mpegvideo.h:316
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
Definition: avcodec.h:279
#define MAX_MB_BYTES
Definition: mpegutils.h:47
int64_t total_bits
Definition: mpegvideo.h:337
#define PTRDIFF_SPECIFIER
Definition: internal.h:263
int mb_skipped
MUST BE SET only during DECODING.
Definition: mpegvideo.h:195
#define ARCH_X86
Definition: config.h:38
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
Definition: qpeldsp.h:65
int chroma_y_shift
Definition: mpegvideo.h:487
int strict_std_compliance
strictly follow the std (MPEG-4, ...)
Definition: mpegvideo.h:118
int partitioned_frame
is current frame partitioned
Definition: mpegvideo.h:405
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
Definition: mpegpicture.h:37
#define AVERROR(e)
Definition: error.h:43
uint64_t encoding_error[AV_NUM_DATA_POINTERS]
Definition: mpegpicture.h:90
#define MAX_PICTURE_COUNT
Definition: mpegpicture.h:32
av_cold int ff_rate_control_init(MpegEncContext *s)
Definition: ratecontrol.c:472
int me_sub_cmp
subpixel motion estimation comparison function
Definition: avcodec.h:918
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:203
attribute_deprecated uint64_t vbv_delay
VBV delay coded in the last frame (in periods of a 27 MHz clock).
Definition: avcodec.h:2027
int qmax
maximum quantizer
Definition: avcodec.h:1375
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
Definition: pixdesc.c:2577
static void update_mb_info(MpegEncContext *s, int startcode)
#define MERGE(field)
void ff_write_pass1_stats(MpegEncContext *s)
Definition: ratecontrol.c:38
int unrestricted_mv
mv can point outside of the coded picture
Definition: mpegvideo.h:223
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
Definition: internal.h:188
ERContext er
Definition: mpegvideo.h:566
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:1804
int last_lambda_for[5]
last lambda for a specific pict type
Definition: mpegvideo.h:219
static int sse_mb(MpegEncContext *s)
int reference
Definition: mpegpicture.h:87
const char * r
Definition: vf_curves.c:114
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
PixblockDSPContext pdsp
Definition: mpegvideo.h:234
const char * arg
Definition: jacosubdec.c:66
uint8_t * intra_chroma_ac_vlc_length
Definition: mpegvideo.h:313
int h263_slice_structured
Definition: mpegvideo.h:377
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:606
AVCodec * avcodec_find_encoder(enum AVCodecID id)
Find a registered encoder with a matching codec ID.
Definition: allcodecs.c:914
uint8_t * buf
Definition: put_bits.h:38
static void direct(const float *in, const FFTComplex *ir, int len, float *out)
Definition: af_afir.c:60
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
Definition: mathematics.c:37
MpegvideoEncDSPContext mpvencdsp
Definition: mpegvideo.h:233
const char * name
Name of the codec implementation.
Definition: codec.h:197
uint8_t bits
Definition: vp3data.h:202
int quarter_sample
1->qpel, 0->half pel ME/MC
Definition: mpegvideo.h:401
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
Definition: mpegvideo.h:291
int me_pre
prepass for motion estimation
Definition: mpegvideo.h:260
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Shrink the already allocated side data buffer.
Definition: avpacket.c:555
int low_delay
no reordering needed / has no B-frames
Definition: mpegvideo.h:406
qpel_mc_func put_qpel_pixels_tab[2][16]
Definition: qpeldsp.h:73
uint8_t *[2][2] b_field_select_table
Definition: mpegvideo.h:257
static const uint8_t offset[127][2]
Definition: vf_spp.c:93
void ff_mpv_common_end(MpegEncContext *s)
Definition: mpegvideo.c:1138
#define FFMAX(a, b)
Definition: common.h:94
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
Definition: pixblockdsp.c:81
#define fail()
Definition: checkasm.h:123
int64_t mb_var_sum_temp
Definition: motion_est.h:86
int(* pix_norm1)(uint8_t *pix, int line_size)
int(* pix_sum)(uint8_t *pix, int line_size)
attribute_deprecated int b_sensitivity
Definition: avcodec.h:1132
int flags
A combination of AV_PKT_FLAG values.
Definition: packet.h:361
static int put_bits_count(PutBitContext *s)
Definition: put_bits.h:67
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
Definition: pixfmt.h:70
static void frame_end(MpegEncContext *s)
int resync_mb_x
x position of last resync marker
Definition: mpegvideo.h:356
int rc_buffer_size
decoder bitstream buffer size
Definition: avcodec.h:1389
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2...
Definition: ituh263enc.c:266
int coded_picture_number
used to set pic->coded_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:126
int * lambda_table
Definition: mpegvideo.h:208
static int estimate_best_b_count(MpegEncContext *s)
int intra_dc_precision
precision of the intra DC coefficient - 8
Definition: avcodec.h:1052
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: wmv2enc.c:147
int64_t rc_min_rate
minimum bitrate
Definition: avcodec.h:1411
common internal API header
uint8_t * intra_ac_vlc_last_length
Definition: mpegvideo.h:312
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count)
const uint8_t *const ff_mpeg2_dc_scale_table[4]
Definition: mpegvideodata.c:77
void ff_h263_loop_filter(MpegEncContext *s)
Definition: h263.c:135
#define CHROMA_422
Definition: mpegvideo.h:484
float border_masking
Definition: mpegvideo.h:553
int progressive_frame
Definition: mpegvideo.h:489
enum AVPictureType pict_type
Picture type of the frame.
Definition: frame.h:383
const uint8_t ff_h263_chroma_qscale_table[32]
Definition: h263data.c:262
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
Definition: avcodec.h:275
#define FFMIN(a, b)
Definition: common.h:96
int display_picture_number
picture number in display order
Definition: frame.h:418
uint16_t(* q_inter_matrix16)[2][64]
Definition: mpegvideo.h:329
uint8_t * vbv_delay_ptr
pointer to vbv_delay in the bitstream
Definition: mpegvideo.h:453
int fixed_qscale
fixed qscale if non zero
Definition: mpegvideo.h:113
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
Definition: pixfmt.h:78
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
Definition: avcodec.h:338
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
Definition: options.c:157
int umvplus
== H.263+ && unrestricted_mv
Definition: mpegvideo.h:375
Picture new_picture
copy of the source picture structure for encoding.
Definition: mpegvideo.h:174
#define width
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:521
int intra_quant_bias
bias for the quantizer
Definition: mpegvideo.h:306
int width
picture width / height.
Definition: avcodec.h:699
uint8_t w
Definition: llviddspenc.c:38
int16_t(*[2] motion_val)[2]
Definition: mpegpicture.h:53
Picture * current_picture_ptr
pointer to the current picture
Definition: mpegvideo.h:184
Picture.
Definition: mpegpicture.h:45
attribute_deprecated int noise_reduction
Definition: avcodec.h:1044
int alternate_scan
Definition: mpegvideo.h:471
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow. ...
Definition: avcodec.h:1418
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv10enc.c:32
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
Definition: avcodec.h:312
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
Definition: ratecontrol.c:868
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
Definition: avcodec.h:296
uint16_t(* q_intra_matrix16)[2][64]
identical to the above but for MMX & these are not permutated, second 64 entries are bias ...
Definition: mpegvideo.h:327
attribute_deprecated int frame_skip_factor
Definition: avcodec.h:1459
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
Definition: avcodec.h:1015
perm
Definition: f_perms.c:74
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
Definition: common.h:72
#define s(width, name)
Definition: cbs_vp9.c:257
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
Definition: avcodec.h:1797
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:535
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
Definition: frame.h:423
int(* ac_stats)[2][MAX_LEVEL+1][MAX_RUN+1][2]
[mb_intra][isChroma][level][run][last]
Definition: mpegvideo.h:443
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:533
int block_last_index[12]
last non zero coefficient in block
Definition: mpegvideo.h:86
MotionEstContext me
Definition: mpegvideo.h:282
int frame_skip_factor
Definition: mpegvideo.h:577
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
uint8_t idct_permutation[64]
IDCT input permutation.
Definition: idctdsp.h:96
void(* fdct)(int16_t *block)
Definition: fdctdsp.h:27
const int16_t ff_mpeg4_default_non_intra_matrix[64]
Definition: mpeg4data.h:348
int mb_decision
macroblock decision mode
Definition: avcodec.h:1014
#define CONFIG_FLV_ENCODER
Definition: config.h:1290
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride)
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
Definition: mpegvideo.h:198
#define MAX_B_FRAMES
Definition: mpegvideo.h:64
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:527
int ff_msmpeg4_encode_init(MpegEncContext *s)
Definition: msmpeg4enc.c:121
int ac_esc_length
num of bits needed to encode the longest esc
Definition: mpegvideo.h:310
preferred ID for MPEG-1/2 video decoding
Definition: codec_id.h:51
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
Definition: put_bits.h:358
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:1785
int block_index[6]
index to current MB in block based arrays with edges
Definition: mpegvideo.h:293
Compute and use optimal Huffman tables.
Definition: mjpegenc.h:97
#define av_log2
Definition: intmath.h:83
the normal 2^n-1 "JPEG" YUV ranges
Definition: pixfmt.h:535
int * mb_index2xy
mb_index -> mb_x + mb_y*mb_stride
Definition: mpegvideo.h:297
static uint8_t default_fcode_tab[MAX_MV *2+1]
Definition: mpegvideo_enc.c:83
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
Definition: codec.h:110
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:373
AVCodec ff_h263p_encoder
attribute_deprecated int i_tex_bits
Definition: avcodec.h:1523
static void build_basis(uint8_t *perm)
#define MV_TYPE_16X16
1 vector for the whole mb
Definition: mpegvideo.h:266
int first_slice_line
used in MPEG-4 too to handle resync markers
Definition: mpegvideo.h:436
int frame_pred_frame_dct
Definition: mpegvideo.h:465
attribute_deprecated int misc_bits
Definition: avcodec.h:1533
This structure describes the bitrate properties of an encoded bitstream.
Definition: avcodec.h:448
uint16_t * mc_mb_var
Table for motion compensated MB variances.
Definition: mpegpicture.h:68
void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: flvenc.c:27
#define MV_DIR_BACKWARD
Definition: mpegvideo.h:263
int coded_picture_number
picture number in bitstream order
Definition: frame.h:414
#define src1
Definition: h264pred.c:139
#define AV_LOG_INFO
Standard information.
Definition: log.h:187
uint16_t inter_matrix[64]
Definition: mpegvideo.h:302
#define FF_LAMBDA_SCALE
Definition: avutil.h:226
void ff_jpeg_fdct_islow_8(int16_t *data)
int64_t last_non_b_time
Definition: mpegvideo.h:391
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: h261enc.c:237
#define QMAT_SHIFT
Definition: mpegvideo_enc.c:74
struct MpegEncContext * thread_context[MAX_THREADS]
Definition: mpegvideo.h:155
#define CONFIG_MSMPEG4_ENCODER
Definition: msmpeg4.h:75
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
Definition: mpegvideo.h:207
void ff_faandct(int16_t *data)
Definition: faandct.c:114
double buffer_index
amount of bits in the video/audio buffer
Definition: ratecontrol.h:66
Libavcodec external API header.
attribute_deprecated int mpeg_quant
Definition: avcodec.h:821
void ff_h263_update_motion_val(MpegEncContext *s)
Definition: h263.c:42
int h263_flv
use flv H.263 header
Definition: mpegvideo.h:110
attribute_deprecated int scenechange_threshold
Definition: avcodec.h:1040
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
Definition: options.c:172
static const AVClass h263p_class
ptrdiff_t linesize
line size, in bytes, may be different from width
Definition: mpegvideo.h:134
enum AVCodecID codec_id
Definition: avcodec.h:536
#define QMAT_SHIFT_MMX
Definition: mpegvideo_enc.c:73
attribute_deprecated int prediction_method
Definition: avcodec.h:885
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:331
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
Definition: mpegvideo_enc.c:90
const uint16_t ff_inv_aanscales[64]
Definition: aandcttab.c:38
attribute_deprecated int b_frame_strategy
Definition: avcodec.h:800
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Definition: me_cmp.c:474
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
uint8_t * intra_chroma_ac_vlc_last_length
Definition: mpegvideo.h:314
main external API structure.
Definition: avcodec.h:526
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
Definition: mpegpicture.c:236
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:605
long long int64_t
Definition: coverity.c:34
ScanTable intra_scantable
Definition: mpegvideo.h:91
void(* add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale)
int qmin
minimum quantizer
Definition: avcodec.h:1368
int height
picture size. must be a multiple of 16
Definition: mpegvideo.h:100
#define FF_CMP_NSSE
Definition: avcodec.h:941
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
Definition: hpeldsp.h:38
static void write_slice_end(MpegEncContext *s)
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
Definition: mpegvideo.h:144
int64_t user_specified_pts
last non-zero pts from AVFrame which was passed into avcodec_encode_video2()
Definition: mpegvideo.h:140
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:525
FDCTDSPContext fdsp
Definition: mpegvideo.h:227
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
Definition: hpeldsp.h:56
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
Definition: encode.c:392
uint8_t * buf_end
Definition: put_bits.h:38
static int frame_start(MpegEncContext *s)
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
Definition: avcodec.h:859
int luma_elim_threshold
Definition: mpegvideo.h:116
attribute_deprecated int header_bits
Definition: avcodec.h:1521
Picture * picture
main picture buffer
Definition: mpegvideo.h:136
int data_partitioning
data partitioning flag from header
Definition: mpegvideo.h:404
uint8_t * inter_ac_vlc_length
Definition: mpegvideo.h:315
int progressive_sequence
Definition: mpegvideo.h:456
uint16_t * intra_matrix
custom intra quantization matrix Must be allocated with the av_malloc() family of functions...
Definition: avcodec.h:1026
H.261 codec.
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Definition: ituh263enc.c:240
uint8_t * buf_ptr
Definition: put_bits.h:38
Describe the class of an AVClass context structure.
Definition: log.h:67
int stuffing_bits
bits used for stuffing
Definition: mpegvideo.h:339
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
Definition: hpeldsp.h:82
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced B-frame encoding.
Definition: mpegvideo.h:255
#define FF_COMPLIANCE_NORMAL
Definition: avcodec.h:1592
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
Definition: mpegpicture.h:82
int index
Definition: gxfenc.c:89
#define CANDIDATE_MB_TYPE_DIRECT
Definition: mpegutils.h:109
int(* try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale)
struct AVFrame * f
Definition: mpegpicture.h:46
#define FF_MB_DECISION_RD
rate distortion
Definition: avcodec.h:1017
static void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type)
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:529
FF_ENABLE_DEPRECATION_WARNINGS int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
Definition: avpacket.c:298
const uint16_t ff_mpeg1_default_intra_matrix[256]
Definition: mpeg12data.c:30
int input_picture_number
used to set pic->display_picture_number, should not be used for/by anything else
Definition: mpegvideo.h:125
cl_device_type type
AVCodec ff_wmv1_encoder
const uint8_t ff_zigzag_direct[64]
Definition: mathtables.c:98
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
Definition: mpegvideo.h:135
int mb_info
interval for outputting info about mb offsets as side data
Definition: mpegvideo.h:367
void ff_set_mpeg4_time(MpegEncContext *s)
static void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type)
attribute_deprecated int brd_scale
Definition: avcodec.h:1099
#define STRIDE_ALIGN
Definition: internal.h:108
av_cold void ff_mjpeg_encode_close(MpegEncContext *s)
Definition: mjpegenc.c:126
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
Definition: utils.c:574
#define CANDIDATE_MB_TYPE_BIDIR_I
Definition: mpegutils.h:117
const int16_t ff_mpeg4_default_intra_matrix[64]
Definition: mpeg4data.h:337
int(* dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo.h:537
int f_code
forward MV resolution
Definition: mpegvideo.h:238
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
Definition: motion_est.c:1061
#define CANDIDATE_MB_TYPE_DIRECT0
Definition: mpegutils.h:119
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
Definition: mpeg4video.c:117
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal huffman tables are enabled, otherwise writes the stuffing...
void(* denoise_dct)(struct MpegEncContext *s, int16_t *block)
Definition: mpegvideo.h:539
void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb, ScanTable *intra_scantable, int pred, uint16_t luma_intra_matrix[64], uint16_t chroma_intra_matrix[64])
attribute_deprecated int p_tex_bits
Definition: avcodec.h:1525
static int weight(int i, int blen, int offset)
Definition: diracdec.c:1560
#define MV_DIR_FORWARD
Definition: mpegvideo.h:262
uint16_t * inter_matrix
custom inter quantization matrix Must be allocated with the av_malloc() family of functions...
Definition: avcodec.h:1035
int max_b_frames
max number of B-frames for encoding
Definition: mpegvideo.h:115
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
Definition: mpegvideo.h:212
static enum AVPixelFormat pix_fmts[]
Definition: libkvazaar.c:275
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
uint8_t pi<< 24) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0f/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_U8, uint8_t,(*(const uint8_t *) pi - 0x80) *(1.0/(1<< 7))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S16, int16_t,(*(const int16_t *) pi >> 8)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0f/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S16, int16_t, *(const int16_t *) pi *(1.0/(1<< 15))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_S32, int32_t,(*(const int32_t *) pi >> 24)+0x80) CONV_FUNC_GROUP(AV_SAMPLE_FMT_FLT, float, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0f/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_DBL, double, AV_SAMPLE_FMT_S32, int32_t, *(const int32_t *) pi *(1.0/(1U<< 31))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_FLT, float, av_clip_uint8(lrintf(*(const float *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_FLT, float, av_clip_int16(lrintf(*(const float *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_FLT, float, av_clipl_int32(llrintf(*(const float *) pi *(1U<< 31)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_U8, uint8_t, AV_SAMPLE_FMT_DBL, double, av_clip_uint8(lrint(*(const double *) pi *(1<< 7))+0x80)) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S16, int16_t, AV_SAMPLE_FMT_DBL, double, av_clip_int16(lrint(*(const double *) pi *(1<< 15)))) CONV_FUNC_GROUP(AV_SAMPLE_FMT_S32, int32_t, AV_SAMPLE_FMT_DBL, double, av_clipl_int32(llrint(*(const double *) pi *(1U<< 31)))) #define SET_CONV_FUNC_GROUP(ofmt, ifmt) static void set_generic_function(AudioConvert *ac) { } void ff_audio_convert_free(AudioConvert **ac) { if(! 
*ac) return;ff_dither_free(&(*ac) ->dc);av_freep(ac);} AudioConvert *ff_audio_convert_alloc(AVAudioResampleContext *avr, enum AVSampleFormat out_fmt, enum AVSampleFormat in_fmt, int channels, int sample_rate, int apply_map) { AudioConvert *ac;int in_planar, out_planar;ac=av_mallocz(sizeof(*ac));if(!ac) return NULL;ac->avr=avr;ac->out_fmt=out_fmt;ac->in_fmt=in_fmt;ac->channels=channels;ac->apply_map=apply_map;if(avr->dither_method !=AV_RESAMPLE_DITHER_NONE &&av_get_packed_sample_fmt(out_fmt)==AV_SAMPLE_FMT_S16 &&av_get_bytes_per_sample(in_fmt) > 2) { ac->dc=ff_dither_alloc(avr, out_fmt, in_fmt, channels, sample_rate, apply_map);if(!ac->dc) { av_free(ac);return NULL;} return ac;} in_planar=ff_sample_fmt_is_planar(in_fmt, channels);out_planar=ff_sample_fmt_is_planar(out_fmt, channels);if(in_planar==out_planar) { ac->func_type=CONV_FUNC_TYPE_FLAT;ac->planes=in_planar ? ac->channels :1;} else if(in_planar) ac->func_type=CONV_FUNC_TYPE_INTERLEAVE;else ac->func_type=CONV_FUNC_TYPE_DEINTERLEAVE;set_generic_function(ac);if(ARCH_AARCH64) ff_audio_convert_init_aarch64(ac);if(ARCH_ARM) ff_audio_convert_init_arm(ac);if(ARCH_X86) ff_audio_convert_init_x86(ac);return ac;} int ff_audio_convert(AudioConvert *ac, AudioData *out, AudioData *in) { int use_generic=1;int len=in->nb_samples;int p;if(ac->dc) { av_log(ac->avr, AV_LOG_TRACE, "%d samples - audio_convert: %s to %s (dithered)\", len, av_get_sample_fmt_name(ac->in_fmt), av_get_sample_fmt_name(ac->out_fmt));return ff_convert_dither(ac-> dc
int last_mv_dir
last mv_dir, used for B-frame encoding
Definition: mpegvideo.h:452
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
Definition: frame.c:325
int h263_pred
use MPEG-4/H.263 ac/dc predictions
Definition: mpegvideo.h:105
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
Definition: mpegvideo.h:252
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:554
float dark_masking
darkness masking (0-> disabled)
Definition: avcodec.h:873
static int64_t pts
float temporal_cplx_masking
temporary complexity masking (0-> disabled)
Definition: avcodec.h:852
void(* get_pixels)(int16_t *av_restrict block, const uint8_t *pixels, ptrdiff_t stride)
Definition: pixblockdsp.h:29
int ff_init_me(MpegEncContext *s)
Definition: motion_est.c:306
uint8_t *[2] p_field_select_table
Definition: mpegvideo.h:256
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode B-frame encoding.
Definition: mpegvideo.h:253
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
Definition: avcodec.h:287
AAN (Arai, Agui and Nakajima) (I)DCT tables.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:314
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
Definition: mpegvideo.h:189
uint8_t level
Definition: svq3.c:210
me_cmp_func sad[6]
Definition: me_cmp.h:56
int me_penalty_compensation
Definition: mpegvideo.h:259
int64_t mc_mb_var_sum_temp
Definition: motion_est.h:85
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
Definition: mpegvideo.h:276
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode B-frame encoding.
Definition: mpegvideo.h:249
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
Definition: mpegvideo.h:131
me_cmp_func sse[6]
Definition: me_cmp.h:57
static int estimate_motion_thread(AVCodecContext *c, void *arg)
int vbv_ignore_qmax
Definition: mpegvideo.h:555
#define BASIS_SHIFT
MpegEncContext.
Definition: mpegvideo.h:81
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
Definition: mpegvideo.h:183
int8_t * qscale_table
Definition: mpegpicture.h:50
#define MAX_RUN
Definition: rl.h:35
struct AVCodecContext * avctx
Definition: mpegvideo.h:98
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
Definition: avcodec.h:721
PutBitContext pb
bit output
Definition: mpegvideo.h:151
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
Definition: mpegpicture.c:299
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
#define CONFIG_MPEG4_ENCODER
Definition: config.h:1304
#define CONFIG_MPEG2VIDEO_ENCODER
Definition: config.h:1303
int
static void update_qscale(MpegEncContext *s)
int mb_cmp
macroblock comparison function (not supported yet)
Definition: avcodec.h:924
int quantizer_noise_shaping
Definition: mpegvideo.h:542
int(* dct_error_sum)[64]
Definition: mpegvideo.h:332
int(* fast_dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
Definition: mpegvideo.h:538
MECmpContext mecc
Definition: mpegvideo.h:231
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
Definition: pixfmt.h:66
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
Definition: msmpeg4enc.c:284
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
static const int32_t qmat16[MAT_SIZE]
Definition: hq_hqadata.c:342
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
Definition: mpegvideo.h:130
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
Definition: put_bits.h:101
if(ret< 0)
Definition: vf_mcdeint.c:279
static int ref[MAX_W *MAX_W]
Definition: jpeg2000dwt.c:107
#define CANDIDATE_MB_TYPE_FORWARD
Definition: mpegutils.h:110
attribute_deprecated int rtp_payload_size
Definition: avcodec.h:1508
uint8_t * dest[3]
Definition: mpegvideo.h:295
int shared
Definition: mpegpicture.h:88
static double c[64]
int last_pict_type
Definition: mpegvideo.h:214
#define CONFIG_H261_ENCODER
Definition: config.h:1292
#define COPY(a)
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: ituh263enc.c:447
int adaptive_quant
use adaptive quantization
Definition: mpegvideo.h:209
static int16_t basis[64][64]
Picture last_picture
copy of the previous picture structure.
Definition: mpegvideo.h:162
Picture * last_picture_ptr
pointer to the previous picture.
Definition: mpegvideo.h:182
Bi-dir predicted.
Definition: avutil.h:276
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
Definition: pixfmt.h:80
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there&#39;s a delay
Definition: mpegvideo.h:148
attribute_deprecated AVFrame * coded_frame
the picture in the bitstream
Definition: avcodec.h:1776
int ff_vbv_update(MpegEncContext *s, int frame_size)
Definition: ratecontrol.c:681
#define H263_GOB_HEIGHT(h)
Definition: h263.h:42
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
Definition: avpacket.c:35
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
Definition: put_bits.h:48
int den
Denominator.
Definition: rational.h:60
#define CONFIG_H263_ENCODER
Definition: config.h:1293
#define CONFIG_H263P_ENCODER
Definition: config.h:1294
const uint8_t * chroma_qscale_table
qscale -> chroma_qscale (H.263)
Definition: mpegvideo.h:190
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
Definition: error.h:71
AVCodec ff_msmpeg4v3_encoder
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
Definition: mpegvideo.c:466
int trellis
trellis RD quantization
Definition: avcodec.h:1475
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
Definition: utils.c:2046
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:215
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
Definition: avcodec.h:300
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: mpeg12enc.c:426
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
#define CANDIDATE_MB_TYPE_INTRA
Definition: mpegutils.h:104
int16_t(* blocks)[12][64]
Definition: mpegvideo.h:509
int slices
Number of slices.
Definition: avcodec.h:1177
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
void * priv_data
Definition: avcodec.h:553
const AVOption ff_mpv_generic_options[]
Definition: mpegvideo_enc.c:85
#define PICT_FRAME
Definition: mpegutils.h:39
int last_bits
temp var used for calculating the above vars
Definition: mpegvideo.h:353
void ff_mpeg4_init_partitions(MpegEncContext *s)
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
Definition: mpegvideo.c:890
static av_always_inline int diff(const uint32_t a, const uint32_t b)
int picture_structure
Definition: mpegvideo.h:461
int dia_size
ME diamond size & shape.
Definition: avcodec.h:954
#define av_free(p)
attribute_deprecated int frame_bits
Definition: avcodec.h:1537
VideoDSPContext vdsp
Definition: mpegvideo.h:236
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:85
#define VE
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
attribute_deprecated int me_penalty_compensation
Definition: avcodec.h:1087
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mpegvideo.c:2260
int avg_bitrate
Average bitrate of the stream, in bits per second.
Definition: avcodec.h:472
int ff_get_best_fcode(MpegEncContext *s, int16_t(*mv_table)[2], int type)
Definition: motion_est.c:1598
int resync_mb_y
y position of last resync marker
Definition: mpegvideo.h:357
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:561
int16_t(* block)[64]
points to one of the following blocks
Definition: mpegvideo.h:508
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
Definition: mjpegenc.c:283
int64_t bit_rate
wanted bit rate
Definition: mpegvideo.h:103
This side data corresponds to the AVCPBProperties struct.
Definition: packet.h:145
PutBitContext tex_pb
used for data partitioned VOPs
Definition: mpegvideo.h:408
Picture next_picture
copy of the next picture structure.
Definition: mpegvideo.h:168
attribute_deprecated int p_count
Definition: avcodec.h:1529
int key_frame
1 -> keyframe, 0-> not
Definition: frame.h:378
attribute_deprecated void(* rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb)
Definition: avcodec.h:1502
static void set_frame_distances(MpegEncContext *s)
static const double coeff[2][5]
Definition: vf_owdenoise.c:72
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
Definition: avutil.h:227
#define EDGE_BOTTOM
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Definition: motion_est.c:1699
Picture ** reordered_input_picture
pointer to the next pictures in coded order for encoding
Definition: mpegvideo.h:138
static const struct twinvq_data tab
unsigned int byte_buffer_size
Definition: internal.h:150
atomic_int error_count
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number)
Definition: rv20enc.c:35
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: packet.h:354
static int encode_thread(AVCodecContext *c, void *arg)
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
Definition: mpegvideo.c:644
int height
Definition: frame.h:358
void ff_fix_long_p_mvs(MpegEncContext *s, int type)
Definition: motion_est.c:1650
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
Definition: mpegvideo.h:300
#define LOCAL_ALIGNED_16(t, v,...)
Definition: internal.h:131
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
#define av_freep(p)
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src)
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
Definition: mpegvideo.h:90
#define av_always_inline
Definition: attributes.h:45
#define M_PI
Definition: mathematics.h:52
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
int rtp_payload_size
Definition: mpegvideo.h:498
#define AV_CODEC_FLAG_CLOSED_GOP
Definition: avcodec.h:343
Floating point AAN DCT
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
Definition: mpegvideo_enc.c:82
int inter_quant_bias
bias for the quantizer
Definition: mpegvideo.h:307
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
Definition: qpeldsp.c:783
#define CANDIDATE_MB_TYPE_BACKWARD
Definition: mpegutils.h:111
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new information of a packet.
Definition: avpacket.c:332
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
Definition: mpegvideo.h:267
int b_code
backward MV resolution for B-frames (MPEG-4)
Definition: mpegvideo.h:239
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
Definition: msmpeg4enc.c:376
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
Definition: avcodec.h:1825
void ff_h261_encode_init(MpegEncContext *s)
Definition: h261enc.c:365
int dct_count[2]
Definition: mpegvideo.h:333
int64_t mb_var_sum
sum of MB variance for current frame
Definition: mpegpicture.h:81
static int encode_frame(AVCodecContext *c, AVFrame *frame)
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
Definition: mpegvideo.h:523
AVPixelFormat
Pixel format.
Definition: pixfmt.h:64
This structure stores compressed data.
Definition: packet.h:332
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
Definition: internal.h:149
int delay
Codec delay.
Definition: avcodec.h:682
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:1589
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: packet.h:348
int ff_check_alignment(void)
Definition: me_cmp.c:1014
for(j=16;j >0;--j)
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:659
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
Definition: internal.h:149
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
Definition: utils.c:1850
me_cmp_func ildct_cmp[6]
Definition: me_cmp.h:75
#define FFMAX3(a, b, c)
Definition: common.h:95
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
Predicted.
Definition: avutil.h:275
unsigned int lambda
Lagrange multiplier used in rate distortion.
Definition: mpegvideo.h:206
AVCodec ff_msmpeg4v2_encoder
int64_t rc_max_rate
maximum bitrate
Definition: avcodec.h:1404
uint16_t pb_time
time distance between the last b and p,s,i frame
Definition: mpegvideo.h:393
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
enum idct_permutation_type perm_type
Definition: idctdsp.h:97
attribute_deprecated int pre_me
Definition: avcodec.h:966
HpelDSPContext hdsp
Definition: mpegvideo.h:229
static const uint8_t sp5x_quant_table[20][64]
Definition: sp5x.h:135
int next_lambda
next lambda used for retrying to encode a frame
Definition: mpegvideo.h:340