#define QUANT_BIAS_SHIFT 8
#define QMAT_SHIFT_MMX 16
                       const uint16_t *quant_matrix,
                       int bias, int qmin, int qmax, int intra)
    for (qscale = qmin; qscale <= qmax; qscale++) {
        else qscale2 = qscale << 1;
        for (i = 0; i < 64; i++) {
            int64_t den = (int64_t) qscale2 * quant_matrix[j];
        for (i = 0; i < 64; i++) {
            int64_t den = ff_aanscales[i] * (int64_t) qscale2 * quant_matrix[j];
        for (i = 0; i < 64; i++) {
            int64_t den = (int64_t) qscale2 * quant_matrix[j];
            if (qmat16[qscale][0][i] == 0 ||
                qmat16[qscale][0][i] == 128 * 256)
    for (i = intra; i < 64; i++) {
        while (((max * qmat[qscale][i]) >> shift) > INT_MAX) {
               "Warning, QMAT_SHIFT is larger than %d, overflows possible\n",
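/*
 * A minimal standalone sketch (not the FFmpeg code above): the loops in
 * ff_convert_matrix() precompute, for every qscale, a fixed-point reciprocal
 * of qscale * quant_matrix[i], so per-coefficient quantization later becomes
 * a multiply and a shift instead of a division.  The SKETCH_ names and the
 * 21-bit shift are illustrative assumptions.
 */
#include <stdint.h>

#define SKETCH_QMAT_SHIFT 21

static void sketch_build_qmat(int32_t qmat[64], const uint16_t quant_matrix[64],
                              int qscale)
{
    for (int i = 0; i < 64; i++) {
        int64_t den = (int64_t)qscale * quant_matrix[i];
        /* qmat[i] ~= 2^SHIFT / den, so later: level = (coeff * qmat[i]) >> SHIFT */
        qmat[i] = (int32_t)(((int64_t)1 << SKETCH_QMAT_SHIFT) / den);
    }
}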
    int bestdiff = INT_MAX;
        if (diff < bestdiff) {
    for (i = 0; i < 64; i++) {
    for (i = 0; i < s->mb_num; i++) {
#define COPY(a) dst->a = src->a
    for (i = -16; i < 16; i++) {
    int i, ret, format_supported;
               "only YUV420 and YUV422 are supported\n");
        format_supported = 0;
            format_supported = 1;
            format_supported = 1;
        if (!format_supported) {
#if FF_API_PRIVATE_OPT
               "keyframe interval too large!, reducing it from %d to %d\n",
               "intra dc precision must be positive, note some applications use"
               " 0 and some 8 as base meaning 8bit, the value must not be smaller than that\n");
        av_log(avctx, AV_LOG_ERROR,
               "Either both buffer size and max rate or neither must be specified\n");
               "Warning min_rate > 0 but min_rate != max_rate isn't recommended!\n");
               "impossible bitrate constraints, this will fail\n");
               "Warning vbv_delay will be set to 0xFFFF (=VBR) as the "
               "specified vbv buffer is too large for the given bitrate!\n");
               "OBMC is only supported with simple mb decision\n");
               "max b frames must be 0 or positive for mpegvideo based encoders\n");
               "Invalid pixel aspect ratio %i/%i, limit is 255/255 reducing\n",
        (avctx->width > 2048 ||
        ((avctx->width & 3) ||
        (avctx->width > 4095 ||
        (avctx->width > 16383 ||
         avctx->height > 16383)) {
        av_log(avctx, AV_LOG_ERROR,
               "MPEG-2 does not support resolutions above 16383x16383\n");
#if FF_API_PRIVATE_OPT
               "mpeg2 style quantization not supported by codec\n");
               "QP RD is no longer compatible with MJPEG or AMV\n");
#if FF_API_PRIVATE_OPT
               "closed gop with scene change detection are not supported yet, "
               "set threshold to 1000000000\n");
               "low delay forcing is only available for mpeg2, "
               "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
               "B-frames cannot be used with low delay\n");
    if (avctx->qmax > 28) {
               "non linear quant only supports qmax <= 28 currently\n");
               "multi threaded encoding not supported by codec\n");
               "automatic thread number detection not supported by codec, "
#if FF_API_PRIVATE_OPT
               "notice: b_frame_strategy only affects the first pass\n");
        av_log(avctx, AV_LOG_ERROR,
               "qmin and or qmax are invalid, they must be 0 < min <= max\n");
               "timebase %d/%d not supported by MPEG 4 standard, "
               "the maximum admitted value for the timebase denominator "
               "The specified picture size of %dx%d is not valid for the "
               "H.261 codec.\nValid sizes are 176x144, 352x288\n",
               "The specified picture size of %dx%d is not valid for "
               "the H.263 codec.\nValid sizes are 128x96, 176x144, "
               "352x288, 704x576, and 1408x1152. "
#if FF_API_PRIVATE_OPT
                          2 * 64 * sizeof(uint16_t), fail);
#if FF_API_PRIVATE_OPT
    for (i = 0; i < 64; i++) {
               "Xvid ratecontrol requires libavcodec compiled with Xvid support.\n");
#if FF_API_PRIVATE_OPT
    for (y = 0; y < 16; y++) {
        for (x = 0; x < 16; x++) {
            acc += FFABS(src[x + y * stride] - ref);
    for (y = 0; y < h; y += 16) {
        for (x = 0; x < w; x += 16) {
            int sae = get_sae(src + offset, mean, stride);
            acc += sae + 500 < sad;
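/*
 * A self-contained sketch of the per-block "sum of absolute errors" helper
 * used above: for each 16x16 block it measures how far the pixels deviate
 * from a reference value (the block mean), which get_intra_count() then
 * compares against the motion-compensated SAD.  The sketch_ name is
 * hypothetical; ref is assumed to already hold the block mean.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>

static int sketch_get_sae(const uint8_t *src, int ref, ptrdiff_t stride)
{
    int acc = 0;
    for (int y = 0; y < 16; y++)
        for (int x = 0; x < 16; x++)
            acc += abs(src[x + y * stride] - ref);
    return acc;
}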
    int i, display_picture_number = 0, ret;
    int flush_offset = 1;
                   "Invalid pts (%"PRId64") <= last (%"PRId64")\n",
            if (!s->low_delay && display_picture_number == 1)
                   "Warning: AVFrame.pts=? trying to guess (%"PRId64")\n",
            pts = display_picture_number;
        if (!pic_arg->buf[0] ||
        int h_chroma_shift, v_chroma_shift;
        for (i = 0; i < 3; i++) {
            int src_stride = pic_arg->linesize[i];
            int h_shift = i ? h_chroma_shift : 0;
            int v_shift = i ? v_chroma_shift : 0;
            int w = s->width >> h_shift;
            if (src_stride == dst_stride)
                memcpy(dst, src, src_stride * h);
                    memcpy(dst2, src, w);
        for (flush_offset = 0; flush_offset < encoding_delay + 1; flush_offset++)
        if (flush_offset <= 1)
            encoding_delay = encoding_delay - flush_offset + 1;
    int64_t score64 = 0;
    for (plane = 0; plane < 3; plane++) {
        const int bw = plane ? 1 : 2;
        for (y = 0; y < s->mb_height * bw; y++) {
            for (x = 0; x < s->mb_width * bw; x++) {
                int off = p->shared ? 0 : 16;
                case 0: score    = FFMAX(score, v);                     break;
                case 1: score   += FFABS(v);                            break;
                case 2: score64 += v * (int64_t)v;                      break;
                case 3: score64 += FFABS(v * (int64_t)v * v);           break;
                case 4: score64 += (v * (int64_t)v) * (v * (int64_t)v); break;
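/*
 * A standalone sketch of the frame-skip scoring switch above: the per-pixel
 * differences v are accumulated with one of several metrics selected by the
 * frame-skip comparison mode (0 = maximum |v|, 1 = SAD, 2 = SSD, 3 = sum of
 * |v|^3, 4 = sum of v^4).  The function name and the plain-array interface
 * are illustrative, not the FFmpeg API.
 */
#include <stdint.h>

static int64_t sketch_skip_score(const int *diff, int n, int mode)
{
    int64_t score = 0;
    for (int i = 0; i < n; i++) {
        int64_t v = diff[i];
        int64_t a = v < 0 ? -v : v;
        switch (mode) {
        case 0: if (a > score) score = a;   break; /* max |v|           */
        case 1: score += a;                 break; /* sum of abs diffs  */
        case 2: score += v * v;             break; /* sum of squares    */
        case 3: score += a * a * a;         break; /* sum of |v|^3      */
        case 4: score += (v * v) * (v * v); break; /* sum of v^4        */
        }
    }
    return score;
}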
    int64_t best_rd = INT64_MAX;
    int best_b_count = -1;
        b_lambda = p_lambda;
        pre_input = *pre_input_ptr;
        memcpy(data, pre_input_ptr->f->data, sizeof(data));
        if (!pre_input.shared && i) {
                                 width >> 1, height >> 1);
                                 width >> 1, height >> 1);
    return best_b_count;
            b_frames = FFMAX(0, i - 1);
        for (i = 0; i < b_frames + 1; i++) {
        for (i = b_frames - 1; i >= 0; i--) {
                   "warning, too many B-frames in a row\n");
        for (i = 0; i < b_frames; i++) {
    for (i = 0; i < 4; i++) {
#if FF_API_CODED_FRAME
#if FF_API_ERROR_FRAME
    for (intra = 0; intra < 2; intra++) {
        for (i = 0; i < 64; i++) {
        for (i = 0; i < 64; i++) {
    for (i = 0; i < 4; i++) {
                          const AVFrame *pic_arg, int *got_packet)
    int i, stuffing_count, ret;
        for (i = 0; i < context_count; i++) {
        if (growing_buffer) {
#if FF_API_STAT_BITS
        for (i = 0; i < context_count; i++) {
            for (i = 0; i < 4; i++) {
        if (stuffing_count) {
                stuffing_count + 50) {
                while (stuffing_count--) {
                stuffing_count -= 4;
                while (stuffing_count--) {
                       "Internal error, negative bits\n");
            vbv_delay = FFMAX(vbv_delay, min_delay);
#if FF_API_VBV_DELAY
#if FF_API_STAT_BITS
    *got_packet = !!pkt->size;
                                               int n, int threshold)
    static const char tab[64] = {
        3, 2, 2, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        1, 1, 1, 1, 1, 1, 1, 1,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0,
        0, 0, 0, 0, 0, 0, 0, 0
    if (threshold < 0) {
        threshold = -threshold;
    if (last_index <= skip_dc - 1)
    for (i = 0; i <= last_index; i++) {
        if (skip_dc && i == 0)
        } else if (level > 1) {
    if (score >= threshold)
    for (i = skip_dc; i <= last_index; i++) {
    for (; i <= last_index; i++) {
        int level = block[j];
        if (level > maxlevel) {
        } else if (level < minlevel) {
               "warning, clipping %d dct coefficients to %d..%d\n",
               overflow, minlevel, maxlevel);
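/*
 * A minimal standalone sketch of the coefficient clipping shown above:
 * every quantized level is clamped into the range the entropy coder can
 * represent, and the number of clipped coefficients is returned so the
 * caller can print the warning.  Unlike the real clip_coeffs(), this sketch
 * walks the block linearly rather than through the scantable, and the
 * sketch_ name and the explicit minlevel/maxlevel parameters are assumptions.
 */
#include <stdint.h>

static int sketch_clip_coeffs(int16_t *block, int last_index,
                              int minlevel, int maxlevel)
{
    int overflow = 0;
    for (int i = 0; i <= last_index; i++) {
        int level = block[i];
        if (level > maxlevel) {
            level = maxlevel;
            overflow++;
        } else if (level < minlevel) {
            level = minlevel;
            overflow++;
        }
        block[i] = level;
    }
    return overflow; /* caller may warn: "clipping %d dct coefficients to %d..%d" */
}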
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            for (y2 = FFMAX(y - 1, 0); y2 < FFMIN(8, y + 2); y2++) {
                for (x2 = FFMAX(x - 1, 0); x2 < FFMIN(8, x + 2); x2++) {
                    int v = ptr[x2 + y2 * stride];
            weight[x + 8 * y] = (36 * ff_sqrt(count * sqr - sum * sum)) / count;
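/*
 * A standalone sketch of the per-pixel visual weighting above: for each
 * pixel of an 8x8 block, the variance of its 3x3 neighbourhood (clipped to
 * the block edges) is turned into a weight of roughly 36 * stddev, matching
 * the 36 * sqrt(count*sqr - sum*sum) / count formula in the fragment.
 * libm sqrt() stands in for ff_sqrt(); the SK_/sketch_ names are
 * illustrative only.
 */
#include <stddef.h>
#include <stdint.h>
#include <math.h>

#define SK_MAX(a, b) ((a) > (b) ? (a) : (b))
#define SK_MIN(a, b) ((a) < (b) ? (a) : (b))

static void sketch_visual_weight(int16_t weight[64], const uint8_t *ptr,
                                 ptrdiff_t stride)
{
    for (int y = 0; y < 8; y++) {
        for (int x = 0; x < 8; x++) {
            int sum = 0, sqr = 0, count = 0;
            for (int y2 = SK_MAX(y - 1, 0); y2 < SK_MIN(8, y + 2); y2++) {
                for (int x2 = SK_MAX(x - 1, 0); x2 < SK_MIN(8, x + 2); x2++) {
                    int v = ptr[x2 + y2 * stride];
                    sum += v;
                    sqr += v * v;
                    count++;
                }
            }
            /* 36 * standard deviation of the neighbourhood */
            weight[x + 8 * y] =
                (int16_t)((36 * sqrt((double)count * sqr - (double)sum * sum)) / count);
        }
    }
}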
                                               int motion_x, int motion_y,
                                               int mb_block_height,
    int16_t orig[12][64];
    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
    ptrdiff_t wrap_y, wrap_c;
    for (i = 0; i < mb_block_count; i++)
        const int last_qp = s->qscale;
        const int mb_xy = mb_x + mb_y * s->mb_stride;
             (mb_y * 16 * wrap_y) + mb_x * 16;
             (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
             (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
                                 16, 16, mb_x * 16, mb_y * 16,
                                 mb_block_width, mb_block_height,
                                 mb_x * mb_block_width, mb_y * mb_block_height,
        ptr_cb = ebuf + 16 * wrap_y;
                                 mb_block_width, mb_block_height,
                                 mb_x * mb_block_width, mb_y * mb_block_height,
        ptr_cr = ebuf + 16 * wrap_y + 16;
        int progressive_score, interlaced_score;
                                  NULL, wrap_y, 8) - 400;
        if (progressive_score > 0) {
                                      NULL, wrap_y * 2, 8) +
                                      NULL, wrap_y * 2, 8);
            if (progressive_score > interlaced_score) {
                dct_offset = wrap_y;
                uv_dct_offset = wrap_c;
        uint8_t *dest_y, *dest_cb, *dest_cr;
        dest_y  = s->dest[0];
        dest_cb = s->dest[1];
        dest_cr = s->dest[2];
            int progressive_score, interlaced_score;
            progressive_score = s->mecc.ildct_cmp[0](s, dest_y, ptr_y, wrap_y, 8) +
            progressive_score -= 400;
            if (progressive_score > 0) {
                if (progressive_score > interlaced_score) {
                    dct_offset = wrap_y;
                    uv_dct_offset = wrap_c;
                            dest_y + dct_offset, wrap_y);
                            dest_y + dct_offset + 8, wrap_y);
                                dest_cb + uv_dct_offset, wrap_c);
                                dest_cr + uv_dct_offset, wrap_c);
            if (s->mecc.sad[1](NULL, ptr_y + dct_offset, dest_y + dct_offset,
                               wrap_y, 8) < 20 * s->qscale)
            if (s->mecc.sad[1](NULL, ptr_y + dct_offset + 8, dest_y + dct_offset + 8,
                               wrap_y, 8) < 20 * s->qscale)
                               dest_cb + uv_dct_offset,
                               wrap_c, 8) < 20 * s->qscale)
                               dest_cr + uv_dct_offset,
                               wrap_c, 8) < 20 * s->qscale)
        memcpy(orig[0], s->block[0], sizeof(int16_t) * 64 * mb_block_count);
        for (i = 0; i < mb_block_count; i++) {
        for (i = 0; i < mb_block_count; i++) {
            for (i = 0; i < 4; i++)
            for (i = 4; i < mb_block_count; i++)
        for (i = 0; i < mb_block_count; i++) {
        for (i = 6; i < 12; i++) {
        for (i = 0; i < mb_block_count; i++) {
            for (j = 63; j > 0; j--) {
    memcpy(d->mv, s->mv, 2 * 4 * 2 * sizeof(int));
                               int *dmin, int *next_block,
                               int motion_x, int motion_y)
    s->pb     = pb[*next_block];
    s->pb2    = pb2[*next_block];
    s->tex_pb = tex_pb[*next_block];
        memcpy(dest_backup, s->dest, sizeof(s->dest));
        memcpy(s->dest, dest_backup, sizeof(s->dest));
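/*
 * An abstract sketch of the idea behind encode_mb_hq(): each candidate
 * macroblock mode is encoded into one of two scratch bit buffers selected
 * by *next_block; if the candidate beats the best cost so far, the slots
 * are swapped so the winner is kept and the loser gets overwritten by the
 * next attempt.  None of these names are FFmpeg APIs; this is only the
 * ping-pong buffer pattern under simplified assumptions.
 */
typedef struct SketchCandidate {
    int cost;                 /* rate/distortion cost of this encoding   */
    unsigned char bits[256];  /* bits produced for this candidate        */
    int size;                 /* number of bytes used in bits[]          */
} SketchCandidate;

static void sketch_try_candidate(SketchCandidate buf[2], int *next_block,
                                 int *best_cost, const SketchCandidate *trial)
{
    buf[*next_block] = *trial;        /* encode into the scratch slot     */
    if (trial->cost < *best_cost) {   /* better than the best so far?     */
        *best_cost   = trial->cost;
        *next_block ^= 1;             /* keep it; reuse the other slot    */
    }
}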
    else if (w == 8 && h == 8)
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
                    (((unsigned) sum * sum) >> 8) + 500 + 128) >> 8;
    bytestream_put_le32(&ptr, offset);
    bytestream_put_byte(&ptr, s->qscale);
    bytestream_put_byte(&ptr, gobn);
    bytestream_put_le16(&ptr, mba);
    bytestream_put_byte(&ptr, pred_x);
    bytestream_put_byte(&ptr, pred_y);
    bytestream_put_byte(&ptr, 0);
    bytestream_put_byte(&ptr, 0);
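/*
 * A reader-side sketch for the 12-byte records emitted by the
 * bytestream_put_* calls above (macroblock info side data used for RFC 2190
 * packetization), assuming the field order shown in the fragment: a
 * little-endian bit offset, the quantizer, the GOB number, a little-endian
 * macroblock address, the two motion-vector predictors, and two trailing
 * bytes written as zero.  The struct and function names are hypothetical.
 */
#include <stdint.h>

struct sketch_h263_mb_info {
    uint32_t bit_offset;   /* bit offset of the macroblock from packet start */
    uint8_t  qscale;       /* quantizer at this macroblock                   */
    uint8_t  gobn;         /* GOB number                                     */
    uint16_t mba;          /* macroblock address                             */
    uint8_t  pred_x;       /* horizontal motion vector predictor             */
    uint8_t  pred_y;       /* vertical motion vector predictor               */
    uint8_t  reserved[2];  /* written as 0, 0 above                          */
};

static struct sketch_h263_mb_info sketch_read_mb_info(const uint8_t *p)
{
    struct sketch_h263_mb_info m;
    m.bit_offset  = p[0] | (p[1] << 8) | ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
    m.qscale      = p[4];
    m.gobn        = p[5];
    m.mba         = (uint16_t)(p[6] | (p[7] << 8));
    m.pred_x      = p[8];
    m.pred_y      = p[9];
    m.reserved[0] = p[10];
    m.reserved[1] = p[11];
    return m;
}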
    int new_buffer_size = 0;
        for (mb_x = 0; mb_x < s->mb_width; mb_x++) {
            int current_packet_size, is_gob_start;
                if (s->start_mb_y == mb_y && mb_y > 0 && mb_x == 0) is_gob_start = 1;
                if (s->mb_x == 0 && s->mb_y != 0) is_gob_start = 1;
                if (s->mb_x == 0 && s->mb_y != 0) is_gob_start = 1;
                current_packet_size = 0;
#if FF_API_RTP_CALLBACK
                int pb_bits_count, pb2_bits_count, tex_pb_bits_count;
                backup_s.pb2    = s->pb2;
                backup_s.tex_pb = s->tex_pb;
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_SKIPPED, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER4V, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[0][0][0], s->mv[0][0][1]);
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD, pb, pb2, tex_pb,
                                 &dmin, &next_block, s->mv[1][0][0], s->mv[1][0][1]);
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_FORWARD_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BACKWARD_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                    for (dir = 0; dir < 2; dir++) {
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_BIDIR_I, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTRA, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                    const int last_qp = backup_s.qscale;
                    static const int dquant_tab[4] = { -1, 1, -2, 2 };
                    s->mv[0][0][0] = best_s.mv[0][0][0];
                    s->mv[0][0][1] = best_s.mv[0][0][1];
                    s->mv[1][0][0] = best_s.mv[1][0][0];
                    s->mv[1][0][1] = best_s.mv[1][0][1];
                    for (; qpi < 4; qpi++) {
                        int dquant = dquant_tab[qpi];
                        encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
                                     &dmin, &next_block, s->mv[mvdir][0][0], s->mv[mvdir][0][1]);
                    backup_s.dquant = 0;
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
                                 &dmin, &next_block, mx, my);
                    backup_s.dquant = 0;
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_DIRECT, pb, pb2, tex_pb,
                                 &dmin, &next_block, 0, 0);
                    memcpy(s->mv, best_s.mv, sizeof(s->mv));
                    encode_mb_hq(s, &backup_s, &best_s, CANDIDATE_MB_TYPE_INTER, pb, pb2, tex_pb,
                                 &dmin, &next_block, mx, my);
                s->pb2    = backup_s.pb2;
                avpriv_copy_bits(&backup_s.tex_pb, bit_buf_tex[next_block ^ 1], tex_pb_bits_count);
                s->tex_pb = backup_s.tex_pb;
            int motion_x = 0, motion_y = 0;
                motion_x = s->mv[0][0][0] = 0;
                motion_y = s->mv[0][0][1] = 0;
    for (dir = 0; dir < 2; dir++) {
#if FF_API_RTP_CALLBACK
#define MERGE(field) dst->field += src->field; src->field = 0
    for (i = 0; i < 64; i++) {
    for (i = 1; i < context_count; i++) {
    for (i = 1; i < context_count; i++) {
        ff_dlog(s, "Scene change detected, encoding as I Frame %"PRId64" %"PRId64"\n",
    for (dir = 0; dir < 2; dir++) {
    static const uint8_t y[32] = {
        13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13,
        13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13
    };
    static const uint8_t c[32] = {
        14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14,
        14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14, 14
    };
    for (i = 1; i < context_count; i++) {
    for (i = 1; i < context_count; i++) {
    for (i = 0; i < 64; i++) {
        int level = block[i];
        if (level < 0) level = 0;
        if (level > 0) level = 0;
                                  int qscale, int *overflow)
{
    const uint16_t *matrix;
    const uint8_t *perm_scantable;
    unsigned int threshold1, threshold2;
    int coeff_count[64];
    int qmul, qadd, start_i, last_non_zero, i, dc;
        qadd = ((qscale - 1) | 1) * 8;
    else
        mpeg2_qscale = qscale << 1;
        block[0] = (block[0] + (q >> 1)) / q;
    threshold2 = (threshold1 << 1);
    for (i = 63; i >= start_i; i--) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];
        if (((unsigned)(level + threshold1)) > threshold2) {
    for (i = start_i; i <= last_non_zero; i++) {
        const int j = scantable[i];
        int level = block[j] * qmat[j];
        if (((unsigned)(level + threshold1)) > threshold2) {
                coeff[1][i] = level - 1;
                coeff[0][i] = -level;
                coeff[1][i] = -level + 1;
            coeff_count[i] = FFMIN(level, 2);
            coeff[0][i] = (level >> 31) | 1;
    if (last_non_zero < start_i) {
        memset(block + start_i, 0, (64 - start_i) * sizeof(int16_t));
        return last_non_zero;
    score_tab[start_i] = 0;
    survivor[0]        = start_i;
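/*
 * A sketch of the dead-zone threshold test used above and in
 * ff_dct_quantize_c(): with qmat[] holding roughly 2^SHIFT / (qscale *
 * matrix[i]), a coefficient is kept only if |coeff * qmat| exceeds the
 * dead-zone, and the single unsigned comparison
 *     (unsigned)(level + threshold1) > 2 * threshold1
 * is a branch-free way of testing level < -threshold1 || level > threshold1.
 * The sketch_ name is illustrative only.
 */
static inline int sketch_outside_deadzone(int level, unsigned threshold1)
{
    /* wraps to a huge unsigned value when level < -threshold1 */
    return (unsigned)(level + threshold1) > 2 * threshold1;
}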
    for (i = start_i; i <= last_non_zero; i++) {
        int level_index, j, zero_distortion;
        int dct_coeff = FFABS(block[scantable[i]]);
        int best_score = 256 * 256 * 256 * 120;
        zero_distortion = dct_coeff * dct_coeff;
        for (level_index = 0; level_index < coeff_count[i]; level_index++) {
            int level = coeff[level_index][i];
            const int alevel = FFABS(level);
                unquant_coeff = alevel * qmul + qadd;
                unquant_coeff = alevel * matrix[j] * 8;
                    unquant_coeff = (int)(alevel * mpeg2_qscale * matrix[j]) >> 4;
                    unquant_coeff = (unquant_coeff - 1) | 1;
                    unquant_coeff = (((alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[j])) >> 5;
                    unquant_coeff = (unquant_coeff - 1) | 1;
            distortion = (unquant_coeff - dct_coeff) * (unquant_coeff - dct_coeff) - zero_distortion;
            if ((level & (~127)) == 0) {
                for (j = survivor_count - 1; j >= 0; j--) {
                    int run = i - survivor[j];
                    score += score_tab[i - run];
                    if (score < best_score) {
                        level_tab[i + 1] = level - 64;
                for (j = survivor_count - 1; j >= 0; j--) {
                    int run = i - survivor[j];
                    score += score_tab[i - run];
                    if (score < last_score) {
                        last_level = level - 64;
                distortion += esc_length * lambda;
                for (j = survivor_count - 1; j >= 0; j--) {
                    int run = i - survivor[j];
                    int score = distortion + score_tab[i - run];
                    if (score < best_score) {
                        level_tab[i + 1] = level - 64;
                for (j = survivor_count - 1; j >= 0; j--) {
                    int run = i - survivor[j];
                    int score = distortion + score_tab[i - run];
                    if (score < last_score) {
                        last_level = level - 64;
        score_tab[i + 1] = best_score;
    if (last_non_zero <= 27) {
        for (; survivor_count; survivor_count--) {
            if (score_tab[survivor[survivor_count - 1]] <= best_score)
        for (; survivor_count; survivor_count--) {
            if (score_tab[survivor[survivor_count - 1]] <= best_score + lambda)
        survivor[survivor_count++] = i + 1;
    last_score = 256 * 256 * 256 * 120;
    for (i = survivor[0]; i <= last_non_zero + 1; i++) {
        int score = score_tab[i];
            score += lambda * 2;
        if (score < last_score) {
            last_level = level_tab[i];
            last_run   = run_tab[i];
    dc = FFABS(block[0]);
    last_non_zero = last_i - 1;
    memset(block + start_i, 0, (64 - start_i) * sizeof(int16_t));
    if (last_non_zero < start_i)
        return last_non_zero;
    if (last_non_zero == 0 && start_i == 0) {
        int best_score = dc * dc;
        for (i = 0; i < coeff_count[0]; i++) {
            int level = coeff[i][0];
            int alevel = FFABS(level);
            int unquant_coeff, score, distortion;
                unquant_coeff = (alevel * qmul + qadd) >> 3;
                unquant_coeff = (((alevel << 1) + 1) * mpeg2_qscale * ((int) matrix[0])) >> 5;
                unquant_coeff = (unquant_coeff - 1) | 1;
            unquant_coeff = (unquant_coeff + 4) >> 3;
            unquant_coeff <<= 3 + 3;
            distortion = (unquant_coeff - dc) * (unquant_coeff - dc);
            else score = distortion + esc_length * lambda;
            if (score < best_score) {
                best_level = level - 64;
        block[0] = best_level;
        if (best_level == 0) return -1;
        else                 return last_non_zero;
    block[perm_scantable[last_non_zero]] = last_level;
    for (; i > start_i; i -= run_tab[i] + 1) {
        block[perm_scantable[i - 1]] = level_tab[i];
    return last_non_zero;
                    int perm_index = perm[index];
                    if (i == 0) s *= sqrt(0.5);
                    if (j == 0) s *= sqrt(0.5);
                    basis[perm_index][8 * x + y] =
                        lrintf(s * cos((M_PI / 8.0) * i * (x + 0.5)) *
                                   cos((M_PI / 8.0) * j * (y + 0.5)));
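/*
 * A floating-point sketch of the basis construction above: build_basis()
 * tabulates the 64 two-dimensional DCT basis functions (scaled to integers
 * in the real code) so dct_quantize_refine() can add or subtract one
 * quantization step of a single coefficient directly in the pixel domain.
 * This version omits the integer scaling and the scantable permutation;
 * the sketch_ name is illustrative.
 */
#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

static void sketch_build_dct_basis(double basis[64][64])
{
    for (int i = 0; i < 8; i++) {
        for (int j = 0; j < 8; j++) {
            double s = 0.25;              /* normalization of the 8x8 DCT-II */
            if (i == 0) s *= sqrt(0.5);
            if (j == 0) s *= sqrt(0.5);
            for (int y = 0; y < 8; y++)
                for (int x = 0; x < 8; x++)
                    basis[8 * i + j][8 * x + y] =
                        s * cos((M_PI / 8.0) * i * (x + 0.5)) *
                            cos((M_PI / 8.0) * j * (y + 0.5));
        }
    }
}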
    const uint8_t *perm_scantable;
    int qmul, qadd, start_i, last_non_zero, i, dc;
    int rle_index, run, q = 1, sum;
    static int after_last = 0;
    static int to_zero = 0;
    static int from_zero = 0;
    static int messed_sign = 0;
    if (basis[0][0] == 0)
    for (i = 0; i < 64; i++) {
    for (i = 0; i < 64; i++) {
        w = FFABS(weight[i]) + qns * one;
        w = 15 + (48 * qns * one + w / 2) / w;
    for (i = start_i; i <= last_non_zero; i++) {
        int j = perm_scantable[i];
        const int level = block[j];
        if (level < 0) coeff = qmul * level - qadd;
        else           coeff = qmul * level + qadd;
        run_tab[rle_index++] = run;
    if (last_non_zero > 0) {
        int run2, best_unquant_change = 0, analyze_gradient;
        if (analyze_gradient) {
            for (i = 0; i < 64; i++) {
            const int level = block[0];
            int change, old_coeff;
            for (change = -1; change <= 1; change += 2) {
                int new_level = level + change;
                int score, new_coeff;
                new_coeff = q * new_level;
                if (new_coeff >= 2048 || new_coeff < 0)
                                          new_coeff - old_coeff);
                if (score < best_score) {
                    best_change = change;
                    best_unquant_change = new_coeff - old_coeff;
            run2 = run_tab[rle_index++];
        for (i = start_i; i < 64; i++) {
            int j = perm_scantable[i];
            const int level = block[j];
            int change, old_coeff;
                if (level < 0) old_coeff = qmul * level - qadd;
                else           old_coeff = qmul * level + qadd;
                run2 = run_tab[rle_index++];
            for (change = -1; change <= 1; change += 2) {
                int new_level = level + change;
                int score, new_coeff, unquant_change;
                    if (new_level < 0) new_coeff = qmul * new_level - qadd;
                    else               new_coeff = qmul * new_level + qadd;
                    if (new_coeff >= 2048 || new_coeff <= -2048)
                    if (level < 63 && level > -63) {
                        if (i < last_non_zero)
                    if (analyze_gradient) {
                        int g = d1[scantable[i]];
                        if (g && (g ^ new_level) >= 0)
                    if (i < last_non_zero) {
                        int next_i = i + run2 + 1;
                        int next_level = block[perm_scantable[next_i]] + 64;
                        if (next_level & (~127))
                        if (next_i < last_non_zero)
                    if (i < last_non_zero) {
                        int next_i = i + run2 + 1;
                        int next_level = block[perm_scantable[next_i]] + 64;
                        if (next_level & (~127))
                        if (next_i < last_non_zero)
                unquant_change = new_coeff - old_coeff;
                av_assert2((score < 100 * lambda && score > -100 * lambda) || lambda == 0);
                if (score < best_score) {
                    best_change = change;
                    best_unquant_change = unquant_change;
            prev_level = level + 64;
            if (prev_level & (~127))
            int j = perm_scantable[best_coeff];
            block[j] += best_change;
            if (best_coeff > last_non_zero) {
                last_non_zero = best_coeff;
                if (block[j] - best_change) {
                    if (FFABS(block[j]) > FFABS(block[j] - best_change)) {
            for (; last_non_zero >= start_i; last_non_zero--) {
                if (block[perm_scantable[last_non_zero]])
            if (256 * 256 * 256 * 64 % count == 0) {
                av_log(s->avctx, AV_LOG_DEBUG,
                       "after_last:%d to_zero:%d from_zero:%d raise:%d lower:%d sign:%d xyp:%d/%d/%d\n",
                       after_last, to_zero, from_zero, raise, lower, messed_sign,
                       s->mb_x, s->mb_y, s->picture_number);
            for (i = start_i; i <= last_non_zero; i++) {
                int j = perm_scantable[i];
                const int level = block[j];
                    run_tab[rle_index++] = run;
    if (last_non_zero > 0) {
    return last_non_zero;
                      const uint8_t *scantable, int last)
    for (i = 0; i <= last; i++) {
        const int j = scantable[i];
    for (i = 0; i <= last; i++) {
        const int j = scantable[i];
        const int perm_j = permutation[j];
        block[perm_j] = temp[j];
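/*
 * A standalone sketch with the same two-pass structure as the
 * ff_block_permute() fragment above: coefficients are stored in the order
 * expected by the (possibly SIMD-specific) IDCT, so after quantization the
 * coded coefficients are gathered through the scantable and scattered back
 * through the IDCT input permutation.  The sketch_ name is illustrative.
 */
#include <stdint.h>

static void sketch_block_permute(int16_t block[64], const uint8_t permutation[64],
                                 const uint8_t scantable[64], int last)
{
    int16_t temp[64];

    if (last <= 0)
        return;

    /* gather the coefficients inside the coded run ... */
    for (int i = 0; i <= last; i++) {
        const int j = scantable[i];
        temp[j]  = block[j];
        block[j] = 0;
    }
    /* ... and scatter them to their permuted positions */
    for (int i = 0; i <= last; i++) {
        const int j      = scantable[i];
        const int perm_j = permutation[j];
        block[perm_j]    = temp[j];
    }
}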
                          int qscale, int *overflow)
    int i, j, level, last_non_zero, q, start_i;
    unsigned int threshold1, threshold2;
        block[0] = (block[0] + (q >> 1)) / q;
    threshold2 = (threshold1 << 1);
    for (i = 63; i >= start_i; i--) {
        level = block[j] * qmat[j];
        if (((unsigned)(level + threshold1)) > threshold2) {
    for (i = start_i; i <= last_non_zero; i++) {
        level = block[j] * qmat[j];
        if (((unsigned)(level + threshold1)) > threshold2) {
                          scantable, last_non_zero);
    return last_non_zero;
#define OFFSET(x) offsetof(MpegEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
    { "mb_info",
      "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size",
      OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
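/*
 * mb_info is a private AVOption of these encoders, so it can be set at open
 * time.  A hedged usage sketch (error handling mostly omitted; the function
 * name and the value 1024 are illustrative) passing it through an options
 * dictionary to avcodec_open2():
 */
#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

static int sketch_open_encoder_with_mb_info(AVCodecContext *enc)
{
    AVDictionary *opts = NULL;
    int ret;

    /* request macroblock info side data, with the payload size limit the
     * option documents above */
    av_dict_set(&opts, "mb_info", "1024", 0);
    ret = avcodec_open2(enc, NULL, &opts); /* enc assumed already configured */
    av_dict_free(&opts);
    return ret;
}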
    .name = "msmpeg4v2",
void ff_get_2pass_fcode(MpegEncContext *s)
const char * av_default_item_name(void *ptr)
Return the context name.
#define CONFIG_MJPEG_ENCODER
void avpriv_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
Copy the content of src to the bitstream.
int obmc
overlapped block motion compensation
void avpriv_align_put_bits(PutBitContext *s)
Pad the bitstream with zeros up to the next byte boundary.
void ff_mpeg1_clean_buffers(MpegEncContext *s)
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
int ff_h261_get_picture_format(int width, int height)
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced P-frame encoding.
static int select_input_picture(MpegEncContext *s)
static const AVClass msmpeg4v3_class
int min_qcoeff
minimum encodable coefficient
static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride)
void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size)
Same behaviour av_fast_malloc but the buffer has additional AV_INPUT_BUFFER_PADDING_SIZE at the end w...
int ildct_cmp
interlaced DCT comparison function
const uint16_t ff_h263_format[8][2]
av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
int mpv_flags
flags set by private options
static const AVClass h263_class
void ff_xvid_rate_control_uninit(struct MpegEncContext *s)
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
uint8_t * intra_ac_vlc_length
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
#define UNI_AC_ENC_INDEX(run, level)
int mb_num
number of MBs of a picture
av_cold void ff_fdctdsp_init(FDCTDSPContext *c, AVCodecContext *avctx)
static void clip_coeffs(MpegEncContext *s, int16_t *block, int last_index)
An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of structures with info about macroblo...
static void write_mb_info(MpegEncContext *s)
int time_base
time in seconds of last I,P,S Frame
uint8_t(* mv_penalty)[MAX_DMV *2+1]
bit amount needed to encode a MV
int h263_aic
Advanced INTRA Coding (AIC)
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode B-frame encoding.
uint8_t log2_chroma_w
Amount to shift the luma width right to find the chroma width.
int16_t * ff_h263_pred_motion(MpegEncContext *s, int block, int dir, int *px, int *py)
int min_bitrate
Minimum bitrate of the stream, in bits per second.
int encoding
true if we are encoding (vs decoding)
attribute_deprecated int frame_skip_exp
#define CONFIG_RV20_ENCODER
void ff_mpeg4_merge_partitions(MpegEncContext *s)
static int mb_var_thread(AVCodecContext *c, void *arg)
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
common functions for use with the Xvid wrappers
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
float ff_xvid_rate_estimate_qscale(struct MpegEncContext *s, int dry_run)
static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
#define FF_MPV_FLAG_CBP_RD
int skipdct
skip dct and code zero residual
const uint8_t ff_mpeg2_non_linear_qscale[32]
uint64_t vbv_delay
The delay between the time the packet this structure is associated with is received and the time when...
void ff_mpeg4_clean_buffers(MpegEncContext *s)
void(* draw_edges)(uint8_t *buf, int wrap, int width, int height, int w, int h, int sides)
attribute_deprecated int mv_bits
const char * class_name
The name of the class; usually it is the same name as the context structure type to which the AVClass...
#define CANDIDATE_MB_TYPE_INTER
float p_masking
p block masking (0-> disabled)
int picture_in_gop_number
0-> first pic in gop, ...
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
#define av_assert0(cond)
assert() equivalent, that is always enabled.
int alt_inter_vlc
alternative inter vlc
void ff_mpeg1_encode_slice_header(MpegEncContext *s)
int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
int64_t time
time of current frame
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
Check AVPacket size and/or allocate data.
static int encode_picture(MpegEncContext *s, int picture_number)
av_cold void ff_mpegvideoencdsp_init(MpegvideoEncDSPContext *c, AVCodecContext *avctx)
int bit_rate_tolerance
number of bits the bitstream is allowed to diverge from the reference.
static const AVClass msmpeg4v2_class
#define MV_DIRECT
bidirectional mode where the difference equals the MV of the last P/S/I-Frame (MPEG-4) ...
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
Picture ** input_picture
next pictures on display order for encoding
#define CANDIDATE_MB_TYPE_INTER4V
PutBitContext pb2
used for data partitioned VOPs
enum OutputFormat out_format
output format
attribute_deprecated int i_count
#define CANDIDATE_MB_TYPE_FORWARD_I
uint16_t(* dct_offset)[64]
void ff_dct_encode_init_x86(MpegEncContext *s)
static av_cold int end(AVCodecContext *avctx)
uint16_t * chroma_intra_matrix
custom intra quantization matrix
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
void(* diff_pixels)(int16_t *av_restrict block, const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride)
void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
static void mpv_encode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for encoding.
Multithreading support functions.
const uint32_t ff_square_tab[512]
int pre_dia_size
ME prepass diamond size & shape.
static const AVOption h263p_options[]
static int get_sae(uint8_t *src, int ref, int stride)
#define AV_CODEC_FLAG_LOW_DELAY
Force low delay.
void ff_free_picture_tables(Picture *pic)
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
int misc_bits
cbp, mb_type
uint8_t * extradata
some codecs need / can use extradata like Huffman tables.
int no_rounding
apply no rounding to motion compensation (MPEG-4, msmpeg4, ...) for B-frames rounding mode is always ...
#define CANDIDATE_MB_TYPE_BACKWARD_I
int(* q_chroma_intra_matrix)[64]
int me_cmp
motion estimation comparison function
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
Picture current_picture
copy of the current picture structure.
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
#define CONFIG_MPEG1VIDEO_ENCODER
#define PICT_BOTTOM_FIELD
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
static void dct_single_coeff_elimination(MpegEncContext *s, int n, int threshold)
const uint16_t ff_aanscales[64]
static double av_q2d(AVRational a)
Convert an AVRational to a double.
int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting col...
#define AVERROR_EOF
End of file.
uint16_t pp_time
time distance between the last 2 p,s,i frames
#define AV_LOG_VERBOSE
Detailed information.
const uint8_t * scantable
av_cold void ff_mpv_idct_init(MpegEncContext *s)
int mb_height
number of MBs horizontally & vertically
static void rebase_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Rebase the bit writer onto a reallocated buffer.
int buffer_size
The size of the buffer to which the ratecontrol is applied, in bits.
int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max)
Reduce a fraction.
float lumi_masking
luminance masking (0-> disabled)
char * stats_out
pass1 encoding statistics output buffer
int max_qcoeff
maximum encodable coefficient
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
high precision timer, useful to profile code
static void update_noise_reduction(MpegEncContext *s)
#define FF_MPV_FLAG_QP_RD
int scenechange_threshold
AVCPBProperties * av_cpb_properties_alloc(size_t *size)
Allocate a CPB properties structure and initialize its fields to default values.
void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64], int motion_x, int motion_y)
attribute_deprecated uint64_t error[AV_NUM_DATA_POINTERS]
attribute_deprecated int frame_skip_threshold
void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
int dquant
qscale difference to prev qscale
int num_entries
number of RateControlEntries
int gop_picture_number
index of the first picture of a GOP based on fake_pic_num & MPEG-1 specific
static void ff_update_block_index(MpegEncContext *s)
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
#define ROUNDED_DIV(a, b)
int(* q_inter_matrix)[64]
#define FF_COMPLIANCE_UNOFFICIAL
Allow unofficial extensions.
#define AV_PKT_FLAG_KEY
The packet contains a keyframe.
static int get_bits_diff(MpegEncContext *s)
attribute_deprecated int skip_count
int(* q_intra_matrix)[64]
precomputed matrix (combine qscale and DCT renorm)
int intra_only
if true, only intra pictures are generated
av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
int16_t * dc_val[3]
used for MPEG-4 DC prediction, all 3 arrays must be continuous
int h263_plus
H.263+ headers.
int slice_context_count
number of used thread_contexts
int last_non_b_pict_type
used for MPEG-4 gmc B-frames & ratecontrol
void ff_block_permute(int16_t *block, uint8_t *permutation, const uint8_t *scantable, int last)
Permute an 8x8 block according to permutation.
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static uint8_t * put_bits_ptr(PutBitContext *s)
Return the pointer to the byte where the bitstream writer will put the next bit.
int has_b_frames
Size of the frame reordering buffer in the decoder.
int last_dc[3]
last DC values for MPEG-1
uint8_t log2_chroma_h
Amount to shift the luma height right to find the chroma height.
uint8_t * inter_ac_vlc_last_length
#define AV_CODEC_FLAG_4MV
4 MV per MB allowed / advanced prediction for H.263.
#define PTRDIFF_SPECIFIER
int mb_skipped
MUST BE SET only during DECODING.
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
int strict_std_compliance
strictly follow the std (MPEG-4, ...)
int partitioned_frame
is current frame partitioned
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
uint64_t encoding_error[AV_NUM_DATA_POINTERS]
#define MAX_PICTURE_COUNT
av_cold int ff_rate_control_init(MpegEncContext *s)
int me_sub_cmp
subpixel motion estimation comparison function
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
attribute_deprecated uint64_t vbv_delay
VBV delay coded in the last frame (in periods of a 27 MHz clock).
int qmax
maximum quantizer
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
static void update_mb_info(MpegEncContext *s, int startcode)
void ff_write_pass1_stats(MpegEncContext *s)
int unrestricted_mv
mv can point outside of the coded picture
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
int active_thread_type
Which multithreading methods are in use by the codec.
int last_lambda_for[5]
last lambda for a specific pict type
static int sse_mb(MpegEncContext *s)
int ff_xvid_rate_control_init(struct MpegEncContext *s)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
uint8_t * intra_chroma_ac_vlc_length
int h263_slice_structured
int flags
AV_CODEC_FLAG_*.
int64_t av_gcd(int64_t a, int64_t b)
Compute the greatest common divisor of two integer operands.
MpegvideoEncDSPContext mpvencdsp
const char * name
Name of the codec implementation.
int quarter_sample
1->qpel, 0->half pel ME/MC
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
int me_pre
prepass for motion estimation
void ff_mjpeg_encode_picture_trailer(PutBitContext *pb, int header_bits)
int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Shrink the already allocated side data buffer.
int low_delay
no reordering needed / has no B-frames
qpel_mc_func put_qpel_pixels_tab[2][16]
uint8_t *[2][2] b_field_select_table
static const uint8_t offset[127][2]
void ff_mpv_common_end(MpegEncContext *s)
av_cold void ff_pixblockdsp_init(PixblockDSPContext *c, AVCodecContext *avctx)
int(* pix_norm1)(uint8_t *pix, int line_size)
int(* pix_sum)(uint8_t *pix, int line_size)
attribute_deprecated int b_sensitivity
int flags
A combination of AV_PKT_FLAG values.
static int put_bits_count(PutBitContext *s)
planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
static void frame_end(MpegEncContext *s)
int resync_mb_x
x position of last resync marker
int rc_buffer_size
decoder bitstream buffer size
void ff_clean_h263_qscales(MpegEncContext *s)
modify qscale so that encoding is actually possible in H.263 (limit difference to -2...
int coded_picture_number
used to set pic->coded_picture_number, should not be used for/by anything else
static int estimate_best_b_count(MpegEncContext *s)
int intra_dc_precision
precision of the intra DC coefficient - 8
void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
int64_t rc_min_rate
minimum bitrate
common internal API header
uint8_t * intra_ac_vlc_last_length
static av_always_inline void encode_mb_internal(MpegEncContext *s, int motion_x, int motion_y, int mb_block_height, int mb_block_width, int mb_block_count)
const uint8_t *const ff_mpeg2_dc_scale_table[4]
void ff_h263_loop_filter(MpegEncContext *s)
enum AVPictureType pict_type
Picture type of the frame.
const uint8_t ff_h263_chroma_qscale_table[32]
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
#define AV_CODEC_FLAG_QSCALE
Use fixed qscale.
int display_picture_number
picture number in display order
uint16_t(* q_inter_matrix16)[2][64]
uint8_t * vbv_delay_ptr
pointer to vbv_delay in the bitstream
int fixed_qscale
fixed qscale if non zero
void ff_clean_mpeg4_qscales(MpegEncContext *s)
modify mb_type & qscale so that encoding is actually possible in MPEG-4
planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting col...
#define AV_CODEC_FLAG_AC_PRED
H.263 advanced intra coding / MPEG-4 AC prediction.
AVCodecContext * avcodec_alloc_context3(const AVCodec *codec)
Allocate an AVCodecContext and set its fields to default values.
int umvplus
== H.263+ && unrestricted_mv
Picture new_picture
copy of the source picture structure for encoding.
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
int intra_quant_bias
bias for the quantizer
int width
picture width / height.
int16_t(*[2] motion_val)[2]
Picture * current_picture_ptr
pointer to the current picture
attribute_deprecated int noise_reduction
float rc_max_available_vbv_use
Ratecontrol attempt to use, at maximum, of what can be used without an underflow. ...
int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
#define AV_CODEC_FLAG_PASS1
Use internal 2pass ratecontrol in first pass mode.
uint16_t(* q_intra_matrix16)[2][64]
identical to the above but for MMX & these are not permutated, second 64 entries are bias ...
attribute_deprecated int frame_skip_factor
#define FF_MB_DECISION_SIMPLE
uses mb_cmp
#define FFABS(a)
Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
int quality
quality (between 1 (good) and FF_LAMBDA_MAX (bad))
int(* ac_stats)[2][MAX_LEVEL+1][MAX_RUN+1][2]
[mb_intra][isChroma][level][run][last]
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
int block_last_index[12]
last non zero coefficient in block
static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale)
uint8_t idct_permutation[64]
IDCT input permutation.
void(* fdct)(int16_t *block)
const int16_t ff_mpeg4_default_non_intra_matrix[64]
int mb_decision
macroblock decision mode
#define CONFIG_FLV_ENCODER
static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride)
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
int ff_msmpeg4_encode_init(MpegEncContext *s)
int ac_esc_length
num of bits needed to encode the longest esc
preferred ID for MPEG-1/2 video decoding
static void set_put_bits_buffer_size(PutBitContext *s, int size)
Change the end of the buffer.
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
int block_index[6]
index to current MB in block based arrays with edges
Compute and use optimal Huffman tables.
the normal 2^n-1 "JPEG" YUV ranges
int * mb_index2xy
mb_index -> mb_x + mb_y*mb_stride
static uint8_t default_fcode_tab[MAX_MV *2+1]
#define AV_CODEC_CAP_SLICE_THREADS
Codec supports slice-based (or partition-based) multithreading.
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
attribute_deprecated int i_tex_bits
static void build_basis(uint8_t *perm)
#define MV_TYPE_16X16
1 vector for the whole mb
int first_slice_line
used in MPEG-4 too to handle resync markers
attribute_deprecated int misc_bits
This structure describes the bitrate properties of an encoded bitstream.
uint16_t * mc_mb_var
Table for motion compensated MB variances.
void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
int coded_picture_number
picture number in bitstream order
#define AV_LOG_INFO
Standard information.
uint16_t inter_matrix[64]
void ff_jpeg_fdct_islow_8(int16_t *data)
void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
struct MpegEncContext * thread_context[MAX_THREADS]
#define CONFIG_MSMPEG4_ENCODER
unsigned int lambda2
(lambda*lambda) >> FF_LAMBDA_SHIFT
void ff_faandct(int16_t *data)
double buffer_index
amount of bits in the video/audio buffer
Libavcodec external API header.
attribute_deprecated int mpeg_quant
void ff_h263_update_motion_val(MpegEncContext *s)
int h263_flv
use flv H.263 header
attribute_deprecated int scenechange_threshold
void avcodec_free_context(AVCodecContext **avctx)
Free the codec context and everything associated with it and write NULL to the provided pointer...
static const AVClass h263p_class
ptrdiff_t linesize
line size, in bytes, may be different from width
attribute_deprecated int prediction_method
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
void ff_convert_matrix(MpegEncContext *s, int(*qmat)[64], uint16_t(*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra)
const uint16_t ff_inv_aanscales[64]
attribute_deprecated int b_frame_strategy
void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
uint8_t * intra_chroma_ac_vlc_last_length
main external API structure.
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
ScanTable intra_scantable
void(* add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale)
int qmin
minimum quantizer
int height
picture size. must be a multiple of 16
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
static void write_slice_end(MpegEncContext *s)
int64_t dts_delta
pts difference between the first and second input frame, used for calculating dts of the first frame ...
int64_t user_specified_pts
last non-zero pts from AVFrame which was passed into avcodec_encode_video2()
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
static void denoise_dct_c(MpegEncContext *s, int16_t *block)
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
Supply a raw video or audio frame to the encoder.
static int frame_start(MpegEncContext *s)
float spatial_cplx_masking
spatial complexity masking (0-> disabled)
attribute_deprecated int header_bits
void ff_fix_long_p_mvs(MpegEncContext *s)
Picture * picture
main picture buffer
int data_partitioning
data partitioning flag from header
uint8_t * inter_ac_vlc_length
uint16_t * intra_matrix
custom intra quantization matrix
void ff_h263_encode_gob_header(MpegEncContext *s, int mb_line)
Encode a group of blocks header.
Describe the class of an AVClass context structure.
int stuffing_bits
bits used for stuffing
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced B-frame encoding.
#define FF_COMPLIANCE_NORMAL
int64_t mc_mb_var_sum
motion compensated MB variance for current frame
#define CANDIDATE_MB_TYPE_DIRECT
int(* try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale)
#define FF_MB_DECISION_RD
rate distortion
static void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type)
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, uint8_t *data, size_t size)
Wrap an existing array as a packet side data.
const uint16_t ff_mpeg1_default_intra_matrix[256]
int input_picture_number
used to set pic->display_picture_number, should not be used for/by anything else
const uint8_t ff_zigzag_direct[64]
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
int mb_info
interval for outputting info about mb offsets as side data
void ff_set_mpeg4_time(MpegEncContext *s)
static void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type)
attribute_deprecated int brd_scale
av_cold void ff_mjpeg_encode_close(MpegEncContext *s)
int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options)
Initialize the AVCodecContext to use the given AVCodec.
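A hedged sketch of the usual open sequence for one of the mpegvideo-based encoders (MPEG-4 chosen arbitrarily; the parameter values are illustrative):

    #include "libavcodec/avcodec.h"

    static AVCodecContext *open_mpeg4_encoder(int w, int h)
    {
        const AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_MPEG4);
        AVCodecContext *enc  = codec ? avcodec_alloc_context3(codec) : NULL;
        if (!enc)
            return NULL;

        enc->width     = w;
        enc->height    = h;
        enc->pix_fmt   = AV_PIX_FMT_YUV420P;
        enc->time_base = (AVRational){ 1, 25 };
        enc->bit_rate  = 800000;
        enc->gop_size  = 12;

        if (avcodec_open2(enc, codec, NULL) < 0) {
            avcodec_free_context(&enc);
            return NULL;
        }
        return enc;
    }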
#define CANDIDATE_MB_TYPE_BIDIR_I
const int16_t ff_mpeg4_default_intra_matrix[64]
int(* dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
int f_code
forward MV resolution
int ff_pre_estimate_p_frame_motion(MpegEncContext *s, int mb_x, int mb_y)
#define CANDIDATE_MB_TYPE_DIRECT0
int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
int ff_mjpeg_encode_stuffing(MpegEncContext *s)
Writes the complete JPEG frame when optimal Huffman tables are enabled, otherwise writes the stuffing...
void(* denoise_dct)(struct MpegEncContext *s, int16_t *block)
void ff_mjpeg_encode_picture_header(AVCodecContext *avctx, PutBitContext *pb, ScanTable *intra_scantable, int pred, uint16_t luma_intra_matrix[64], uint16_t chroma_intra_matrix[64])
attribute_deprecated int p_tex_bits
static int weight(int i, int blen, int offset)
uint16_t * inter_matrix
custom inter quantization matrix
int max_b_frames
max number of B-frames for encoding
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
static enum AVPixelFormat pix_fmts[]
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
int last_mv_dir
last mv_dir, used for B-frame encoding
int av_frame_get_buffer(AVFrame *frame, int align)
Allocate new buffer(s) for audio or video data.
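A small sketch of allocating a writable input frame with this call; the 32-byte alignment is a common choice, not a requirement stated here:

    #include "libavutil/frame.h"

    static AVFrame *alloc_video_frame(enum AVPixelFormat fmt, int w, int h)
    {
        AVFrame *frame = av_frame_alloc();
        if (!frame)
            return NULL;

        frame->format = fmt;
        frame->width  = w;
        frame->height = h;

        if (av_frame_get_buffer(frame, 32) < 0) {  /* fills data[] and linesize[] */
            av_frame_free(&frame);
            return NULL;
        }
        return frame;
    }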
int h263_pred
use MPEG-4/H.263 ac/dc predictions
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
float dark_masking
darkness masking (0-> disabled)
float temporal_cplx_masking
temporal complexity masking (0-> disabled)
void(* get_pixels)(int16_t *av_restrict block, const uint8_t *pixels, ptrdiff_t stride)
int ff_init_me(MpegEncContext *s)
uint8_t *[2] p_field_select_table
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode B-frame encoding.
#define AV_CODEC_FLAG_QPEL
Use qpel MC.
AAN (Arai, Agui and Nakajima) (I)DCT tables.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
int me_penalty_compensation
int64_t mc_mb_var_sum_temp
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode B-frame encoding.
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
static int estimate_motion_thread(AVCodecContext *c, void *arg)
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
struct AVCodecContext * avctx
int gop_size
the number of pictures in a group of pictures, or 0 for intra_only
PutBitContext pb
bit output
static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
#define CONFIG_MPEG4_ENCODER
#define CONFIG_MPEG2VIDEO_ENCODER
static void update_qscale(MpegEncContext *s)
int mb_cmp
macroblock comparison function (not supported yet)
int quantizer_noise_shaping
int(* fast_dct_quantize)(struct MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow)
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
void ff_msmpeg4_encode_ext_header(MpegEncContext *s)
static int pre_estimate_motion_thread(AVCodecContext *c, void *arg)
#define FF_DISABLE_DEPRECATION_WARNINGS
static const int32_t qmat16[MAT_SIZE]
common internal api header.
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
static void flush_put_bits(PutBitContext *s)
Pad the end of the output stream with zeros.
static int ref[MAX_W *MAX_W]
#define CANDIDATE_MB_TYPE_FORWARD
attribute_deprecated int rtp_payload_size
#define CONFIG_H261_ENCODER
void ff_h263_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
int adaptive_quant
use adaptive quantization
static int16_t basis[64][64]
Picture last_picture
copy of the previous picture structure.
Picture * last_picture_ptr
pointer to the previous picture.
planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting col...
int64_t reordered_pts
reordered pts to be used as dts for the next output frame when there's a delay
attribute_deprecated AVFrame * coded_frame
the picture in the bitstream
int ff_vbv_update(MpegEncContext *s, int frame_size)
#define H263_GOB_HEIGHT(h)
void av_init_packet(AVPacket *pkt)
Initialize optional fields of a packet with default values.
static void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size)
Initialize the PutBitContext s.
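Together with flush_put_bits above, this is how the encoder's bit output is driven. A small sketch against the put_bits.h API (the field values are arbitrary):

    #include "libavcodec/put_bits.h"

    /* Sketch: write two fields into buf and byte-align the result. */
    static int write_example_bits(uint8_t *buf, int buf_size)
    {
        PutBitContext pb;
        init_put_bits(&pb, buf, buf_size);

        put_bits(&pb, 12, 0xABC);          /* 12-bit field */
        put_bits(&pb, 4,  0x5);            /* 4-bit field  */

        flush_put_bits(&pb);               /* pad the tail with zero bits */
        return put_bits_count(&pb) >> 3;   /* bytes written */
    }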
#define CONFIG_H263_ENCODER
#define CONFIG_H263P_ENCODER
const uint8_t * chroma_qscale_table
qscale -> chroma_qscale (H.263)
#define AVERROR_UNKNOWN
Unknown error, typically from an external library.
AVCodec ff_msmpeg4v3_encoder
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
int trellis
trellis RD quantization
AVCPBProperties * ff_add_cpb_side_data(AVCodecContext *avctx)
Add a CPB properties side data to an encoding context.
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
#define AV_CODEC_FLAG_PASS2
Use internal 2pass ratecontrol in second pass mode.
void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
void ff_mpeg4_stuffing(PutBitContext *pbc)
add MPEG-4 stuffing bits (01...1)
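The "01...1" pattern is one zero bit followed by ones up to the next byte boundary (a whole 0x7F byte if already aligned). A hedged illustration of that rule using the same PutBitContext API; it shows the pattern, not necessarily the function's exact body:

    #include "libavcodec/put_bits.h"

    /* Illustrative only: append MPEG-4 style stuffing (one 0, then 1s) until byte-aligned. */
    static void stuff_to_byte_boundary(PutBitContext *pb)
    {
        int length = 8 - (put_bits_count(pb) & 7);      /* 1..8 stuffing bits */
        put_bits(pb, length, (1 << (length - 1)) - 1);  /* 0 followed by length-1 ones */
    }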
#define CANDIDATE_MB_TYPE_INTRA
int16_t(* blocks)[12][64]
int slices
Number of slices.
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, const AVFrame *pic_arg, int *got_packet)
const AVOption ff_mpv_generic_options[]
int last_bits
temp var used for calculating the above vars
void ff_mpeg4_init_partitions(MpegEncContext *s)
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
static av_always_inline int diff(const uint32_t a, const uint32_t b)
int dia_size
ME diamond size & shape.
attribute_deprecated int frame_bits
#define FF_ENABLE_DEPRECATION_WARNINGS
static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src)
attribute_deprecated int me_penalty_compensation
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
int avg_bitrate
Average bitrate of the stream, in bits per second.
int ff_get_best_fcode(MpegEncContext *s, int16_t(*mv_table)[2], int type)
int resync_mb_y
y position of last resync marker
struct AVCodecInternal * internal
Private context used for internal data.
int16_t(* block)[64]
points to one of the following blocks
void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
int64_t bit_rate
wanted bit rate
This side data corresponds to the AVCPBProperties struct.
PutBitContext tex_pb
used for data partitioned VOPs
Picture next_picture
copy of the next picture structure.
attribute_deprecated int p_count
int key_frame
1 -> keyframe, 0 -> not a keyframe
attribute_deprecated void(* rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb)
static void set_frame_distances(MpegEncContext *s)
static const double coeff[2][5]
#define FF_QP2LAMBDA
factor to convert from H.263 QP to lambda
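As a worked example of this conversion (FF_QP2LAMBDA lives in libavutil/avutil.h), an H.263 quantizer qp maps to a Lagrange multiplier of qp * FF_QP2LAMBDA:

    #include "libavutil/avutil.h"   /* FF_QP2LAMBDA */

    /* Illustrative only: H.263-style QP -> lambda. */
    static unsigned int qp_to_lambda(int qp)
    {
        return qp * FF_QP2LAMBDA;
    }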
void ff_fix_long_mvs(MpegEncContext *s, uint8_t *field_select_table, int field_select, int16_t(*mv_table)[2], int f_code, int type, int truncate)
Picture ** reordered_input_picture
pointer to the next pictures in coded order for encoding
static const struct twinvq_data tab
unsigned int byte_buffer_size
void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number)
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
static int encode_thread(AVCodecContext *c, void *arg)
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
#define LOCAL_ALIGNED_16(t, v,...)
static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
static void update_duplicate_context_after_me(MpegEncContext *dst, MpegEncContext *src)
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
#define AV_CODEC_FLAG_CLOSED_GOP
static uint8_t default_mv_penalty[MAX_FCODE+1][MAX_DMV *2+1]
int inter_quant_bias
bias for the quantizer
av_cold void ff_qpeldsp_init(QpelDSPContext *c)
#define CANDIDATE_MB_TYPE_BACKWARD
uint8_t * av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, int size)
Allocate new side data for a packet.
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
int b_code
backward MV resolution for B-frames (MPEG-4)
void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64], int motion_x, int motion_y)
int(* execute)(struct AVCodecContext *c, int(*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size)
The codec may call this to execute several independent things.
void ff_h261_encode_init(MpegEncContext *s)
int64_t mb_var_sum
sum of MB variance for current frame
static int encode_frame(AVCodecContext *c, AVFrame *frame)
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
AVPixelFormat
Pixel format.
This structure stores compressed data.
uint8_t * byte_buffer
temporary buffer used for encoders to store their bitstream
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
int ff_check_alignment(void)
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
int ff_match_2uint16(const uint16_t(*tab)[2], int size, int a, int b)
Return the index into tab at which {a,b} match elements {[0],[1]} of tab.
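A hedged sketch of such a pair lookup, written from the description above rather than from the actual body: a linear scan that returns the matching index (this sketch returns -1 when nothing matches):

    #include <stdint.h>

    /* Illustrative pair lookup: find i with tab[i][0] == a and tab[i][1] == b. */
    static int match_2uint16_sketch(const uint16_t (*tab)[2], int size, int a, int b)
    {
        for (int i = 0; i < size; i++)
            if (tab[i][0] == a && tab[i][1] == b)
                return i;
        return -1;
    }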
#define AV_NOPTS_VALUE
Undefined timestamp value.
static void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type, PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2], int *dmin, int *next_block, int motion_x, int motion_y)
unsigned int lambda
Lagrange multiplier used in rate distortion.
AVCodec ff_msmpeg4v2_encoder
int64_t rc_max_rate
maximum bitrate
uint16_t pb_time
time distance between the last b and p,s,i frame
enum idct_permutation_type perm_type
attribute_deprecated int pre_me
static const uint8_t sp5x_quant_table[20][64]
int next_lambda
next lambda used for retrying to encode a frame