54 int16_t *
block,
int n,
int qscale)
56 int i,
level, nCoeffs;
57 const uint16_t *quant_matrix;
64 for(i=1;i<=nCoeffs;i++) {
70 level = (
int)(level * qscale * quant_matrix[j]) >> 3;
71 level = (level - 1) | 1;
74 level = (
int)(level * qscale * quant_matrix[j]) >> 3;
75 level = (level - 1) | 1;
83 int16_t *
block,
int n,
int qscale)
85 int i,
level, nCoeffs;
86 const uint16_t *quant_matrix;
91 for(i=0; i<=nCoeffs; i++) {
97 level = (((level << 1) + 1) * qscale *
98 ((
int) (quant_matrix[j]))) >> 4;
99 level = (level - 1) | 1;
102 level = (((level << 1) + 1) * qscale *
103 ((
int) (quant_matrix[j]))) >> 4;
104 level = (level - 1) | 1;
112 int16_t *
block,
int n,
int qscale)
114 int i,
level, nCoeffs;
115 const uint16_t *quant_matrix;
125 for(i=1;i<=nCoeffs;i++) {
131 level = (
int)(level * qscale * quant_matrix[j]) >> 4;
134 level = (
int)(level * qscale * quant_matrix[j]) >> 4;
142 int16_t *
block,
int n,
int qscale)
144 int i,
level, nCoeffs;
145 const uint16_t *quant_matrix;
157 for(i=1;i<=nCoeffs;i++) {
163 level = (
int)(level * qscale * quant_matrix[j]) >> 4;
166 level = (
int)(level * qscale * quant_matrix[j]) >> 4;
176 int16_t *
block,
int n,
int qscale)
178 int i,
level, nCoeffs;
179 const uint16_t *quant_matrix;
189 for(i=0; i<=nCoeffs; i++) {
195 level = (((level << 1) + 1) * qscale *
196 ((
int) (quant_matrix[j]))) >> 5;
199 level = (((level << 1) + 1) * qscale *
200 ((
int) (quant_matrix[j]))) >> 5;
210 int16_t *
block,
int n,
int qscale)
212 int i,
level, qmul, qadd;
221 qadd = (qscale - 1) | 1;
230 for(i=1; i<=nCoeffs; i++) {
234 level = level * qmul - qadd;
236 level = level * qmul + qadd;
244 int16_t *
block,
int n,
int qscale)
246 int i,
level, qmul, qadd;
251 qadd = (qscale - 1) | 1;
256 for(i=0; i<=nCoeffs; i++) {
260 level = level * qmul - qadd;
262 level = level * qmul + qadd;
273 memset(dst + h*linesize, 128, 16);
279 memset(dst + h*linesize, 128, 8);
293 for (i=0; i<4; i++) {
360 int yc_size = y_size + 2 * c_size;
380 2 * 64 *
sizeof(
int),
fail)
386 for (i = 0; i < 12; i++) {
400 yc_size *
sizeof(int16_t) * 16,
fail);
434 #define COPY(a) bak->a = src->a 435 COPY(sc.edge_emu_buffer);
438 COPY(sc.rd_scratchpad);
439 COPY(sc.b_scratchpad);
440 COPY(sc.obmc_scratchpad);
469 for (i = 0; i < 12; i++) {
480 "scratch buffers.\n");
509 if (
s1->context_initialized){
541 if (
s1->picture &&
s1->picture[i].f->buf[0] &&
546 #define UPDATE_PICTURE(pic)\ 548 ff_mpeg_unref_picture(s->avctx, &s->pic);\ 549 if (s1->pic.f && s1->pic.f->buf[0])\ 550 ret = ff_mpeg_ref_picture(s->avctx, &s->pic, &s1->pic);\ 552 ret = ff_update_picture_tables(&s->pic, &s1->pic);\ 561 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \ 562 ((pic && pic >= old_ctx->picture && \ 563 pic < old_ctx->picture + MAX_PICTURE_COUNT) ? \ 564 &new_ctx->picture[pic - old_ctx->picture] : NULL) 577 (
char *) &
s1->pb_field_time +
sizeof(
s1->pb_field_time) -
578 (
char *) &
s1->last_time_base);
588 if (
s1->bitstream_buffer) {
589 if (
s1->bitstream_buffer_size +
593 s1->allocated_bitstream_buffer_size);
601 s1->bitstream_buffer_size);
610 &s->
sc,
s1->linesize) < 0) {
612 "scratch buffers.\n");
617 "be allocated due to unknown size.\n");
622 (
char *) &
s1->rtp_mode - (
char *) &
s1->progressive_sequence);
624 if (!
s1->first_field) {
626 if (
s1->current_picture_ptr)
684 int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
708 yc_size = y_size + 2 * c_size;
742 mb_array_size *
sizeof(
float),
fail);
744 mb_array_size *
sizeof(
float),
fail);
751 for (i = 0; i < 2; i++) {
753 for (j = 0; j < 2; j++) {
754 for (k = 0; k < 2; k++) {
757 mv_table_size * 2 *
sizeof(int16_t),
786 for (i = 0; i < yc_size; i++)
849 for (i = 0; i < 2; i++) {
850 for (j = 0; j < 2; j++) {
851 for (k = 0; k < 2; k++) {
902 "decoding to AV_PIX_FMT_NONE is not supported.\n");
913 " reducing to %d\n", nb_slices, max_slices);
914 nb_slices = max_slices;
961 for (i = 0; i < nb_slices; i++) {
970 (s->
mb_height * (i) + nb_slices / 2) / nb_slices;
972 (s->
mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1011 for (i = 0; i < 2; i++) {
1012 for (j = 0; j < 2; j++) {
1013 for (k = 0; k < 2; k++) {
1089 if (nb_slices > 1) {
1090 for (i = 0; i < nb_slices; i++) {
1101 (s->
mb_height * (i) + nb_slices / 2) / nb_slices;
1103 (s->
mb_height * (i + 1) + nb_slices / 2) / nb_slices;
1178 int i, h_chroma_shift, v_chroma_shift;
1182 for(i=0; i<frame->
height; i++)
1186 0x80, AV_CEIL_RSHIFT(frame->
width, h_chroma_shift));
1188 0x80, AV_CEIL_RSHIFT(frame->
width, h_chroma_shift));
1285 ff_dlog(s->
avctx,
"L%p N%p C%p L%p N%p C%p type:%d drop:%d\n",
1294 int h_chroma_shift, v_chroma_shift;
1296 &h_chroma_shift, &v_chroma_shift);
1299 "allocating dummy last picture for B frame\n");
1302 "warning: first frame is no keyframe\n");
1322 for(i=0; i<avctx->
height; i++)
1324 0x80, avctx->
width);
1328 0x80, AV_CEIL_RSHIFT(avctx->
width, h_chroma_shift));
1330 0x80, AV_CEIL_RSHIFT(avctx->
width, h_chroma_shift));
1335 for(i=0; i<avctx->
height; i++)
1365 #if 0 // BUFREF-FIXME 1387 for (i = 0; i < 4; i++) {
1449 int field_based,
int field_select,
1450 int src_x,
int src_y,
1452 int h_edge_pos,
int v_edge_pos,
1454 int motion_x,
int motion_y)
1457 const int op_index =
FFMIN(lowres, 3);
1458 const int s_mask = (2 <<
lowres) - 1;
1467 sx = motion_x & s_mask;
1468 sy = motion_y & s_mask;
1469 src_x += motion_x >> lowres + 1;
1470 src_y += motion_y >> lowres + 1;
1472 src += src_y * stride + src_x;
1474 if ((
unsigned)src_x >
FFMAX( h_edge_pos - (!!sx) - w, 0) ||
1475 (unsigned)src_y >
FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
1478 w + 1, (h + 1) << field_based,
1479 src_x, src_y << field_based,
1480 h_edge_pos, v_edge_pos);
1485 sx = (sx << 2) >>
lowres;
1486 sy = (sy << 2) >>
lowres;
1489 pix_op[op_index](dest,
src,
stride,
h, sx, sy);
1503 int motion_x,
int motion_y,
1506 uint8_t *ptr_y, *ptr_cb, *ptr_cr;
1507 int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
1511 const int block_s = 8>>
lowres;
1512 const int s_mask = (2 <<
lowres) - 1;
1525 motion_y += (bottom_field - field_select)*((1 << lowres)-1);
1528 sx = motion_x & s_mask;
1529 sy = motion_y & s_mask;
1530 src_x = s->
mb_x * 2 * block_s + (motion_x >> lowres + 1);
1531 src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
1534 uvsx = ((motion_x >> 1) & s_mask) | (sx & 1);
1535 uvsy = ((motion_y >> 1) & s_mask) | (sy & 1);
1536 uvsrc_x = src_x >> 1;
1537 uvsrc_y = src_y >> 1;
1542 uvsx = (2 * mx) & s_mask;
1543 uvsy = (2 * my) & s_mask;
1544 uvsrc_x = s->
mb_x * block_s + (mx >>
lowres);
1545 uvsrc_y = mb_y * block_s + (my >>
lowres);
1552 uvsrc_x = s->
mb_x * block_s + (mx >> lowres + 1);
1553 uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
1559 uvsy = motion_y & s_mask;
1561 uvsrc_x = s->
mb_x*block_s + (mx >> (lowres+1));
1564 uvsx = motion_x & s_mask;
1565 uvsy = motion_y & s_mask;
1572 ptr_y = ref_picture[0] + src_y * linesize + src_x;
1573 ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
1574 ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
1576 if ((
unsigned) src_x >
FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) || uvsrc_y<0 ||
1577 (
unsigned) src_y >
FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
1579 linesize >> field_based, linesize >> field_based,
1580 17, 17 + field_based,
1581 src_x, src_y << field_based, h_edge_pos,
1590 uvlinesize >> field_based, uvlinesize >> field_based,
1592 uvsrc_x, uvsrc_y << field_based,
1593 h_edge_pos >> 1, v_edge_pos >> 1);
1595 uvlinesize >> field_based,uvlinesize >> field_based,
1597 uvsrc_x, uvsrc_y << field_based,
1598 h_edge_pos >> 1, v_edge_pos >> 1);
1617 sx = (sx << 2) >>
lowres;
1618 sy = (sy << 2) >>
lowres;
1619 pix_op[lowres - 1](dest_y, ptr_y,
linesize,
h, sx, sy);
1623 uvsx = (uvsx << 2) >>
lowres;
1624 uvsy = (uvsy << 2) >>
lowres;
1626 pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
1627 pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
1640 const int op_index =
FFMIN(lowres, 3);
1641 const int block_s = 8 >>
lowres;
1642 const int s_mask = (2 <<
lowres) - 1;
1643 const int h_edge_pos = s->
h_edge_pos >> lowres + 1;
1644 const int v_edge_pos = s->
v_edge_pos >> lowres + 1;
1645 int emu = 0, src_x, src_y, sx, sy;
1661 src_x = s->
mb_x * block_s + (mx >> lowres + 1);
1662 src_y = s->
mb_y * block_s + (my >> lowres + 1);
1665 ptr = ref_picture[1] +
offset;
1666 if ((
unsigned) src_x >
FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
1667 (unsigned) src_y >
FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
1671 src_x, src_y, h_edge_pos, v_edge_pos);
1675 sx = (sx << 2) >>
lowres;
1676 sy = (sy << 2) >>
lowres;
1677 pix_op[op_index](dest_cb, ptr, s->
uvlinesize, block_s, sx, sy);
1679 ptr = ref_picture[2] +
offset;
1684 src_x, src_y, h_edge_pos, v_edge_pos);
1687 pix_op[op_index](dest_cr, ptr, s->
uvlinesize, block_s, sx, sy);
1704 int dir,
uint8_t **ref_picture,
1710 const int block_s = 8 >>
lowres;
1719 ref_picture, pix_op,
1720 s->
mv[dir][0][0], s->
mv[dir][0][1],
1726 for (i = 0; i < 4; i++) {
1729 ref_picture[0], 0, 0,
1730 (2 * mb_x + (i & 1)) * block_s,
1731 (2 * mb_y + (i >> 1)) * block_s,
1734 block_s, block_s, pix_op,
1735 s->
mv[dir][i][0], s->
mv[dir][i][1]);
1737 mx += s->
mv[dir][i][0];
1738 my += s->
mv[dir][i][1];
1750 ref_picture, pix_op,
1751 s->
mv[dir][0][0], s->
mv[dir][0][1],
1756 ref_picture, pix_op,
1757 s->
mv[dir][1][0], s->
mv[dir][1][1],
1767 ref_picture, pix_op,
1769 s->
mv[dir][0][1], 2 * block_s, mb_y >> 1);
1773 for (i = 0; i < 2; i++) {
1778 ref2picture = ref_picture;
1785 ref2picture, pix_op,
1786 s->
mv[dir][i][0], s->
mv[dir][i][1] +
1787 2 * block_s * i, block_s, mb_y >> 1);
1789 dest_y += 2 * block_s * s->
linesize;
1796 for (i = 0; i < 2; i++) {
1798 for (j = 0; j < 2; j++) {
1801 ref_picture, pix_op,
1802 s->
mv[dir][2 * i + j][0],
1803 s->
mv[dir][2 * i + j][1],
1809 for (i = 0; i < 2; i++) {
1812 ref_picture, pix_op,
1813 s->
mv[dir][2 * i][0],s->
mv[dir][2 * i][1],
1814 2 * block_s, mb_y >> 1);
1837 int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->
quarter_sample;
1838 int my, off, i, mvs;
1857 for (i = 0; i < mvs; i++) {
1858 my = s->
mv[dir][i][1];
1859 my_max =
FFMAX(my_max, my);
1860 my_min =
FFMIN(my_min, my);
1863 off = ((
FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;
1872 int16_t *
block,
int i,
uint8_t *dest,
int line_size,
int qscale)
1888 int16_t *
block,
int i,
uint8_t *dest,
int line_size,
int qscale)
1910 memset(s->
ac_val[0][xy ], 0, 32 *
sizeof(int16_t));
1911 memset(s->
ac_val[0][xy + wrap], 0, 32 *
sizeof(int16_t));
1924 memset(s->
ac_val[1][xy], 0, 16 *
sizeof(int16_t));
1925 memset(s->
ac_val[2][xy], 0, 16 *
sizeof(int16_t));
1942 int lowres_flag,
int is_mpeg12)
1957 for(j=0; j<64; j++){
1984 uint8_t *dest_y, *dest_cb, *dest_cr;
1985 int dct_linesize, dct_offset;
1991 const int block_size= lowres_flag ? 8>>s->
avctx->
lowres : 8;
2014 dest_cb= s->
dest[1];
2015 dest_cr= s->
dest[2];
2098 add_dct(s, block[0], 0, dest_y , dct_linesize);
2099 add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
2100 add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
2101 add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
2105 add_dct(s, block[4], 4, dest_cb, uvlinesize);
2106 add_dct(s, block[5], 5, dest_cr, uvlinesize);
2110 dct_offset = s->
interlaced_dct ? uvlinesize : uvlinesize*block_size;
2112 add_dct(s, block[4], 4, dest_cb, dct_linesize);
2113 add_dct(s, block[5], 5, dest_cr, dct_linesize);
2114 add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
2115 add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
2117 add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
2118 add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
2119 add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
2120 add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
2132 const int act_block_size = block_size * 2;
2136 s->
idsp.
idct_put(dest_y + dct_offset + act_block_size, dct_linesize, (int16_t*)(*s->
block32)[3]);
2139 dct_offset = s->
interlaced_dct ? uvlinesize : uvlinesize*block_size;
2148 s->
idsp.
idct_put(dest_cb + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->
block32)[10]);
2149 s->
idsp.
idct_put(dest_cr + act_block_size + dct_offset, dct_linesize, (int16_t*)(*s->
block32)[11]);
2155 put_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->
qscale);
2156 put_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->
qscale);
2157 put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->
qscale);
2174 s->
idsp.
idct_put(dest_y + block_size, dct_linesize, block[1]);
2175 s->
idsp.
idct_put(dest_y + dct_offset, dct_linesize, block[2]);
2176 s->
idsp.
idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
2185 dct_offset = s->
interlaced_dct ? uvlinesize : uvlinesize*block_size;
2189 s->
idsp.
idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
2190 s->
idsp.
idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
2192 s->
idsp.
idct_put(dest_cb + block_size, dct_linesize, block[8]);
2193 s->
idsp.
idct_put(dest_cr + block_size, dct_linesize, block[9]);
2194 s->
idsp.
idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
2195 s->
idsp.
idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
2252 s->
dest[0] += s->
mb_y * linesize << height_of_mb;
2256 s->
dest[0] += (s->
mb_y>>1) * linesize << height_of_mb;
2299 else if (qscale > 31)
int bitstream_buffer_size
uint8_t * scratchpad
data area for the ME algo, so that the ME does not need to malloc/free.
#define AV_CODEC_FLAG_INTERLACED_ME
interlaced motion estimation
int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
av_cold void ff_videodsp_init(VideoDSPContext *ctx, int bpc)
static int init_duplicate_context(MpegEncContext *s)
int ff_thread_can_start_frame(AVCodecContext *avctx)
const struct AVCodec * codec
int16_t(* b_bidir_back_mv_table_base)[2]
av_cold void ff_mpv_common_init_arm(MpegEncContext *s)
discard all frames except keyframes
void ff_init_block_index(MpegEncContext *s)
void ff_wmv2_add_mb(MpegEncContext *s, int16_t block1[6][64], uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
av_cold void ff_mpv_common_init_neon(MpegEncContext *s)
ScanTable intra_v_scantable
av_cold void ff_mpegvideodsp_init(MpegVideoDSPContext *c)
#define CONFIG_WMV2_ENCODER
This structure describes decoded (raw) audio or video data.
int16_t(* p_mv_table)[2]
MV table (1MV per MB) P-frame encoding.
int start_mb_y
start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
#define MV_TYPE_FIELD
2 vectors, one per field
const uint8_t * y_dc_scale_table
qscale -> y_dc_scale table
uint8_t * edge_emu_buffer
temporary buffer for if MVs point to out-of-frame data
int coded_width
Bitstream width / height, may be different from width/height e.g.
op_pixels_func avg_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
void(* dct_unquantize_h263_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
static av_always_inline void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64], int lowres_flag, int is_mpeg12)
#define AV_LOG_WARNING
Something somehow does not look correct.
static void chroma_4mv_motion_lowres(MpegEncContext *s, uint8_t *dest_cb, uint8_t *dest_cr, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int mx, int my)
uint8_t * coded_block_base
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
int end_mb_y
end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) ...
int16_t(*[3] ac_val)[16]
used for MPEG-4 AC prediction, all 3 arrays must be continuous
int v_edge_pos
horizontal / vertical position of the right/bottom edge (pixel replication)
h264_chroma_mc_func put_h264_chroma_pixels_tab[4]
static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
static void gray_frame(AVFrame *frame)
int msmpeg4_version
0=not msmpeg4, 1=mp41, 2=mp42, 3=mp43/divx3 4=wmv1/7 5=wmv2/8
int needs_realloc
Picture needs to be reallocated (eg due to a frame size change)
#define HAVE_INTRINSICS_NEON
uint8_t * bitstream_buffer
av_cold void ff_blockdsp_init(BlockDSPContext *c, AVCodecContext *avctx)
int field_picture
whether or not the picture was encoded in separate fields
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
int16_t(*[2][2] p_field_mv_table)[2]
MV table (2MV per MB) interlaced P-frame encoding.
int16_t(* p_mv_table_base)[2]
static int lowest_referenced_row(MpegEncContext *s, int dir)
find the lowest MB row referenced in the MVs
av_cold void ff_h264chroma_init(H264ChromaContext *c, int bit_depth)
uint32_t * score_map
map to store the scores
static void free_duplicate_context(MpegEncContext *s)
int bits_per_raw_sample
Bits per sample/pixel of internal libavcodec pixel/sample format.
int padding_bug_score
used to detect the VERY common padding bug in MPEG-4
int ff_mpeg_ref_picture(AVCodecContext *avctx, Picture *dst, Picture *src)
void ff_thread_await_progress(ThreadFrame *f, int n, int field)
Wait for earlier decoding threads to finish reference pictures.
int mb_num
number of MBs of a picture
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
void ff_draw_horiz_band(AVCodecContext *avctx, AVFrame *cur, AVFrame *last, int y, int h, int picture_structure, int first_field, int low_delay)
Draw a horizontal band if supported.
int h263_aic
Advanced INTRA Coding (AIC)
int16_t(* b_back_mv_table)[2]
MV table (1MV per MB) backward mode B-frame encoding.
void(* draw_horiz_band)(struct AVCodecContext *s, const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], int y, int type, int height)
If non NULL, 'draw_horiz_band' is called by the libavcodec decoder to draw a horizontal band...
enum AVPictureType last_picture
int encoding
true if we are encoding (vs decoding)
static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Macro definitions for various function/variable attributes.
int16_t(* b_back_mv_table_base)[2]
#define REBASE_PICTURE(pic, new_ctx, old_ctx)
static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
void ff_clean_intra_table_entries(MpegEncContext *s)
Clean dc, ac, coded_block for the current non-intra MB.
const uint8_t ff_mpeg2_non_linear_qscale[32]
void(* emulated_edge_mc)(uint8_t *dst, const uint8_t *src, ptrdiff_t dst_linesize, ptrdiff_t src_linesize, int block_w, int block_h, int src_x, int src_y, int w, int h)
Copy a rectangular area of samples to a temporary buffer and replicate the border samples...
#define av_assert0(cond)
assert() equivalent, that is always enabled.
void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
const uint8_t ff_mpeg1_dc_scale_table[128]
av_cold void ff_mpv_common_init_axp(MpegEncContext *s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
#define av_assert2(cond)
assert() equivalent, that does lie in speed critical code.
enum OutputFormat out_format
output format
int ff_mpv_common_frame_size_change(MpegEncContext *s)
void ff_mpv_motion(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, op_pixels_func(*pix_op)[4], qpel_mc_func(*qpix_op)[16])
uint8_t * pred_dir_table
used to store pred_dir for partitioned decoding
Multithreading support functions.
qpel_mc_func(* qpel_put)[16]
void ff_free_picture_tables(Picture *pic)
int no_rounding
apply no rounding to motion compensation (MPEG-4, msmpeg4, ...) for B-frames; rounding mode is always ...
Picture current_picture
copy of the current picture structure.
int ff_find_unused_picture(AVCodecContext *avctx, Picture *picture, int shared)
void ff_mpv_common_init_ppc(MpegEncContext *s)
#define PICT_BOTTOM_FIELD
int16_t(* b_bidir_forw_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
uint16_t pp_time
time distance between the last two P/S/I frames
static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
int interlaced_frame
The content of the picture is interlaced.
av_cold void ff_mpv_idct_init(MpegEncContext *s)
int mb_height
number of MBs horizontally & vertically
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
int codec_tag
internal codec_tag upper case converted from avctx codec_tag
#define AV_CODEC_FLAG_GRAY
Only decode/encode grayscale.
high precision timer, useful to profile code
int16_t(*[2][2] p_field_mv_table_base)[2]
void ff_set_qscale(MpegEncContext *s, int qscale)
set qscale and update qscale dependent variables.
static void gray16(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
int intra_only
if true, only intra pictures are generated
int16_t * dc_val[3]
used for MPEG-4 DC prediction, all 3 arrays must be continuous
int h263_plus
H.263+ headers.
int slice_context_count
number of used thread_contexts
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
int last_dc[3]
last DC values for MPEG-1
static void add_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size)
int mb_skipped
MUST BE SET only during DECODING.
void(* qpel_mc_func)(uint8_t *dst, const uint8_t *src, ptrdiff_t stride)
int partitioned_frame
is current frame partitioned
uint8_t * rd_scratchpad
scratchpad for rate distortion mb decision
#define MAX_PICTURE_COUNT
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift)
Utility function to access log2_chroma_w log2_chroma_h from the pixel format AVPixFmtDescriptor.
int active_thread_type
Which multithreading methods are in use by the codec.
int last_lambda_for[5]
last lambda for a specific pict type
void(* h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, ptrdiff_t srcStride, int h, int x, int y)
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s, int16_t *block, int n, int qscale)
int flags
AV_CODEC_FLAG_*.
static void put_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
simple assert() macros that are a bit more flexible than ISO C assert().
int overread_index
the index into ParseContext.buffer of the overread bytes
static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
int quarter_sample
1->qpel, 0->half pel ME/MC
uint16_t * mb_type
Table for candidate MB types for encoding (defines in mpegutils.h)
int low_delay
no reordering needed / has no B-frames
uint8_t *[2][2] b_field_select_table
static const uint8_t offset[127][2]
void * av_memdup(const void *p, size_t size)
Duplicate a buffer with av_malloc().
void ff_mpv_common_end(MpegEncContext *s)
av_cold void ff_mpv_common_init_x86(MpegEncContext *s)
void ff_mpeg_flush(AVCodecContext *avctx)
av_cold void ff_hpeldsp_init(HpelDSPContext *c, int flags)
int coded_picture_number
used to set pic->coded_picture_number, should not be used for/by anything else
uint8_t * error_status_table
const uint8_t ff_alternate_horizontal_scan[64]
int ff_mpeg_er_init(MpegEncContext *s)
void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size)
Allocate a buffer, reusing the given one if large enough.
common internal API header
int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of the image can be address...
int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
enum AVPictureType pict_type
Picture type of the frame.
#define UPDATE_PICTURE(pic)
#define AV_CODEC_FLAG_BITEXACT
Use only bitexact stuff (except (I)DCT).
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
#define FF_THREAD_FRAME
Decode more than one frame at once.
int overread
the number of bytes which were irreversibly read from the next frame
int next_p_frame_damaged
set if the next p frame is damaged, to avoid showing trashed B-frames
static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Picture new_picture
copy of the source picture structure for encoding.
void(* dct_unquantize_mpeg1_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
int width
picture width / height.
uint8_t * mbskip_table
used to avoid copy if macroblock skipped (for black regions for example) and used for B-frame encodin...
int16_t(*[2] motion_val)[2]
Picture * current_picture_ptr
pointer to the current picture
unsigned int allocated_bitstream_buffer_size
void ff_thread_report_progress(ThreadFrame *f, int n, int field)
Notify later decoding threads when part of their reference picture is ready.
int16_t(* ac_val_base)[16]
#define AV_CODEC_FLAG_PSNR
error[?] variables will be set during encoding.
void(* idct_add)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> add dest -> clip to unsigned 8 bit -> dest.
#define FF_THREAD_SLICE
Decode more than one part of a single frame at once.
int16_t(*[2][2][2] b_field_mv_table_base)[2]
void(* dct_unquantize_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
int16_t(* b_forw_mv_table_base)[2]
void(* dct_unquantize_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
int16_t(*[12] pblocks)[64]
int block_last_index[12]
last non zero coefficient in block
uint8_t idct_permutation[64]
IDCT input permutation.
av_cold void ff_mpv_common_init_mips(MpegEncContext *s)
int mb_decision
macroblock decision mode
uint8_t * mbintra_table
used to avoid setting {ac, dc, cbp}-pred stuff to zero on inter MB decoding
void(* idct_put)(uint8_t *dest, ptrdiff_t line_size, int16_t *block)
block -> idct -> clip to unsigned 8 bit -> dest.
void(* dct_unquantize_mpeg2_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_table, uint32_t *mbtype_table, int8_t *qscale_table, int16_t(*motion_val[2])[2], int *low_delay, int mb_width, int mb_height, int mb_stride, int quarter_sample)
Print debugging info for the given picture.
int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
preferred ID for MPEG-1/2 video decoding
void ff_mpv_decode_defaults(MpegEncContext *s)
Set the given MpegEncContext to defaults for decoding.
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
int block_index[6]
index to current MB in block based arrays with edges
int * mb_index2xy
mb_index -> mb_x + mb_y*mb_stride
int first_field
is 1 for the first field of a field picture, 0 otherwise
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
#define MV_TYPE_16X16
1 vector for the whole mb
static void clear_context(MpegEncContext *s)
AVBufferRef * qscale_table_buf
int16_t(* b_bidir_forw_mv_table_base)[2]
int coded_picture_number
picture number in bitstream order
uint16_t inter_matrix[64]
struct MpegEncContext * thread_context[MAX_THREADS]
Libavcodec external API header.
ptrdiff_t linesize
line size, in bytes, may be different from width
enum AVDiscard skip_idct
Skip IDCT/dequantization for selected frames.
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
main external API structure.
int ff_alloc_picture(AVCodecContext *avctx, Picture *pic, MotionEstContext *me, ScratchpadContext *sc, int shared, int encoding, int chroma_x_shift, int chroma_y_shift, int out_format, int mb_stride, int mb_width, int mb_height, int b8_stride, ptrdiff_t *linesize, ptrdiff_t *uvlinesize)
Allocate a Picture.
ScanTable intra_scantable
uint8_t * data
The data buffer.
uint8_t * coded_block
used for coded block pattern prediction (msmpeg4v3, wmv1)
int height
picture size; must be a multiple of 16
void(* op_pixels_func)(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
unsigned int codec_tag
fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
void(* dct_unquantize_mpeg2_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
op_pixels_func put_pixels_tab[4][4]
Halfpel motion compensation with rounding (a+b+1)>>1.
#define MV_TYPE_16X8
2 vectors, one per 16x8 block
void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
uint32_t state
contains the last few bytes in MSB order
Picture * picture
main picture buffer
ScanTable intra_h_scantable
op_pixels_func put_no_rnd_pixels_tab[4][4]
Halfpel motion compensation with no rounding (a+b)>>1.
int16_t(*[2][2][2] b_field_mv_table)[2]
MV table (4MV per MB) interlaced B-frame encoding.
uint8_t * cbp_table
used to store cbp, ac_pred for partitioned decoding
int closed_gop
MPEG1/2 GOP is closed.
int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me, ScratchpadContext *sc, int linesize)
unsigned int avpriv_toupper4(unsigned int x)
#define FF_DEBUG_DCT_COEFF
#define FF_MB_DECISION_RD
rate distortion
void(* dct_unquantize_h263_intra)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
const uint8_t ff_zigzag_direct[64]
ptrdiff_t uvlinesize
line size, for chroma in bytes, may be different from width
static int ff_h263_round_chroma(int x)
int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
generic function called after decoding the header and before a frame is decoded.
int f_code
forward MV resolution
int max_b_frames
max number of B-frames for encoding
int pict_type
AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
int size
Size of data in bytes.
int h263_pred
use MPEG-4/H.263 ac/dc predictions
int16_t(* b_bidir_back_mv_table)[2]
MV table (1MV per MB) bidir mode B-frame encoding.
static int init_context_frame(MpegEncContext *s)
Initialize and allocates MpegEncContext fields dependent on the resolution.
uint8_t *[2] p_field_select_table
int16_t(* b_direct_mv_table)[2]
MV table (1MV per MB) direct mode B-frame encoding.
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
const uint8_t * c_dc_scale_table
qscale -> c_dc_scale table
qpel_mc_func(* qpel_avg)[16]
int mv[2][4][2]
motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : depend...
int16_t(* b_forw_mv_table)[2]
MV table (1MV per MB) forward mode B-frame encoding.
int b8_stride
2*mb_width+1 used for some 8x8 block arrays to allow simple addressing
static void dct_unquantize_h263_intra_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Picture * next_picture_ptr
pointer to the next picture (for bidir pred)
struct AVCodecContext * avctx
void ff_mpeg_unref_picture(AVCodecContext *avctx, Picture *pic)
Deallocate a picture.
A reference to a data buffer.
discard all non reference
common internal api header.
int32_t(* block32)[12][64]
int mb_stride
mb_width+1 used for some arrays to allow simple addressing of left & top MBs without sig11 ...
void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
static int ref[MAX_W *MAX_W]
const uint8_t ff_default_chroma_qscale_table[32]
#define FF_ALLOC_OR_GOTO(ctx, p, size, label)
static av_cold int dct_init(MpegEncContext *s)
static void dct_unquantize_h263_inter_c(MpegEncContext *s, int16_t *block, int n, int qscale)
Picture last_picture
copy of the previous picture structure.
AVBufferRef * av_buffer_ref(AVBufferRef *buf)
Create a new reference to an AVBuffer.
Picture * last_picture_ptr
pointer to the previous picture.
int workaround_bugs
Work around bugs in encoders which sometimes cannot be detected automatically.
uint8_t * b_scratchpad
scratchpad used for writing into write only buffers
const uint8_t * chroma_qscale_table
qscale -> chroma_qscale (H.263)
const uint8_t ff_alternate_vertical_scan[64]
uint32_t * map
map to avoid duplicate evaluations
int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
H264ChromaContext h264chroma
int16_t(* blocks)[12][64]
h264_chroma_mc_func avg_h264_chroma_pixels_tab[4]
int slices
Number of slices.
av_cold int ff_mpv_common_init(MpegEncContext *s)
init common structure for both encoder and decoder.
av_cold void ff_init_scantable(uint8_t *permutation, ScanTable *st, const uint8_t *src_scantable)
av_cold void ff_idctdsp_init(IDCTDSPContext *c, AVCodecContext *avctx)
int top_field_first
If the content is interlaced, is top field displayed first.
void ff_mpv_frame_end(MpegEncContext *s)
#define MV_TYPE_DMV
2 vectors, special mpeg2 Dual Prime Vectors
void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
uint8_t * obmc_scratchpad
int16_t(* block)[64]
points to one of the following blocks
ParseContext parse_context
static void add_dequant_dct(MpegEncContext *s, int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
Picture next_picture
copy of the next picture structure.
int key_frame
1 -> keyframe, 0-> not
#define CONFIG_WMV2_DECODER
static av_always_inline void mpeg_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int field_based, int bottom_field, int field_select, uint8_t **ref_picture, h264_chroma_mc_func *pix_op, int motion_x, int motion_y, int h, int mb_y)
int chroma_qscale
chroma QP
void ff_mpv_common_defaults(MpegEncContext *s)
Set the given MpegEncContext to common defaults (same for encoding and decoding). ...
static void free_context_frame(MpegEncContext *s)
Frees and resets MpegEncContext fields depending on the resolution.
static int hpel_motion_lowres(MpegEncContext *s, uint8_t *dest, uint8_t *src, int field_based, int field_select, int src_x, int src_y, int width, int height, ptrdiff_t stride, int h_edge_pos, int v_edge_pos, int w, int h, h264_chroma_mc_func *pix_op, int motion_x, int motion_y)
uint16_t intra_matrix[64]
matrix transmitted in the bitstream
uint32_t * mb_type
types and macros are defined in mpegutils.h
void(* decode_mb)(struct MpegEncContext *s)
Called for every Macroblock in a slice.
int workaround_bugs
workaround bugs in encoders which cannot be detected automatically
ScanTable inter_scantable
if inter == intra then intra should be used to reduce the cache usage
#define FFSWAP(type, a, b)
#define MV_TYPE_8X8
4 vectors (H.263, MPEG-4 4MV)
int16_t(* b_direct_mv_table_base)[2]
int b_code
backward MV resolution for B-frames (MPEG-4)
void(* dct_unquantize_mpeg1_inter)(struct MpegEncContext *s, int16_t *block, int n, int qscale)
void ff_mpv_report_decode_progress(MpegEncContext *s)
static void MPV_motion_lowres(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr, int dir, uint8_t **ref_picture, h264_chroma_mc_func *pix_op)
motion compensation of a single macroblock
#define FF_ALLOCZ_OR_GOTO(ctx, p, size, label)
#define AV_CEIL_RSHIFT(a, b)