53 uint32_t v32 = v * 0x01010101;
62 uint64_t v64 = v * 0x0101010101010101ULL;
68 uint32_t v32 = v * 0x01010101;
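The multiplications by 0x01010101 / 0x0101010101010101ULL above replicate one context byte across a whole word so that setctx_2d can write several per-4x4 context entries with a single store. A minimal standalone sketch of the same trick, using memcpy in place of the AV_WN32A alignment macro:

#include <stdint.h>
#include <string.h>

/* Splat the byte v into four consecutive context entries with one store. */
static void splat4(uint8_t *ctx, uint8_t v)
{
    uint32_t v32 = v * 0x01010101u;   /* e.g. 0x2a -> 0x2a2a2a2a */
    memcpy(ctx, &v32, sizeof(v32));   /* stand-in for AV_WN32A(ctx, v32) */
}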
83 0x0, 0x8, 0x0, 0x8, 0xc, 0x8, 0xc, 0xe, 0xc, 0xe, 0xf, 0xe, 0xf
86 0x0, 0x0, 0x8, 0x8, 0x8, 0xc, 0xc, 0xc, 0xe, 0xe, 0xe, 0xf, 0xf
94 int row = td->row, col = td->col, row7 = td->row7;
95 enum TxfmMode max_tx = max_tx_for_bl_bp[b->bs];
117 for (y = 0; y < h4; y++) {
118 int idx_base = (y + row) * 8 * s->sb_cols + col;
119 for (x = 0; x < w4; x++)
120 pred = FFMIN(pred, refsegmap[idx_base + x]);
158 if (have_a && have_l) {
270 } else if (b->intra) {
299 static const uint8_t size_group[10] = {
300 3, 3, 3, 3, 2, 2, 2, 1, 1, 1
302 int sz = size_group[b->bs];
315 static const uint8_t inter_mode_ctx_lut[14][14] = {
316 { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
317 { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
318 { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
319 { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
320 { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
321 { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
322 { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
323 { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
324 { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
325 { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 5, 5, 5, 5 },
326 { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 },
327 { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 2, 2, 1, 3 },
328 { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 1, 1, 0, 3 },
329 { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 3, 3, 3, 4 },
402 c = (refa == refl) ? 3 : 1;
419 c = (refl == refa) ? 4 : 2;
583 static const uint8_t off[10] = {
584 3, 0, 0, 1, 0, 0, 0, 0, 0, 0
682 #define SPLAT_CTX(var, val, n) \
684 case 1: var = val; break; \
685 case 2: AV_WN16A(&var, val * 0x0101); break; \
686 case 4: AV_WN32A(&var, val * 0x01010101); break; \
687 case 8: AV_WN64A(&var, val * 0x0101010101010101ULL); break; \
689 uint64_t v64 = val * 0x0101010101010101ULL; \
690 AV_WN64A(&var, v64); \
691 AV_WN64A(&((uint8_t *) &var)[8], v64); \
696 #define SPLAT_CTX(var, val, n) \
698 case 1: var = val; break; \
699 case 2: AV_WN16A(&var, val * 0x0101); break; \
700 case 4: AV_WN32A(&var, val * 0x01010101); break; \
702 uint32_t v32 = val * 0x01010101; \
703 AV_WN32A(&var, v32); \
704 AV_WN32A(&((uint8_t *) &var)[4], v32); \
708 uint32_t v32 = val * 0x01010101; \
709 AV_WN32A(&var, v32); \
710 AV_WN32A(&((uint8_t *) &var)[4], v32); \
711 AV_WN32A(&((uint8_t *) &var)[8], v32); \
712 AV_WN32A(&((uint8_t *) &var)[12], v32); \
719 #define SET_CTXS(perf, dir, off, n) \
721 SPLAT_CTX(perf->dir##_skip_ctx[off], b->skip, n); \
722 SPLAT_CTX(perf->dir##_txfm_ctx[off], b->tx, n); \
723 SPLAT_CTX(perf->dir##_partition_ctx[off], dir##_ctx[b->bs], n); \
724 if (!s->s.h.keyframe && !s->s.h.intraonly) { \
725 SPLAT_CTX(perf->dir##_intra_ctx[off], b->intra, n); \
726 SPLAT_CTX(perf->dir##_comp_ctx[off], b->comp, n); \
727 SPLAT_CTX(perf->dir##_mode_ctx[off], b->mode[3], n); \
729 SPLAT_CTX(perf->dir##_ref_ctx[off], vref, n); \
730 if (s->s.h.filtermode == FILTER_SWITCHABLE) { \
731 SPLAT_CTX(perf->dir##_filter_ctx[off], filter_id, n); \
736 case 1: SET_CTXS(s, above, col, 1); break;
737 case 2: SET_CTXS(s, above, col, 2); break;
738 case 4: SET_CTXS(s, above, col, 4); break;
739 case 8: SET_CTXS(s, above, col, 8); break;
742 case 1: SET_CTXS(td, left, row7, 1); break;
743 case 2: SET_CTXS(td, left, row7, 2); break;
744 case 4: SET_CTXS(td, left, row7, 4); break;
745 case 8: SET_CTXS(td, left, row7, 8); break;
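SPLAT_CTX picks the widest aligned store that covers n context bytes, and SET_CTXS applies it to each per-block context array along the above edge (indexed by col) and the left edge (indexed by row7). Functionally each splat amounts to a small memset; a rough sketch with hypothetical names, not the decoder's API:

#include <stdint.h>
#include <string.h>

/* Record a block's skip flag in the above/left prediction contexts, for a
 * block covering w4 above-context entries and h4 left-context entries. */
static void set_skip_ctx(uint8_t *above_skip_ctx, uint8_t *left_skip_ctx,
                         int col, int row7, int w4, int h4, uint8_t skip)
{
    memset(&above_skip_ctx[col], skip, w4);  /* what SET_CTXS(s, above, col, n) splats */
    memset(&left_skip_ctx[row7], skip, h4);  /* what SET_CTXS(td, left, row7, n) splats */
}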
765 for (n = 0; n < w4 * 2; n++) {
769 for (n = 0; n < h4 * 2; n++) {
777 for (y = 0; y < h4; y++) {
778 int x, o = (row + y) * s->sb_cols * 8 + col;
782 for (x = 0; x < w4; x++) {
786 } else if (b->comp) {
787 for (x = 0; x < w4; x++) {
788 mv[x].ref[0] = b->ref[0];
789 mv[x].ref[1] = b->ref[1];
794 for (x = 0; x < w4; x++) {
795 mv[x].ref[0] = b->ref[0];
806 int is_tx32x32, int is8bitsperpixel, int bpp, unsigned (*cnt)[6][3],
807 unsigned (*eob)[6][2], uint8_t (*p)[6][11],
808 int nnz, const int16_t *scan, const int16_t (*nb)[2],
809 const int16_t *band_counts, int16_t *qmul)
811 int i = 0, band = 0, band_left = band_counts[band];
819 eob[band][nnz][val]++;
827 band_left = band_counts[++band];
829 nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
879 if (!is8bitsperpixel) {
904 #define STORE_COEF(c, i, v) do { \
905 if (is8bitsperpixel) { \
908 AV_WN32A(&c[i * 2], v); \
912 band_left = band_counts[++band];
917 nnz = (1 + cache[nb[i][0]] + cache[nb[i][1]]) >> 1;
919 } while (++i < n_coeffs);
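In the coefficient loop above, the nonzero context fed to the next position (lines 829 and 917) is the rounded-up average of the token-cache values of its two scan-order neighbours: with neighbour values 2 and 1, for instance, nnz = (1 + 2 + 1) >> 1 = 2.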
925 unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
926 uint8_t (*p)[6][11], int nnz, const int16_t *scan,
927 const int16_t (*nb)[2], const int16_t *band_counts,
931 nnz, scan, nb, band_counts, qmul);
935 unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
936 uint8_t (*p)[6][11], int nnz, const int16_t *scan,
937 const int16_t (*nb)[2], const int16_t *band_counts,
941 nnz, scan, nb, band_counts, qmul);
945 unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
946 uint8_t (*p)[6][11], int nnz, const int16_t *scan,
947 const int16_t (*nb)[2], const int16_t *band_counts,
951 nnz, scan, nb, band_counts, qmul);
955 unsigned (*cnt)[6][3], unsigned (*eob)[6][2],
956 uint8_t (*p)[6][11], int nnz, const int16_t *scan,
957 const int16_t (*nb)[2], const int16_t *band_counts,
961 nnz, scan, nb, band_counts, qmul);
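These four thin wrappers (lines 925-961) pass compile-time-constant is_tx32x32 / is8bitsperpixel / bpp values into the always-inline decode_coeffs_b_generic, so the compiler emits four specialized coefficient loops from one source body. A minimal sketch of the same specialization pattern, with hypothetical names:

#include <stdint.h>

/* Generic worker: with constant flags it folds down to straight-line code. */
static inline int cost_generic(int is_tx32x32, int bpp, int level)
{
    int n_coeffs = is_tx32x32 ? 1024 : 256;   /* stand-in for the per-size work */
    return level * n_coeffs * (bpp / 8);
}

/* Thin specialized entry points, mirroring decode_coeffs_b{,32}_{8,16}bpp. */
static int cost_8bpp(int level)    { return cost_generic(0, 8,  level); }
static int cost32_16bpp(int level) { return cost_generic(1, 16, level); }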
968 int row = td->row, col = td->col;
973 int end_x = FFMIN(2 * (s->cols - col), w4);
974 int end_y = FFMIN(2 * (s->rows - row), h4);
975 int n, pl, x, y, ret;
984 static const int16_t band_counts[4][8] = {
985 { 1, 2, 3, 4, 3, 16 - 13 },
986 { 1, 2, 3, 4, 11, 64 - 21 },
987 { 1, 2, 3, 4, 11, 256 - 21 },
988 { 1, 2, 3, 4, 11, 1024 - 21 },
990 const int16_t *y_band_counts = band_counts[b->tx];
991 const int16_t *uv_band_counts = band_counts[b->uvtx];
992 int bytesperpixel = is8bitsperpixel ? 1 : 2;
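Each band_counts row splits a transform's coefficients into six coefficient bands, so the entries sum to the coefficient count of that transform size: 1 + 2 + 3 + 4 + 3 + (16 - 13) = 16 for TX_4X4, 1 + 2 + 3 + 4 + 11 + (64 - 21) = 64 for TX_8X8, and likewise 256 and 1024 for TX_16X16 and TX_32X32.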
995 #define MERGE(la, end, step, rd) \
996 for (n = 0; n < end; n += step) \
998 #define MERGE_CTX(step, rd) \
1000 MERGE(l, end_y, step, rd); \
1001 MERGE(a, end_x, step, rd); \
1004 #define DECODE_Y_COEF_LOOP(step, mode_index, v) \
1005 for (n = 0, y = 0; y < end_y; y += step) { \
1006 for (x = 0; x < end_x; x += step, n += step * step) { \
1007 enum TxfmType txtp = ff_vp9_intra_txfm_type[b->mode[mode_index]]; \
1008 ret = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
1009 (td, td->block + 16 * n * bytesperpixel, 16 * step * step, \
1010 c, e, p, a[x] + l[y], yscans[txtp], \
1011 ynbs[txtp], y_band_counts, qmul[0]); \
1012 a[x] = l[y] = !!ret; \
1013 total_coeff |= !!ret; \
1015 AV_WN16A(&td->eob[n], ret); \
1022 #define SPLAT(la, end, step, cond) \
1024 for (n = 1; n < end; n += step) \
1025 la[n] = la[n - 1]; \
1026 } else if (step == 4) { \
1028 for (n = 0; n < end; n += step) \
1029 AV_WN32A(&la[n], la[n] * 0x01010101); \
1031 for (n = 0; n < end; n += step) \
1032 memset(&la[n + 1], la[n], FFMIN(end - n - 1, 3)); \
1036 if (HAVE_FAST_64BIT) { \
1037 for (n = 0; n < end; n += step) \
1038 AV_WN64A(&la[n], la[n] * 0x0101010101010101ULL); \
1040 for (n = 0; n < end; n += step) { \
1041 uint32_t v32 = la[n] * 0x01010101; \
1042 AV_WN32A(&la[n], v32); \
1043 AV_WN32A(&la[n + 4], v32); \
1047 for (n = 0; n < end; n += step) \
1048 memset(&la[n + 1], la[n], FFMIN(end - n - 1, 7)); \
1051 #define SPLAT_CTX(step) \
1053 SPLAT(a, end_x, step, end_x == w4); \
1054 SPLAT(l, end_y, step, end_y == h4); \
1079 #define DECODE_UV_COEF_LOOP(step, v) \
1080 for (n = 0, y = 0; y < end_y; y += step) { \
1081 for (x = 0; x < end_x; x += step, n += step * step) { \
1082 ret = (is8bitsperpixel ? decode_coeffs_b##v##_8bpp : decode_coeffs_b##v##_16bpp) \
1083 (td, td->uvblock[pl] + 16 * n * bytesperpixel, \
1084 16 * step * step, c, e, p, a[x] + l[y], \
1085 uvscan, uvnb, uv_band_counts, qmul[1]); \
1086 a[x] = l[y] = !!ret; \
1087 total_coeff |= !!ret; \
1089 AV_WN16A(&td->uveob[pl][n], ret); \
1091 td->uveob[pl][n] = ret; \
1103 for (pl = 0; pl < 2; pl++) {
1142 int row_and_7, int col_and_7,
1143 int w, int h, int col_end, int row_end,
1146 static const unsigned wide_filter_col_mask[2] = { 0x11, 0x01 };
1147 static const unsigned wide_filter_row_mask[2] = { 0x03, 0x07 };
1159 if (tx == TX_4X4 && (ss_v | ss_h)) {
1174 if (tx == TX_4X4 && !skip_inter) {
1175 int t = 1 << col_and_7, m_col = (t << w) - t, y;
1177 int m_row_8 = m_col & wide_filter_col_mask[ss_h], m_row_4 = m_col - m_row_8;
1179 for (y = row_and_7; y < h + row_and_7; y++) {
1180 int col_mask_id = 2 - !(y & wide_filter_row_mask[ss_v]);
1182 mask[0][y][1] |= m_row_8;
1183 mask[0][y][2] |= m_row_4;
1194 if ((ss_h & ss_v) && (col_end & 1) && (y & 1)) {
1195 mask[1][y][col_mask_id] |= (t << (w - 1)) - t;
1197 mask[1][y][col_mask_id] |= m_col;
1200 mask[0][y][3] |= m_col;
1202 if (ss_h && (col_end & 1))
1203 mask[1][y][3] |= (t << (w - 1)) - t;
1205 mask[1][y][3] |= m_col;
1209 int y, t = 1 << col_and_7, m_col = (t << w) - t;
1212 int mask_id = (tx == TX_8X8);
1213 int l2 = tx + ss_h - 1, step1d;
1214 static const unsigned masks[4] = { 0xff, 0x55, 0x11, 0x01 };
1215 int m_row = m_col & masks[l2];
1219 if (ss_h && tx > TX_8X8 && (w ^ (w - 1)) == 1) {
1220 int m_row_16 = ((t << (w - 1)) - t) & masks[l2];
1221 int m_row_8 = m_row - m_row_16;
1223 for (y = row_and_7; y < h + row_and_7; y++) {
1224 mask[0][y][0] |= m_row_16;
1225 mask[0][y][1] |= m_row_8;
1228 for (y = row_and_7; y < h + row_and_7; y++)
1229 mask[0][y][mask_id] |= m_row;
1234 if (ss_v && tx > TX_8X8 && (h ^ (h - 1)) == 1) {
1235 for (y = row_and_7; y < h + row_and_7 - 1; y += step1d)
1236 mask[1][y][0] |= m_col;
1237 if (y - row_and_7 == h - 1)
1238 mask[1][y][1] |= m_col;
1240 for (y = row_and_7; y < h + row_and_7; y += step1d)
1241 mask[1][y][mask_id] |= m_col;
1243 } else if (tx != TX_4X4) {
1246 mask_id = (tx == TX_8X8) || (h == ss_v);
1247 mask[1][row_and_7][mask_id] |= m_col;
1248 mask_id = (tx == TX_8X8) || (w == ss_h);
1249 for (y = row_and_7; y < h + row_and_7; y++)
1250 mask[0][y][mask_id] |= t;
1252 int t8 = t & wide_filter_col_mask[ss_h], t4 = t - t8;
1254 for (y = row_and_7; y < h + row_and_7; y++) {
1258 mask[1][row_and_7][2 - !(row_and_7 & wide_filter_row_mask[ss_v])] |= m_col;
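The recurring (t << w) - t construction in mask_edges builds a run of w consecutive set bits starting at bit col_and_7, which is then narrowed by the wide-filter and subsampling masks. A small standalone sketch of the same construction:

/* Mask with `width` consecutive bits set, starting at bit `pos`. */
static unsigned run_mask(unsigned pos, unsigned width)
{
    unsigned t = 1u << pos;
    return (t << width) - t;   /* pos = 2, width = 3 -> 0b0011100 */
}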
1264 VP9Filter *lflvl, ptrdiff_t yoff, ptrdiff_t uvoff,
1280 td->min_mv.x = -(128 + col * 64);
1281 td->min_mv.y = -(128 + row * 64);
1290 b->uvtx = b->tx - ((s->ss_h && w4 * 2 == (1 << b->tx)) ||
1291 (s->ss_v && h4 * 2 == (1 << b->tx)));
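A worked reading of the uvtx computation, assuming w4/h4 here are the block dimensions in 8-pixel units, so w4 * 2 is the width in 4-pixel units and 1 << b->tx is the transform width in the same units: the chroma transform equals the luma transform, stepped down one size when the luma transform spans the whole block in a subsampled direction. For a 16x16 block with TX_16X16 and 4:2:0 subsampling, w4 * 2 = 4 equals 1 << TX_16X16 = 4, so uvtx becomes TX_8X8, matching the 8x8 chroma block.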
1296 if (bytesperpixel == 1) {
1307 int row7 = td->row7;
1309 #define SPLAT_ZERO_CTX(v, n) \
1311 case 1: v = 0; break; \
1312 case 2: AV_ZERO16(&v); break; \
1313 case 4: AV_ZERO32(&v); break; \
1314 case 8: AV_ZERO64(&v); break; \
1315 case 16: AV_ZERO128(&v); break; \
1317 #define SPLAT_ZERO_YUV(dir, var, off, n, dir2) \
1319 SPLAT_ZERO_CTX(dir##_y_##var[off * 2], n * 2); \
1320 if (s->ss_##dir2) { \
1321 SPLAT_ZERO_CTX(dir##_uv_##var[0][off], n); \
1322 SPLAT_ZERO_CTX(dir##_uv_##var[1][off], n); \
1324 SPLAT_ZERO_CTX(dir##_uv_##var[0][off * 2], n * 2); \
1325 SPLAT_ZERO_CTX(dir##_uv_##var[1][off * 2], n * 2); \
1345 s->td[0].block += w4 * h4 * 64 * bytesperpixel;
1348 s->td[0].eob += 4 * w4 * h4;
1359 emu[0] = (col + w4) * 8 * bytesperpixel > f->linesize[0] ||
1360 (row + h4) > s->rows;
1361 emu[1] = ((col + w4) * 8 >> s->ss_h) * bytesperpixel > f->linesize[1] ||
1362 (row + h4) > s->rows;
1367 td->dst[0] = f->data[0] + yoff;
1375 td->dst[1] = f->data[1] + uvoff;
1376 td->dst[2] = f->data[2] + uvoff;
1380 if (s->s.h.bpp > 8) {
1386 if (s->s.h.bpp > 8) {
1395 for (n = 0; o < w; n++) {
1401 td->tmp_y + o * bytesperpixel, 128, h, 0, 0);
1410 for (n = s->ss_h; o < w; n++) {
1416 td->tmp_uv[0] + o * bytesperpixel, 128, h, 0, 0);
1418 td->tmp_uv[1] + o * bytesperpixel, 128, h, 0, 0);
1432 mask_edges(lflvl->mask[0], 0, 0, row7, col7, x_end, y_end, 0, 0, b->tx, skip_inter);
1437 b->uvtx, skip_inter);
1442 s->td[0].block += w4 * h4 * 64 * bytesperpixel;
1445 s->td[0].eob += 4 * w4 * h4;