const uint8_t *obmc1= obmc + y*obmc_stride;
const uint8_t *obmc2= obmc1+ (obmc_stride>>1);
const uint8_t *obmc3= obmc1+ obmc_stride*(obmc_stride>>1);
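/* OBMC blend: each output pixel sums the four overlapping block predictions, weighted by the four quadrants of the OBMC window (obmc1..obmc4). */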
int v=   obmc1[x] * block[3][x + y*src_stride]
        +obmc2[x] * block[2][x + y*src_stride]
        +obmc3[x] * block[1][x + y*src_stride]
        +obmc4[x] * block[0][x + y*src_stride];
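/* Branch-light clamp to 0..255: for out-of-range v, v>>31 is 0 or -1, so ~(v>>31) is all-ones (255 after the uint8_t store) on overflow and 0 on underflow. */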
if(v&(~255)) v= ~(v>>31);
dst8[x + y*src_stride] = v;
for (i = 0; frame->data[i]; i++) {
int plane_index, level, orientation;
for(plane_index=0; plane_index<3; plane_index++){
    for(orientation=level ? 1:0; orientation<4; orientation++){
for(i=0; i<QROOT; i++){
    v *= pow(2, 1.0 / QROOT);
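/* brane[256]: for each sub-pel position dx + 16*dy, the low and high nibbles select the pair of half-pel planes that mc_block() combines (unpacked below into r and l). */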
static const uint8_t brane[256]={
    0x00,0x01,0x01,0x01,0x01,0x01,0x01,0x01,0x11,0x12,0x12,0x12,0x12,0x12,0x12,0x12,
    0x04,0x05,0xcc,0xcc,0xcc,0xcc,0xcc,0x41,0x15,0x16,0xcc,0xcc,0xcc,0xcc,0xcc,0x52,
    0x04,0xcc,0x05,0xcc,0xcc,0xcc,0x41,0xcc,0x15,0xcc,0x16,0xcc,0xcc,0xcc,0x52,0xcc,
    0x04,0xcc,0xcc,0x05,0xcc,0x41,0xcc,0xcc,0x15,0xcc,0xcc,0x16,0xcc,0x52,0xcc,0xcc,
    0x04,0xcc,0xcc,0xcc,0x41,0xcc,0xcc,0xcc,0x15,0xcc,0xcc,0xcc,0x16,0xcc,0xcc,0xcc,
    0x04,0xcc,0xcc,0x41,0xcc,0x05,0xcc,0xcc,0x15,0xcc,0xcc,0x52,0xcc,0x16,0xcc,0xcc,
    0x04,0xcc,0x41,0xcc,0xcc,0xcc,0x05,0xcc,0x15,0xcc,0x52,0xcc,0xcc,0xcc,0x16,0xcc,
    0x04,0x41,0xcc,0xcc,0xcc,0xcc,0xcc,0x05,0x15,0x52,0xcc,0xcc,0xcc,0xcc,0xcc,0x16,
    0x44,0x45,0x45,0x45,0x45,0x45,0x45,0x45,0x55,0x56,0x56,0x56,0x56,0x56,0x56,0x56,
    0x48,0x49,0xcc,0xcc,0xcc,0xcc,0xcc,0x85,0x59,0x5A,0xcc,0xcc,0xcc,0xcc,0xcc,0x96,
    0x48,0xcc,0x49,0xcc,0xcc,0xcc,0x85,0xcc,0x59,0xcc,0x5A,0xcc,0xcc,0xcc,0x96,0xcc,
    0x48,0xcc,0xcc,0x49,0xcc,0x85,0xcc,0xcc,0x59,0xcc,0xcc,0x5A,0xcc,0x96,0xcc,0xcc,
    0x48,0xcc,0xcc,0xcc,0x49,0xcc,0xcc,0xcc,0x59,0xcc,0xcc,0xcc,0x96,0xcc,0xcc,0xcc,
    0x48,0xcc,0xcc,0x85,0xcc,0x49,0xcc,0xcc,0x59,0xcc,0xcc,0x96,0xcc,0x5A,0xcc,0xcc,
    0x48,0xcc,0x85,0xcc,0xcc,0xcc,0x49,0xcc,0x59,0xcc,0x96,0xcc,0xcc,0xcc,0x5A,0xcc,
    0x48,0x85,0xcc,0xcc,0xcc,0xcc,0xcc,0x49,0x59,0x96,0xcc,0xcc,0xcc,0xcc,0xcc,0x5A,
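/* needs[]: nonzero for plane indices that must first be synthesised into the temporary buffers (see MC_STRIDE below); the requirements of both selected planes are OR-ed into b. */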
static const uint8_t needs[16]={
int16_t *tmpI= tmpIt;
r= brane[dx + 16*dy]&15;
l= brane[dx + 16*dy]>>4;
b= needs[l] | needs[r];
for(x=0; x < b_w; x++){
    int a_1=src[x + HTAPS_MAX/2-4];
    int a0= src[x + HTAPS_MAX/2-3];
    int a1= src[x + HTAPS_MAX/2-2];
    int a2= src[x + HTAPS_MAX/2-1];
    int a3= src[x + HTAPS_MAX/2+0];
    int a4= src[x + HTAPS_MAX/2+1];
    int a5= src[x + HTAPS_MAX/2+2];
    int a6= src[x + HTAPS_MAX/2+3];
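    /* six-tap (1,-5,20,20,-5,1) interpolation kernel centred between a2 and a3; the HTAPS_MAX/2 offset accounts for the filter's left margin */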
    am= 20*(a2+a3) - 5*(a1+a4) + (a0+a5);

    if(am&(~255)) am= ~(am>>31);
for(y=0; y < b_h; y++){
    for(x=0; x < b_w+1; x++){
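        /* same kernel, normalised back to 8 bits: the taps sum to 32, so round with +16 and shift right by 5 */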
        am= (20*(a2+a3) - 5*(a1+a4) + (a0+a5) + 16)>>5;

        if(am&(~255)) am= ~(am>>31);
for(y=0; y < b_h; y++){
    for(x=0; x < b_w; x++){
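        /* this pass filters the full-precision intermediate, so the total gain is 32*32: round with +512 and shift right by 10 */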
        am= (20*(a2+a3) - 5*(a1+a4) + (a0+a5) + 512)>>10;

        if(am&(~255)) am= ~(am>>31);
hpel[ 6]= tmp2t[1] + 1;
hpel[ 9]= hpel[1] + 64;
hpel[10]= hpel[8] + 1;
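/* dxy selects a 2x2 group of neighbouring half-pel planes (hpel[dxy], +1, +4, +5); planes that had to be filtered live in the 64-byte-stride temporaries, which is what MC_STRIDE distinguishes. */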
#define MC_STRIDE(x) (needs[x] ? 64 : stride)

int dxy = dx / 8 + dy / 8 * 4;
const uint8_t *src2 = hpel[dxy + 1];
const uint8_t *src3 = hpel[dxy + 4];
const uint8_t *src4 = hpel[dxy + 5];
for(y=0; y < b_h; y++){
    for(x=0; x < b_w; x++){
        dst[x]= ((8-dx)*(8-dy)*src1[x] + dx*(8-dy)*src2[x]+
                 (8-dx)* dy *src3[x] + dx* dy *src4[x]+32)>>6;
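/* when only two planes are involved, blend them with an entry from the 8x8 weight table; the two weights sum to 8, hence the +4 rounding and >>3 */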
int a= weight[((dx&7) + (8*(dy&7)))];

for(y=0; y < b_h; y++){
    for(x=0; x < b_w; x++){
        dst[x]= (a*src1[x] + b*src2[x] + 4)>>3;
void ff_snow_pred_block(SnowContext *s, uint8_t *dst, uint8_t *tmp, ptrdiff_t stride,
                        int sx, int sy, int b_w, int b_h,
                        const BlockNode *block, int plane_index, int w, int h){
const unsigned color  = block->color[plane_index];
const unsigned color4 = color*0x01010101;
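/* intra blocks are filled with a constant colour; color4 replicates the byte into a 32-bit word so four pixels are stored per write, unrolled per block width below */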
for(y=0; y < b_h; y++){
    *(uint32_t*)&dst[0 + y*stride]= color4;
    *(uint32_t*)&dst[4 + y*stride]= color4;
    *(uint32_t*)&dst[8 + y*stride]= color4;
    *(uint32_t*)&dst[12+ y*stride]= color4;
    *(uint32_t*)&dst[16+ y*stride]= color4;
    *(uint32_t*)&dst[20+ y*stride]= color4;
    *(uint32_t*)&dst[24+ y*stride]= color4;
    *(uint32_t*)&dst[28+ y*stride]= color4;

for(y=0; y < b_h; y++){
    *(uint32_t*)&dst[0 + y*stride]= color4;
    *(uint32_t*)&dst[4 + y*stride]= color4;
    *(uint32_t*)&dst[8 + y*stride]= color4;
    *(uint32_t*)&dst[12+ y*stride]= color4;

for(y=0; y < b_h; y++){
    *(uint32_t*)&dst[0 + y*stride]= color4;
    *(uint32_t*)&dst[4 + y*stride]= color4;

for(y=0; y < b_h; y++){
    *(uint32_t*)&dst[0 + y*stride]= color4;

for(y=0; y < b_h; y++){
    for(x=0; x < b_w; x++){
int mx= block->mx*scale;
int my= block->my*scale;

const int tab_index= 3 - (b_w>>2) + (b_w>>4);
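/* tab_index maps block widths 16/8/4/2 to function-table slot 0/1/2/3; the assert below allows b_w==32 as the one exception */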
av_assert2((tab_index>=0 && tab_index<4) || b_w==32);
   || !(b_w == b_h || 2*b_w == b_h || b_w == 2*b_h)
mc_block(&s->plane[plane_index], dst, src, stride, b_w, b_h, dx, dy);

for(y=0; y<b_h; y+=16){
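/* mca() stamps out fixed half-pel copy helpers (mc_block_hpel<dx><dy><b_w>); the init code below plugs them, together with the h264 qpel routines, into the qpel and hpel function tables */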
#define mca(dx,dy,b_w)\
static void mc_block_hpel ## dx ## dy ## b_w(uint8_t *dst, const uint8_t *src, ptrdiff_t stride, int h){\
    mc_block(NULL, dst, src-(HTAPS_MAX/2-1)-(HTAPS_MAX/2-1)*stride, stride, b_w, b_w, dx, dy);\

    s->qdsp.put_qpel_pixels_tab       [0][dy+dx/4]=\
    s->qdsp.put_no_rnd_qpel_pixels_tab[0][dy+dx/4]=\
    s->h264qpel.put_h264_qpel_pixels_tab[0][dy+dx/4];\
    s->qdsp.put_qpel_pixels_tab       [1][dy+dx/4]=\
    s->qdsp.put_no_rnd_qpel_pixels_tab[1][dy+dx/4]=\
    s->h264qpel.put_h264_qpel_pixels_tab[1][dy+dx/4];

    s->hdsp.put_pixels_tab       [0][dy/4+dx/8]=\
    s->hdsp.put_no_rnd_pixels_tab[0][dy/4+dx/8]=\
    mc_block_hpel ## dx ## dy ## 16;\
    s->hdsp.put_pixels_tab       [1][dy/4+dx/8]=\
    s->hdsp.put_no_rnd_pixels_tab[1][dy/4+dx/8]=\
    mc_block_hpel ## dx ## dy ## 8;

int plane_index, level, orientation;
int ret, emu_buf_size;
for(plane_index=0; plane_index < s->nb_planes; plane_index++){
    for(orientation=level ? 1 : 0; orientation<4; orientation++){
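        /* each subband is half the parent size in both directions; the +!(orientation&1) / +!(orientation>1) terms decide which subbands get the rounding-up half of an odd dimension */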
        b->width = (w + !(orientation&1))>>1;
        b->height= (h + !(orientation>1))>>1;
#define USE_HALFPEL_PLANE 0

if (!halfpel[1][p] || !halfpel[2][p] || !halfpel[3][p]) {
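/* half-pel reference planes built with the same (1,-5,20,20,-5,1)/32 kernel: halfpel[1] is filtered horizontally, halfpel[2] vertically, and halfpel[3] covers the diagonal position with the same vertical kernel (applied, presumably, to the horizontally filtered plane) */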
halfpel[1][p][i]= (20*(src[i] + src[i+1]) - 5*(src[i-1] + src[i+2]) + (src[i-2] + src[i+3]) + 16 )>>5;
halfpel[2][p][i]= (20*(src[i] + src[i+ls]) - 5*(src[i-ls] + src[i+2*ls]) + (src[i-2*ls] + src[i+3*ls]) + 16 )>>5;
halfpel[3][p][i]= (20*(src[i] + src[i+ls]) - 5*(src[i-ls] + src[i+2*ls]) + (src[i-2*ls] + src[i+3*ls]) + 16 )>>5;
int plane_index, level, orientation, i;

for(plane_index=0; plane_index < MAX_PLANES; plane_index++){
    for(orientation=level ? 1 : 0; orientation<4; orientation++){