#define HEADER_SIZE 27

#define MODEL2_SCALE       13
#define MODEL_SCALE        15
#define MODEL256_SEC_SCALE  9

typedef struct Model {
    int weights[16], freqs[16];

typedef struct Model256 {
    int weights[256], freqs[256];

#define RAC_BOTTOM 0x01000000

        m->freqs[i] = sum * scale >> 16;

    for (i = 0; i < m->num_syms - 1; i++)

    for (i = 0; i < 256; i++) {
    for (i = 0; i < 256; i++) {
        m->freqs[i] = sum * scale >> 16;

    while (sidx < m->sec_size)

    for (i = 0; i < 255; i++)
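The weight/frequency arrays and the `sum * scale >> 16` lines above point at adaptive models whose per-symbol weights are periodically rescaled into fixed-point cumulative frequencies. A minimal sketch of that idea, not the mss3.c code (the 16.16 scale factor mirrors the `>> 16` in the fragments; the function name and the assumption of a non-zero total weight are mine):

/* Illustrative sketch only: rebuild cumulative frequencies from weights so
 * that the total maps onto a fixed-point range of (1 << scale_bits).
 * Assumes at least one weight is non-zero. */
static void rebuild_freqs(int *weights, int *freqs, int num_syms, int scale_bits)
{
    int i, sum = 0, tot = 0;
    unsigned scale;

    for (i = 0; i < num_syms; i++)
        tot += weights[i];
    /* scale factor in 16.16 fixed point, as hinted by "sum * scale >> 16" */
    scale = ((1u << scale_bits) << 16) / tot;

    for (i = 0; i < num_syms; i++) {
        freqs[i] = sum * scale >> 16;   /* cumulative frequency of symbol i */
        sum     += weights[i];
    }
}

The same rescaling pattern shows up for both the 16-symbol Model and the 256-symbol Model256.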
    for (i = 0; i < FFMIN(size, 4); i++)

    c->range = 0xFFFFFFFF;

    } else if (!c->low) {

    bit = (c->low >= helper);
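These fragments come from the range coder: initialisation fills `low` from the first few input bytes and sets `range` to 0xFFFFFFFF, normalisation refills when `range` drops below RAC_BOTTOM, and a bit is decoded by comparing `low` against a scaled probability (`helper`). Below is a minimal, self-contained sketch of that scheme; the struct, the function names and the refill loop are my simplification, not the decoder's actual code:

#include <stdint.h>

/* Illustrative carry-less binary range decoder in the spirit of the
 * fragments above; only the field names low/range and the renormalisation
 * threshold follow the fragments. */
typedef struct SketchRC {
    const uint8_t *src, *src_end;
    uint32_t low, range;
} SketchRC;

#define SKETCH_RAC_BOTTOM 0x01000000

static void sketch_rac_normalise(SketchRC *c)
{
    /* Refill: shift in one input byte at a time until the range is large
     * enough again. */
    while (c->range < SKETCH_RAC_BOTTOM) {
        c->range <<= 8;
        c->low  <<= 8;
        if (c->src < c->src_end)
            c->low |= *c->src++;
    }
}

/* 'helper' is the size of the "bit == 0" subinterval, already scaled to the
 * current range by the caller. */
static int sketch_rac_get_bit(SketchRC *c, uint32_t helper)
{
    int bit = (c->low >= helper);

    if (bit) {
        c->low   -= helper;   /* drop the "zero" subinterval */
        c->range -= helper;
    } else {
        c->range  = helper;   /* shrink to the "zero" subinterval */
    }
    sketch_rac_normalise(c);
    return bit;
}

The `} else if (!c->low) {` line above suggests the real normalisation has a special case for a zero `low`, which this sketch omits.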
static int rac_get_model_sym(RangeCoder *c, Model *m)

    unsigned prob, prob2, helper;

    if (helper <= c->low) {

        end = (end2 + val) >> 1;
    } while (end != val);

    c->range = prob2 - prob;

static int rac_get_model256_sym(RangeCoder *c, Model256 *m)

    unsigned prob, prob2, helper;

    end = start = m->secondary[ssym + 1] + 1;
    while (end > val + 1) {
        ssym = (end + val) >> 1;
        if (m->freqs[ssym] <= helper) {

            end = (end + val) >> 1;

    c->range = prob2 - prob;
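Both symbol readers above narrow down the decoded symbol by bisecting the model's cumulative frequencies and then shrink the coder interval to `prob2 - prob`. A hedged sketch of the bisection step only; the helper value and the freqs[] layout are assumptions consistent with the fragments, not taken from the source:

/* Illustrative sketch: find the symbol whose cumulative-frequency interval
 * [freqs[sym], freqs[sym + 1]) contains 'helper', by binary search.
 * freqs[] is non-decreasing, freqs[0] == 0 and freqs[num_syms] == total. */
static int find_symbol(const int *freqs, int num_syms, unsigned helper)
{
    int lo = 0, hi = num_syms;

    while (hi - lo > 1) {
        int mid = (lo + hi) >> 1;
        if ((unsigned)freqs[mid] <= helper)
            lo = mid;                   /* symbol lies at or above mid */
        else
            hi = mid;                   /* symbol lies below mid */
    }
    return lo;
}

In the 256-symbol case the `m->secondary[ssym + 1]` lookup appears to pre-narrow the search to one bucket before this kind of bisection runs.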
    for (i = 0; i < block_size; i++, dst += stride)
        memset(dst, fc->fill_val, block_size);
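A fill block is just a solid area; the two lines above paint `block_size` rows with one value. The same thing as a self-contained helper (name and parameters are illustrative):

#include <string.h>
#include <stdint.h>
#include <stddef.h>

/* Paint a block_size x block_size area with a single value, row by row. */
static void fill_block(uint8_t *dst, ptrdiff_t stride, int block_size, int fill_val)
{
    int i;
    for (i = 0; i < block_size; i++, dst += stride)
        memset(dst, fill_val, block_size);
}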
    for (i = 0; i < vec_size; i++)

    memset(prev_line, 0, sizeof(prev_line));

    for (j = 0; j < block_size; j++) {
        for (i = 0; i < block_size; i++) {
    int skip, val, sign, pos = 1, zz_pos, dc;

    memset(block, 0, sizeof(*block) * 64);

    block[0] = dc * bc->qmat[0];

    block[zz_pos] = val * bc->qmat[zz_pos];

    return pos == 64 ? 0 : -1;
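decode_dct() clears a 64-coefficient block, writes the DC term, then places dequantised AC levels in zig-zag order (see ff_zigzag_direct in the reference list below), failing when the scan position overruns the block. A hedged sketch of that filling pattern, with the run/level values abstracted into arrays rather than decoded from the bitstream:

#include <string.h>
#include <stdint.h>

/* Illustrative sketch: dequantise levels into a 64-entry block in zig-zag
 * order. 'levels' and 'runs' stand in for values decoded from the bitstream;
 * 'zigzag' is a 64-entry scan table mapping scan order to raster order. */
static int fill_dct_block(int *block, const uint16_t *qmat,
                          const uint8_t *zigzag,
                          const int *levels, const int *runs, int ncoeffs)
{
    int pos = 1, i;

    memset(block, 0, sizeof(*block) * 64);
    block[0] = levels[0] * qmat[0];               /* DC coefficient */

    for (i = 1; i < ncoeffs; i++) {
        pos += runs[i];                           /* skip a run of zeros */
        if (pos >= 64)
            return -1;                            /* overran the block: corrupt input */
        int zz_pos = zigzag[pos];                 /* scan order -> raster order */
        block[zz_pos] = levels[i] * qmat[zz_pos]; /* dequantise the AC level */
        pos++;
    }
    return 0;
}

The `return pos == 64 ? 0 : -1;` line suggests the real decoder additionally requires the scan to finish exactly at position 64.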
static void decode_dct_block(RangeCoder *c, DCTBlockCoder *bc,
                             uint8_t *dst, ptrdiff_t stride, int block_size,
                             int *block, int mb_x, int mb_y)

    int nblocks = block_size >> 3;

    for (j = 0; j < nblocks; j++) {
        for (i = 0; i < nblocks; i++) {
            if (decode_dct(c, bc, block, bx + i, by + j)) {
static void decode_haar_block(RangeCoder *c, HaarBlockCoder *hc,
                              uint8_t *dst, ptrdiff_t stride,
                              int block_size, int *block)

    const int hsize = block_size >> 1;

    for (j = 0; j < block_size; j++) {
        for (i = 0; i < block_size; i++) {
            if (i < hsize && j < hsize)

            block[i] *= hc->scale;

    block -= block_size * block_size;

    for (j = 0; j < hsize; j++) {
        for (i = 0; i < hsize; i++) {

            B = block[i + hsize];
            C = block[i + hsize * block_size];
            D = block[i + hsize * block_size + hsize];

            dst[i * 2]              = av_clip_uint8(t1 - t2);
            dst[i * 2 + stride]     = av_clip_uint8(t1 + t2);
            dst[i * 2 + 1]          = av_clip_uint8(t3 - t4);
            dst[i * 2 + 1 + stride] = av_clip_uint8(t3 + t4);
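The last four stores are a 2x2 inverse Haar butterfly: four subband samples are combined with sums and differences and clipped to 8 bits to produce a 2x2 block of output pixels. A minimal sketch of one such butterfly; the assignment of A..D to particular subbands and the exact sign pattern are assumptions, since the fragments only show the final clipped stores:

#include <stdint.h>
#include <stddef.h>

static uint8_t clip_uint8(int v)
{
    return v < 0 ? 0 : v > 255 ? 255 : v;
}

/* Illustrative 2x2 inverse Haar step: A is taken as the average band and
 * B, C, D as the detail bands for one output 2x2 block. */
static void haar_synth_2x2(uint8_t *dst, ptrdiff_t stride,
                           int A, int B, int C, int D)
{
    int t1 = A + B, t2 = C + D;   /* combine for the top row */
    int t3 = A - B, t4 = C - D;   /* combine for the bottom row */

    dst[0]          = clip_uint8(t1 + t2);
    dst[1]          = clip_uint8(t1 - t2);
    dst[stride]     = clip_uint8(t3 + t4);
    dst[stride + 1] = clip_uint8(t3 - t4);
}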
    for (i = 0; i < 3; i++) {
        for (j = 0; j < 5; j++)

        for (j = 0; j < 125; j++)

    for (i = 0; i < 3; i++) {
        for (j = 0; j < 5; j++)

        for (j = 0; j < 125; j++)
    int buf_size = avpkt->size;

    int dec_width, dec_height, dec_x, dec_y, quality, keyframe;
    int x, y, i, mb_width, mb_height, blk_size, btype;

           "Frame should have at least %d bytes, got %d instead\n",

    keyframe = bytestream2_get_be32(&gb);
    if (keyframe & ~0x301) {

    keyframe = !(keyframe & 1);

    dec_x      = bytestream2_get_be16(&gb);
    dec_y      = bytestream2_get_be16(&gb);
    dec_width  = bytestream2_get_be16(&gb);
    dec_height = bytestream2_get_be16(&gb);

    if (dec_x + dec_width > avctx->width ||
        dec_y + dec_height > avctx->height ||
        (dec_width | dec_height) & 0xF) {

               dec_width, dec_height, dec_x, dec_y);

    quality = bytestream2_get_byte(&gb);
    if (quality < 1 || quality > 100) {
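The header parsing above reads a 32-bit flag word (bit 0, inverted, gives the keyframe flag, and only bits within 0x301 may be set), a 16-bit x/y/width/height rectangle that must lie inside the coded frame and be 16-pixel aligned, and a 1-100 quality value. A hedged sketch of those checks using plain big-endian reads instead of bytestream2_*; the byte offsets assume the fields are packed back to back, which the fragments do not confirm, and HEADER_SIZE is 27, so the real header carries more fields than shown here:

#include <stdint.h>

static unsigned rd_be32(const uint8_t *p) { return (unsigned)p[0] << 24 | p[1] << 16 | p[2] << 8 | p[3]; }
static unsigned rd_be16(const uint8_t *p) { return p[0] << 8 | p[1]; }

/* Illustrative parse of the header fields visible in the fragments. */
static int parse_header(const uint8_t *buf, int buf_size,
                        int coded_width, int coded_height,
                        int *keyframe, int *quality,
                        int *dec_x, int *dec_y, int *dec_w, int *dec_h)
{
    if (buf_size < 27)                      /* HEADER_SIZE */
        return -1;

    unsigned flags = rd_be32(buf);
    if (flags & ~0x301u)                    /* only these header bits are known */
        return -1;
    *keyframe = !(flags & 1);

    *dec_x = rd_be16(buf + 4);
    *dec_y = rd_be16(buf + 6);
    *dec_w = rd_be16(buf + 8);
    *dec_h = rd_be16(buf + 10);
    if (*dec_x + *dec_w > coded_width  ||
        *dec_y + *dec_h > coded_height ||
        ((*dec_w | *dec_h) & 0xF))          /* region must be 16-aligned */
        return -1;

    *quality = buf[12];
    if (*quality < 1 || *quality > 100)
        return -1;
    return 0;
}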
    mb_width  = dec_width  >> 4;
    mb_height = dec_height >> 4;

    for (y = 0; y < mb_height; y++) {
        for (x = 0; x < mb_width; x++) {
            for (i = 0; i < 3; i++) {

                                  dst[i] + x * blk_size,
                                  dst[i] + x * blk_size,
                                  dst[i] + x * blk_size,
                                  dst[i] + x * blk_size,
    for (i = 0; i < 3; i++)

    if ((avctx->width & 0xF) || (avctx->height & 0xF)) {

               "Image dimensions should be a multiple of 16.\n");

    for (i = 0; i < 3; i++) {
        int b_width  = avctx->width  >> (2 + !!i);
        int b_height = avctx->height >> (2 + !!i);
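The `>> (2 + !!i)` shift gives luma (plane 0) one less halving than the two 4:2:0 chroma planes. A small worked example of just that arithmetic (the 640x480 dimensions are arbitrary):

#include <stdio.h>

int main(void)
{
    int width = 640, height = 480;      /* example dimensions, multiple of 16 */

    for (int i = 0; i < 3; i++) {
        /* plane 0 is luma; planes 1-2 are half-size chroma, hence the !!i */
        int b_width  = width  >> (2 + !!i);
        int b_height = height >> (2 + !!i);
        printf("plane %d: %d x %d units\n", i, b_width, b_height);
    }
    return 0;
}

This yields 160 x 120 units for plane 0 and 80 x 60 for planes 1 and 2, matching the 2x2 chroma subsampling of the planar YUV 4:2:0 format listed below.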
void ff_mss34_gen_quant_mat(uint16_t *qmat, int quality, int luma)
Generate quantisation matrix for given quality.
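ff_mss34_gen_quant_mat() is only declared here, so the sketch below shows the usual JPEG-style way such a matrix is derived from a 1-100 quality value and a base table; it is a convention-based illustration, not necessarily the exact formula used in mss34dsp.c, and `base_mat` is a placeholder for the shared luma/chroma base tables:

#include <stdint.h>

/* Hedged sketch of quality-scaled quantisation matrix generation in the
 * JPEG convention (quality 50 = base table, higher quality = finer steps). */
static void gen_quant_mat_sketch(uint16_t *qmat, const uint8_t *base_mat, int quality)
{
    int scale = quality < 50 ? 5000 / quality : 200 - 2 * quality;

    for (int i = 0; i < 64; i++) {
        int q = (base_mat[i] * scale + 50) / 100;
        qmat[i] = q < 1 ? 1 : q;        /* keep every step size at least 1 */
    }
}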
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
static void decode_image_block(RangeCoder *c, ImageBlockCoder *ic, uint8_t *dst, ptrdiff_t stride, int block_size)
static int decode_block_type(RangeCoder *c, BlockTypeContext *bt)
This structure describes decoded (raw) audio or video data.
#define MODEL256_SEC_SCALE
static void model2_reset(Model2 *m)
static av_cold int init(AVCodecContext *avctx)
ImageBlockCoder image_coder[3]
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
static av_always_inline void bytestream2_init(GetByteContext *g, const uint8_t *buf, int buf_size)
int16_t weights[MODEL_MAX_SYMS+1]
static void decode_haar_block(RangeCoder *c, HaarBlockCoder *hc, uint8_t *dst, ptrdiff_t stride, int block_size, int *block)
static int mss3_decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
static void decode(AVCodecContext *dec_ctx, AVPacket *pkt, AVFrame *frame, FILE *outfile)
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
Identical in function to av_frame_make_writable(), except it uses ff_get_buffer() to allocate the buf...
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
static av_cold int end(AVCodecContext *avctx)
static void rac_normalise(RangeCoder *c)
int av_frame_ref(AVFrame *dst, const AVFrame *src)
Set up a new reference to the data described by the source frame.
static int decode_dct(RangeCoder *c, DCTBlockCoder *bc, int *block, int bx, int by)
#define u(width, name, range_min, range_max)
static void reset_coders(MSS3Context *ctx, int quality)
static void rac_init(RangeCoder *c, const uint8_t *src, int size)
static void model256_update(Model256 *m, int val)
static av_cold void model_init(Model *m, int num_syms)
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
static av_always_inline void bytestream2_skip(GetByteContext *g, unsigned int size)
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
#define NULL_IF_CONFIG_SMALL(x)
Return NULL if CONFIG_SMALL is true, otherwise the argument without modification. ...
DCTBlockCoder dct_coder[3]
static av_always_inline unsigned int bytestream2_get_bytes_left(GetByteContext *g)
const char * name
Name of the codec implementation.
static av_cold void model256_init(Model256 *m)
HaarBlockCoder haar_coder[3]
void ff_mss34_dct_put(uint8_t *dst, ptrdiff_t stride, int *block)
Transform and output DCT block.
static const uint16_t fc[]
enum AVPictureType pict_type
Picture type of the frame.
static av_cold int mss3_decode_init(AVCodecContext *avctx)
int width
picture width / height.
static void decode_dct_block(RangeCoder *c, DCTBlockCoder *bc, uint8_t *dst, ptrdiff_t stride, int block_size, int *block, int mb_x, int mb_y)
static int rac_get_bits(RangeCoder *c, int nbits)
#define FFABS(a)
Absolute value. Note: INT_MIN / INT64_MIN result in undefined behavior as they are not representable ...
static void model_reset(Model *m)
static int rac_get_model256_sym(RangeCoder *c, Model256 *m)
Libavcodec external API header.
static int rac_get_model_sym(RangeCoder *c, Model *m)
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
main external API structure.
static void model_update(Model *m, int val)
static av_cold void init_coders(MSS3Context *ctx)
static void decode_fill_block(RangeCoder *c, FillBlockCoder *fc, uint8_t *dst, ptrdiff_t stride, int block_size)
static int rac_get_bit(RangeCoder *c)
static void model256_reset(Model256 *m)
const uint8_t ff_zigzag_direct[64]
FillBlockCoder fill_coder[3]
static av_cold int mss3_decode_end(AVCodecContext *avctx)
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
static void model2_update(Model2 *m, int bit)
planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
common internal api header.
static int decode_coeff(RangeCoder *c, Model *m)
int key_frame
1 -> keyframe, 0-> not
BlockTypeContext btype[3]
static int rac_get_model2_sym(RangeCoder *c, Model2 *m)
This structure stores compressed data.
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.