/* from samplecpy(): samples wider than 8 bits are stored big-endian in the
 * file, so each 16-bit value is read big-endian into the native-endian dst */
for (i = 0; i < n / 2; i++) {
    ((uint16_t *)dst)[i] = AV_RB16(src + 2 * i);
}
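AV_RB16() here is libavutil's read-big-endian-16-bit helper: PNM files with maxval above 255 store each sample as two bytes, most significant byte first. A minimal standalone sketch of the same idea, assuming nothing from FFmpeg (read_be16 and copy_be16_samples are illustrative names, not part of pnmdec.c):

    #include <stdint.h>

    /* read one 16-bit big-endian value, regardless of host endianness */
    static uint16_t read_be16(const uint8_t *p)
    {
        return (uint16_t)((p[0] << 8) | p[1]);
    }

    /* convert n bytes of big-endian 16-bit samples into native-endian values */
    static void copy_be16_samples(uint16_t *dst, const uint8_t *src, int n)
    {
        for (int i = 0; i < n / 2; i++)
            dst[i] = read_be16(src + 2 * i);
    }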
int buf_size = avpkt->size;
int i, j, k, n, linesize, h, upgrade = 0, is_mono = 0;
int components, sample_len, ret;
/* bytes per raster line, computed per pixel format (one case each from the
 * pix_fmt switch; the remaining cases are not part of this excerpt) */
n = avctx->width * 2;
n = avctx->width * 2;
n = avctx->width * 4;
n = (avctx->width + 7) >> 3;
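In these branches n is the size in bytes of one raster line for the negotiated pixel format: a 16-bit grayscale row takes width * 2 bytes, a four-byte-per-pixel format such as packed RGBA takes width * 4, and 1-bit monochrome packs eight pixels per byte, so for example a 13-pixel-wide bitmap row occupies (13 + 7) >> 3 = 2 bytes (the + 7 rounds up to a whole byte).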
/* sample-by-sample path: plain-text PNM (type < 4) and monochrome PAM */
if (s->type < 4 || (is_mono && s->type == 7)) {
    for (i = 0; i < avctx->height; i++) {
        for (j = 0; j < avctx->width * components; j++) {
            /* ... */
            for (k = 0; k < 5 && c <= 9; k += 1) {
                /* ... accumulate up to five decimal digits into v ... */
            }
            /* ... */
            if (sample_len == 16) {
                ((uint16_t *)ptr)[j] =
                    (((1 << sample_len) - 1) * v + (s->maxval >> 1)) / s->maxval;
            }
            /* ... */
        }
        if (sample_len != 16)
            /* ... */
    }
}
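In the branch above, samples are parsed one at a time: the k loop accumulates at most five decimal digits into v (c holds the current byte minus '0', so c <= 9 means it is still a digit, and five digits suffice for any maxval up to 65535), and the final expression rescales v from the file's 0..maxval range to the full 0..(1 << sample_len) - 1 range with rounding. A self-contained sketch of that rescale, using an illustrative helper name rather than anything from pnmdec.c:

    /* map v in [0, maxval] to [0, (1 << bits) - 1], rounding to nearest */
    static unsigned rescale_sample(unsigned v, unsigned maxval, unsigned bits)
    {
        return (((1u << bits) - 1) * v + (maxval >> 1)) / maxval;
    }

    /* e.g. rescale_sample(1000, 1000, 16) == 65535, rescale_sample(0, 1000, 16) == 0 */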
/* raw (binary) rasters: one input line is copied per iteration, widening the
 * samples when the output format has more depth than the file's maxval */
for (i = 0; i < avctx->height; i++) {
    /* ... */
    else if (upgrade == 1) {
        /* 8-bit output: f is roughly 255/maxval in Q7 fixed point */
        unsigned int j, f = (255 * 128 + s->maxval / 2) / s->maxval;
        for (j = 0; j < n; j++)
            /* ... */
    } else if (upgrade == 2) {
        /* 16-bit output: f is roughly 65535/maxval in Q15 fixed point */
        unsigned int j, v, f = (65535 * 32768 + s->maxval / 2) / s->maxval;
        for (j = 0; j < n / 2; j++) {
            /* ... */
            ((uint16_t *)ptr)[j] = (v * f + 16384) >> 15;
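The upgrade paths do the same rescaling but avoid a division per pixel: f is computed once per frame as a fixed-point approximation of the scale factor (roughly 255/maxval in Q7 for the 8-bit case, 65535/maxval in Q15 for the 16-bit case), and each sample is then multiplied, rounded and shifted back down. As a quick sanity check with an assumed 10-bit file (maxval = 1023): f = (65535 * 32768 + 511) / 1023 = 2099170, so v = 1023 gives (1023 * 2099170 + 16384) >> 15 = 65535 and v = 0 gives 0, i.e. the 0..1023 input range maps onto the full 0..65535 output range.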
/* planar YUV 4:2:0 output (pgmyuv): a full-size luma plane is followed by
 * two chroma planes at half width and half height */
unsigned char *ptr1, *ptr2;
/* ... */
for (i = 0; i < avctx->height; i++) {
    /* ... copy one luma line ... */
/* ... */
for (i = 0; i < h; i++) {
    /* ... copy one line of each half-size chroma plane ... */

/* 16-bit planar YUV output: every plane is rescaled to the full 16-bit range */
uint16_t *ptr1, *ptr2;
const int f = (65535 * 32768 + s->maxval / 2) / s->maxval;
/* ... */
n = avctx->width * 2;
/* ... */
for (i = 0; i < avctx->height; i++) {
    for (j = 0; j < n / 2; j++) {
        /* ... */
        ((uint16_t *)ptr)[j] = (v * f + 16384) >> 15;
/* ... */
ptr1 = (uint16_t *)p->data[1];
ptr2 = (uint16_t *)p->data[2];
/* ... */
for (i = 0; i < h; i++) {
    for (j = 0; j < n / 2; j++) {
        /* ... */
        ptr1[j] = (v * f + 16384) >> 15;
    /* ... */
    for (j = 0; j < n / 2; j++) {
        /* ... */
        ptr2[j] = (v * f + 16384) >> 15;
/* one AVCodec registration per decoder variant follows each of these guards
 * (the struct bodies are not part of this excerpt) */
#if CONFIG_PGM_DECODER
    /* ... */
#if CONFIG_PGMYUV_DECODER
    /* ... */
#if CONFIG_PPM_DECODER
    /* ... */
#if CONFIG_PBM_DECODER
    /* ... */
#if CONFIG_PAM_DECODER
    /* ... */
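Each guard wraps the registration of one decoder variant (pgm, pgmyuv, ppm, pbm, pam), all of which share pnm_decode_frame. As a rough sketch of the shape of such an entry, assuming libavcodec's internal headers as in the real file: the field values below are illustrative, and the exact contents in pnmdec.c differ between FFmpeg versions.

    #if CONFIG_PGM_DECODER
    AVCodec ff_pgm_decoder = {
        .name           = "pgm",
        .long_name      = NULL_IF_CONFIG_SMALL("PGM (Portable GrayMap) image"),
        .type           = AVMEDIA_TYPE_VIDEO,
        .id             = AV_CODEC_ID_PGM,
        .priv_data_size = sizeof(PNMContext),
        .decode         = pnm_decode_frame,
        .capabilities   = AV_CODEC_CAP_DR1,
    };
    #endif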