FFmpeg  4.0
decode.c
1 /*
2  * generic decoding-related code
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 #include <stdint.h>
22 #include <string.h>
23 
24 #include "config.h"
25 
26 #if CONFIG_ICONV
27 # include <iconv.h>
28 #endif
29 
30 #include "libavutil/avassert.h"
31 #include "libavutil/avstring.h"
32 #include "libavutil/bprint.h"
33 #include "libavutil/common.h"
34 #include "libavutil/frame.h"
35 #include "libavutil/hwcontext.h"
36 #include "libavutil/imgutils.h"
37 #include "libavutil/internal.h"
38 #include "libavutil/intmath.h"
39 
40 #include "avcodec.h"
41 #include "bytestream.h"
42 #include "decode.h"
43 #include "hwaccel.h"
44 #include "internal.h"
45 #include "thread.h"
46 
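/* Apply an AV_PKT_DATA_PARAM_CHANGE side-data payload to the codec context.
 * The payload is a little-endian flags word followed by the new channel
 * count, channel layout, sample rate and/or dimensions, depending on which
 * AV_SIDE_DATA_PARAM_CHANGE_* bits are set. Malformed side data only becomes
 * a hard error when AV_EF_EXPLODE is enabled. */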
47 static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
48 {
49  int size = 0, ret;
50  const uint8_t *data;
51  uint32_t flags;
52  int64_t val;
53 
54  data = av_packet_get_side_data(avpkt, AV_PKT_DATA_PARAM_CHANGE, &size);
55  if (!data)
56  return 0;
57 
58  if (!(avctx->codec->capabilities & AV_CODEC_CAP_PARAM_CHANGE)) {
59  av_log(avctx, AV_LOG_ERROR, "This decoder does not support parameter "
60  "changes, but PARAM_CHANGE side data was sent to it.\n");
61  ret = AVERROR(EINVAL);
62  goto fail2;
63  }
64 
65  if (size < 4)
66  goto fail;
67 
68  flags = bytestream_get_le32(&data);
69  size -= 4;
70 
71  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) {
72  if (size < 4)
73  goto fail;
74  val = bytestream_get_le32(&data);
75  if (val <= 0 || val > INT_MAX) {
76  av_log(avctx, AV_LOG_ERROR, "Invalid channel count");
77  ret = AVERROR_INVALIDDATA;
78  goto fail2;
79  }
80  avctx->channels = val;
81  size -= 4;
82  }
83  if (flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) {
84  if (size < 8)
85  goto fail;
86  avctx->channel_layout = bytestream_get_le64(&data);
87  size -= 8;
88  }
89  if (flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) {
90  if (size < 4)
91  goto fail;
92  val = bytestream_get_le32(&data);
93  if (val <= 0 || val > INT_MAX) {
94  av_log(avctx, AV_LOG_ERROR, "Invalid sample rate");
95  ret = AVERROR_INVALIDDATA;
96  goto fail2;
97  }
98  avctx->sample_rate = val;
99  size -= 4;
100  }
101  if (flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) {
102  if (size < 8)
103  goto fail;
104  avctx->width = bytestream_get_le32(&data);
105  avctx->height = bytestream_get_le32(&data);
106  size -= 8;
107  ret = ff_set_dimensions(avctx, avctx->width, avctx->height);
108  if (ret < 0)
109  goto fail2;
110  }
111 
112  return 0;
113 fail:
114  av_log(avctx, AV_LOG_ERROR, "PARAM_CHANGE side data too small.\n");
115  ret = AVERROR_INVALIDDATA;
116 fail2:
117  if (ret < 0) {
118  av_log(avctx, AV_LOG_ERROR, "Error applying parameter changes.\n");
119  if (avctx->err_recognition & AV_EF_EXPLODE)
120  return ret;
121  }
122  return 0;
123 }
124 
125 static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
126 {
127  int ret = 0;
128 
129  av_packet_unref(avci->last_pkt_props);
130  if (pkt) {
131  ret = av_packet_copy_props(avci->last_pkt_props, pkt);
132  if (!ret)
133  avci->last_pkt_props->size = pkt->size; // HACK: Needed for ff_decode_frame_props().
134  }
135  return ret;
136 }
137 
138 static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
139 {
140  int ret;
141 
142  /* move the original frame to our backup */
143  av_frame_unref(avci->to_free);
144  av_frame_move_ref(avci->to_free, frame);
145 
146  /* now copy everything except the AVBufferRefs back
147  * note that we make a COPY of the side data, so calling av_frame_free() on
148  * the caller's frame will work properly */
149  ret = av_frame_copy_props(frame, avci->to_free);
150  if (ret < 0)
151  return ret;
152 
153  memcpy(frame->data, avci->to_free->data, sizeof(frame->data));
154  memcpy(frame->linesize, avci->to_free->linesize, sizeof(frame->linesize));
155  if (avci->to_free->extended_data != avci->to_free->data) {
156  int planes = avci->to_free->channels;
157  int size = planes * sizeof(*frame->extended_data);
158 
159  if (!size) {
160  av_frame_unref(frame);
161  return AVERROR_BUG;
162  }
163 
164  frame->extended_data = av_malloc(size);
165  if (!frame->extended_data) {
166  av_frame_unref(frame);
167  return AVERROR(ENOMEM);
168  }
169  memcpy(frame->extended_data, avci->to_free->extended_data,
170  size);
171  } else
172  frame->extended_data = frame->data;
173 
174  frame->format = avci->to_free->format;
175  frame->width = avci->to_free->width;
176  frame->height = avci->to_free->height;
177  frame->channel_layout = avci->to_free->channel_layout;
178  frame->nb_samples = avci->to_free->nb_samples;
179  frame->channels = avci->to_free->channels;
180 
181  return 0;
182 }
183 
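/* Lazily build the chain of bitstream filters requested by the decoder via
 * AVCodec.bsfs (a comma-separated list, or "null" when none is needed),
 * wiring each filter's input parameters and time base to the previous one. */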
184 static int bsfs_init(AVCodecContext *avctx)
185 {
186  AVCodecInternal *avci = avctx->internal;
187  DecodeFilterContext *s = &avci->filter;
188  const char *bsfs_str;
189  int ret;
190 
191  if (s->nb_bsfs)
192  return 0;
193 
194  bsfs_str = avctx->codec->bsfs ? avctx->codec->bsfs : "null";
195  while (bsfs_str && *bsfs_str) {
196  AVBSFContext **tmp;
197  const AVBitStreamFilter *filter;
198  char *bsf;
199 
200  bsf = av_get_token(&bsfs_str, ",");
201  if (!bsf) {
202  ret = AVERROR(ENOMEM);
203  goto fail;
204  }
205 
206  filter = av_bsf_get_by_name(bsf);
207  if (!filter) {
208  av_log(avctx, AV_LOG_ERROR, "A non-existing bitstream filter %s "
209  "requested by a decoder. This is a bug, please report it.\n",
210  bsf);
211  ret = AVERROR_BUG;
212  av_freep(&bsf);
213  goto fail;
214  }
215  av_freep(&bsf);
216 
217  tmp = av_realloc_array(s->bsfs, s->nb_bsfs + 1, sizeof(*s->bsfs));
218  if (!tmp) {
219  ret = AVERROR(ENOMEM);
220  goto fail;
221  }
222  s->bsfs = tmp;
223  s->nb_bsfs++;
224 
225  ret = av_bsf_alloc(filter, &s->bsfs[s->nb_bsfs - 1]);
226  if (ret < 0)
227  goto fail;
228 
229  if (s->nb_bsfs == 1) {
230  /* We do not currently have an API for passing the input timebase into decoders,
231  * but no filters used here should actually need it.
232  * So we make up some plausible-looking number (the MPEG 90kHz timebase) */
233  s->bsfs[s->nb_bsfs - 1]->time_base_in = (AVRational){ 1, 90000 };
234  ret = avcodec_parameters_from_context(s->bsfs[s->nb_bsfs - 1]->par_in,
235  avctx);
236  } else {
237  s->bsfs[s->nb_bsfs - 1]->time_base_in = s->bsfs[s->nb_bsfs - 2]->time_base_out;
238  ret = avcodec_parameters_copy(s->bsfs[s->nb_bsfs - 1]->par_in,
239  s->bsfs[s->nb_bsfs - 2]->par_out);
240  }
241  if (ret < 0)
242  goto fail;
243 
244  ret = av_bsf_init(s->bsfs[s->nb_bsfs - 1]);
245  if (ret < 0)
246  goto fail;
247  }
248 
249  return 0;
250 fail:
251  ff_decode_bsfs_uninit(avctx);
252  return ret;
253 }
254 
255 /* try to get one output packet from the filter chain */
256 static int bsfs_poll(AVCodecContext *avctx, AVPacket *pkt)
257 {
258  DecodeFilterContext *s = &avctx->internal->filter;
259  int idx, ret;
260 
261  /* start with the last filter in the chain */
262  idx = s->nb_bsfs - 1;
263  while (idx >= 0) {
264  /* request a packet from the currently selected filter */
265  ret = av_bsf_receive_packet(s->bsfs[idx], pkt);
266  if (ret == AVERROR(EAGAIN)) {
267  /* no packets available, try the next filter up the chain */
268  ret = 0;
269  idx--;
270  continue;
271  } else if (ret < 0 && ret != AVERROR_EOF) {
272  return ret;
273  }
274 
275  /* got a packet or EOF -- pass it to the caller or to the next filter
276  * down the chain */
277  if (idx == s->nb_bsfs - 1) {
278  return ret;
279  } else {
280  idx++;
281  ret = av_bsf_send_packet(s->bsfs[idx], ret < 0 ? NULL : pkt);
282  if (ret < 0) {
283  av_log(avctx, AV_LOG_ERROR,
284  "Error pre-processing a packet before decoding\n");
285  av_packet_unref(pkt);
286  return ret;
287  }
288  }
289  }
290 
291  return AVERROR(EAGAIN);
292 }
293 
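/* Pull the next filtered packet for the decoder: poll the bsf chain, record
 * draining on EOF, then extract packet properties and apply any parameter
 * changes carried as side data. */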
294 int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
295 {
296  AVCodecInternal *avci = avctx->internal;
297  int ret;
298 
299  if (avci->draining)
300  return AVERROR_EOF;
301 
302  ret = bsfs_poll(avctx, pkt);
303  if (ret == AVERROR_EOF)
304  avci->draining = 1;
305  if (ret < 0)
306  return ret;
307 
308  ret = extract_packet_props(avctx->internal, pkt);
309  if (ret < 0)
310  goto finish;
311 
312  ret = apply_param_change(avctx, pkt);
313  if (ret < 0)
314  goto finish;
315 
316  if (avctx->codec->receive_frame)
317  avci->compat_decode_consumed += pkt->size;
318 
319  return 0;
320 finish:
321  av_packet_unref(pkt);
322  return ret;
323 }
324 
325 /**
326  * Attempt to guess proper monotonic timestamps for decoded video frames
327  * which might have incorrect times. Input timestamps may wrap around, in
328  * which case the output will as well.
329  *
330  * @param pts the pts field of the decoded AVPacket, as passed through
331  * AVFrame.pts
332  * @param dts the dts field of the decoded AVPacket
333  * @return one of the input values, may be AV_NOPTS_VALUE
334  */
335 static int64_t guess_correct_pts(AVCodecContext *ctx,
336  int64_t reordered_pts, int64_t dts)
337 {
338  int64_t pts = AV_NOPTS_VALUE;
339 
340  if (dts != AV_NOPTS_VALUE) {
341  ctx->pts_correction_num_faulty_dts += dts <= ctx->pts_correction_last_dts;
342  ctx->pts_correction_last_dts = dts;
343  } else if (reordered_pts != AV_NOPTS_VALUE)
344  ctx->pts_correction_last_dts = reordered_pts;
345 
346  if (reordered_pts != AV_NOPTS_VALUE) {
347  ctx->pts_correction_num_faulty_pts += reordered_pts <= ctx->pts_correction_last_pts;
348  ctx->pts_correction_last_pts = reordered_pts;
349  } else if(dts != AV_NOPTS_VALUE)
350  ctx->pts_correction_last_pts = dts;
351 
352  if ((ctx->pts_correction_num_faulty_pts<=ctx->pts_correction_num_faulty_dts || dts == AV_NOPTS_VALUE)
353  && reordered_pts != AV_NOPTS_VALUE)
354  pts = reordered_pts;
355  else
356  pts = dts;
357 
358  return pts;
359 }
360 
361 /*
362  * The core of the receive_frame_wrapper for the decoders implementing
363  * the simple API. Certain decoders might consume partial packets without
364  * returning any output, so this function needs to be called in a loop until it
365  * returns EAGAIN.
366  **/
367 static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
368 {
369  AVCodecInternal *avci = avctx->internal;
370  DecodeSimpleContext *ds = &avci->ds;
371  AVPacket *pkt = ds->in_pkt;
372  // copy to ensure we do not change pkt
373  int got_frame, actual_got_frame;
374  int ret;
375 
376  if (!pkt->data && !avci->draining) {
377  av_packet_unref(pkt);
378  ret = ff_decode_get_packet(avctx, pkt);
379  if (ret < 0 && ret != AVERROR_EOF)
380  return ret;
381  }
382 
383  // Some codecs (at least wma lossless) will crash when feeding drain packets
384  // after EOF was signaled.
385  if (avci->draining_done)
386  return AVERROR_EOF;
387 
388  if (!pkt->data &&
389  !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
390  avctx->active_thread_type & FF_THREAD_FRAME))
391  return AVERROR_EOF;
392 
393  got_frame = 0;
394 
395  if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
396  ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
397  } else {
398  ret = avctx->codec->decode(avctx, frame, &got_frame, pkt);
399 
400  if (!(avctx->codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
401  frame->pkt_dts = pkt->dts;
402  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
403  if(!avctx->has_b_frames)
404  frame->pkt_pos = pkt->pos;
405  //FIXME these should be under if(!avctx->has_b_frames)
406  /* get_buffer is supposed to set frame parameters */
407  if (!(avctx->codec->capabilities & AV_CODEC_CAP_DR1)) {
408  if (!frame->sample_aspect_ratio.num) frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
409  if (!frame->width) frame->width = avctx->width;
410  if (!frame->height) frame->height = avctx->height;
411  if (frame->format == AV_PIX_FMT_NONE) frame->format = avctx->pix_fmt;
412  }
413  }
414  }
415  emms_c();
416  actual_got_frame = got_frame;
417 
418  if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
419  if (frame->flags & AV_FRAME_FLAG_DISCARD)
420  got_frame = 0;
421  if (got_frame)
422  frame->best_effort_timestamp = guess_correct_pts(avctx,
423  frame->pts,
424  frame->pkt_dts);
425  } else if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
426  uint8_t *side;
427  int side_size;
428  uint32_t discard_padding = 0;
429  uint8_t skip_reason = 0;
430  uint8_t discard_reason = 0;
431 
432  if (ret >= 0 && got_frame) {
433  frame->best_effort_timestamp = guess_correct_pts(avctx,
434  frame->pts,
435  frame->pkt_dts);
436  if (frame->format == AV_SAMPLE_FMT_NONE)
437  frame->format = avctx->sample_fmt;
438  if (!frame->channel_layout)
439  frame->channel_layout = avctx->channel_layout;
440  if (!frame->channels)
441  frame->channels = avctx->channels;
442  if (!frame->sample_rate)
443  frame->sample_rate = avctx->sample_rate;
444  }
445 
446  side= av_packet_get_side_data(avctx->internal->last_pkt_props, AV_PKT_DATA_SKIP_SAMPLES, &side_size);
447  if(side && side_size>=10) {
448  avctx->internal->skip_samples = AV_RL32(side) * avctx->internal->skip_samples_multiplier;
449  discard_padding = AV_RL32(side + 4);
450  av_log(avctx, AV_LOG_DEBUG, "skip %d / discard %d samples due to side data\n",
451  avctx->internal->skip_samples, (int)discard_padding);
452  skip_reason = AV_RL8(side + 8);
453  discard_reason = AV_RL8(side + 9);
454  }
455 
456  if ((frame->flags & AV_FRAME_FLAG_DISCARD) && got_frame &&
457  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
458  avctx->internal->skip_samples = FFMAX(0, avctx->internal->skip_samples - frame->nb_samples);
459  got_frame = 0;
460  }
461 
462  if (avctx->internal->skip_samples > 0 && got_frame &&
463  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
464  if(frame->nb_samples <= avctx->internal->skip_samples){
465  got_frame = 0;
466  avctx->internal->skip_samples -= frame->nb_samples;
467  av_log(avctx, AV_LOG_DEBUG, "skip whole frame, skip left: %d\n",
468  avctx->internal->skip_samples);
469  } else {
470  av_samples_copy(frame->extended_data, frame->extended_data, 0, avctx->internal->skip_samples,
471  frame->nb_samples - avctx->internal->skip_samples, avctx->channels, frame->format);
472  if(avctx->pkt_timebase.num && avctx->sample_rate) {
473  int64_t diff_ts = av_rescale_q(avctx->internal->skip_samples,
474  (AVRational){1, avctx->sample_rate},
475  avctx->pkt_timebase);
476  if(frame->pts!=AV_NOPTS_VALUE)
477  frame->pts += diff_ts;
478 #if FF_API_PKT_PTS
479 FF_DISABLE_DEPRECATION_WARNINGS
480  if(frame->pkt_pts!=AV_NOPTS_VALUE)
481  frame->pkt_pts += diff_ts;
482 FF_ENABLE_DEPRECATION_WARNINGS
483 #endif
484  if(frame->pkt_dts!=AV_NOPTS_VALUE)
485  frame->pkt_dts += diff_ts;
486  if (frame->pkt_duration >= diff_ts)
487  frame->pkt_duration -= diff_ts;
488  } else {
489  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for skipped samples.\n");
490  }
491  av_log(avctx, AV_LOG_DEBUG, "skip %d/%d samples\n",
492  avctx->internal->skip_samples, frame->nb_samples);
493  frame->nb_samples -= avctx->internal->skip_samples;
494  avctx->internal->skip_samples = 0;
495  }
496  }
497 
498  if (discard_padding > 0 && discard_padding <= frame->nb_samples && got_frame &&
499  !(avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL)) {
500  if (discard_padding == frame->nb_samples) {
501  got_frame = 0;
502  } else {
503  if(avctx->pkt_timebase.num && avctx->sample_rate) {
504  int64_t diff_ts = av_rescale_q(frame->nb_samples - discard_padding,
505  (AVRational){1, avctx->sample_rate},
506  avctx->pkt_timebase);
507  frame->pkt_duration = diff_ts;
508  } else {
509  av_log(avctx, AV_LOG_WARNING, "Could not update timestamps for discarded samples.\n");
510  }
511  av_log(avctx, AV_LOG_DEBUG, "discard %d/%d samples\n",
512  (int)discard_padding, frame->nb_samples);
513  frame->nb_samples -= discard_padding;
514  }
515  }
516 
517  if ((avctx->flags2 & AV_CODEC_FLAG2_SKIP_MANUAL) && got_frame) {
518  AVFrameSideData *fside = av_frame_new_side_data(frame, AV_FRAME_DATA_SKIP_SAMPLES, 10);
519  if (fside) {
520  AV_WL32(fside->data, avctx->internal->skip_samples);
521  AV_WL32(fside->data + 4, discard_padding);
522  AV_WL8(fside->data + 8, skip_reason);
523  AV_WL8(fside->data + 9, discard_reason);
524  avctx->internal->skip_samples = 0;
525  }
526  }
527  }
528 
529  if (avctx->codec->type == AVMEDIA_TYPE_AUDIO &&
530  !avci->showed_multi_packet_warning &&
531  ret >= 0 && ret != pkt->size && !(avctx->codec->capabilities & AV_CODEC_CAP_SUBFRAMES)) {
532  av_log(avctx, AV_LOG_WARNING, "Multiple frames in a packet.\n");
533  avci->showed_multi_packet_warning = 1;
534  }
535 
536  if (!got_frame)
537  av_frame_unref(frame);
538 
539  if (ret >= 0 && avctx->codec->type == AVMEDIA_TYPE_VIDEO && !(avctx->flags & AV_CODEC_FLAG_TRUNCATED))
540  ret = pkt->size;
541 
542 #if FF_API_AVCTX_TIMEBASE
543  if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
544  avctx->time_base = av_inv_q(av_mul_q(avctx->framerate, (AVRational){avctx->ticks_per_frame, 1}));
545 #endif
546 
547  /* do not stop draining when actual_got_frame != 0 or ret < 0 */
548  /* got_frame == 0 but actual_got_frame != 0 when frame is discarded */
549  if (avctx->internal->draining && !actual_got_frame) {
550  if (ret < 0) {
551  /* prevent infinite loop if a decoder wrongly always return error on draining */
552  /* reasonable nb_errors_max = maximum b frames + thread count */
553  int nb_errors_max = 20 + (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME ?
554  avctx->thread_count : 1);
555 
556  if (avci->nb_draining_errors++ >= nb_errors_max) {
557  av_log(avctx, AV_LOG_ERROR, "Too many errors when draining, this is a bug. "
558  "Stop draining and force EOF.\n");
559  avci->draining_done = 1;
560  ret = AVERROR_BUG;
561  }
562  } else {
563  avci->draining_done = 1;
564  }
565  }
566 
567  avci->compat_decode_consumed += ret;
568 
569  if (ret >= pkt->size || ret < 0) {
570  av_packet_unref(pkt);
571  } else {
572  int consumed = ret;
573 
574  pkt->data += consumed;
575  pkt->size -= consumed;
576  avci->last_pkt_props->size -= consumed; // See extract_packet_props() comment.
577  pkt->pts = AV_NOPTS_VALUE;
578  pkt->dts = AV_NOPTS_VALUE;
579  avci->last_pkt_props->pts = AV_NOPTS_VALUE;
580  avci->last_pkt_props->dts = AV_NOPTS_VALUE;
581  }
582 
583  if (got_frame)
584  av_assert0(frame->buf[0]);
585 
586  return ret < 0 ? ret : 0;
587 }
588 
589 static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
590 {
591  int ret;
592 
593  while (!frame->buf[0]) {
594  ret = decode_simple_internal(avctx, frame);
595  if (ret < 0)
596  return ret;
597  }
598 
599  return 0;
600 }
601 
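/* Common receive path: use the codec's receive_frame() callback when present,
 * otherwise fall back to the simple decode() API, then run any per-frame
 * post-processing attached through FrameDecodeData. */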
602 static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
603 {
604  AVCodecInternal *avci = avctx->internal;
605  int ret;
606 
607  av_assert0(!frame->buf[0]);
608 
609  if (avctx->codec->receive_frame)
610  ret = avctx->codec->receive_frame(avctx, frame);
611  else
612  ret = decode_simple_receive_frame(avctx, frame);
613 
614  if (ret == AVERROR_EOF)
615  avci->draining_done = 1;
616 
617  if (!ret) {
618  /* the only case where decode data is not set should be decoders
619  * that do not call ff_get_buffer() */
620  av_assert0((frame->private_ref && frame->private_ref->size == sizeof(FrameDecodeData)) ||
621  !(avctx->codec->capabilities & AV_CODEC_CAP_DR1));
622 
623  if (frame->private_ref) {
624  FrameDecodeData *fdd = (FrameDecodeData*)frame->private_ref->data;
625 
626  if (fdd->post_process) {
627  ret = fdd->post_process(avctx, frame);
628  if (ret < 0) {
629  av_frame_unref(frame);
630  return ret;
631  }
632  }
633  }
634  }
635 
636  /* free the per-frame decode data */
637  av_buffer_unref(&frame->private_ref);
638 
639  return ret;
640 }
641 
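/* Public entry point of the send/receive decoding API. A minimal caller-side
 * sketch (not part of this file, error handling omitted):
 *
 *     avcodec_send_packet(avctx, pkt);
 *     while (avcodec_receive_frame(avctx, frame) == 0) {
 *         // use frame
 *     }
 *
 * Packets are run through the decoder's bitstream-filter chain before being
 * buffered; a NULL or empty packet switches the decoder into draining mode. */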
642 int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
643 {
644  AVCodecInternal *avci = avctx->internal;
645  int ret;
646 
647  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
648  return AVERROR(EINVAL);
649 
650  if (avctx->internal->draining)
651  return AVERROR_EOF;
652 
653  if (avpkt && !avpkt->size && avpkt->data)
654  return AVERROR(EINVAL);
655 
656  ret = bsfs_init(avctx);
657  if (ret < 0)
658  return ret;
659 
660  av_packet_unref(avci->buffer_pkt);
661  if (avpkt && (avpkt->data || avpkt->side_data_elems)) {
662  ret = av_packet_ref(avci->buffer_pkt, avpkt);
663  if (ret < 0)
664  return ret;
665  }
666 
667  ret = av_bsf_send_packet(avci->filter.bsfs[0], avci->buffer_pkt);
668  if (ret < 0) {
669  av_packet_unref(avci->buffer_pkt);
670  return ret;
671  }
672 
673  if (!avci->buffer_frame->buf[0]) {
674  ret = decode_receive_frame_internal(avctx, avci->buffer_frame);
675  if (ret < 0 && ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
676  return ret;
677  }
678 
679  return 0;
680 }
681 
682 static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
683 {
684  /* make sure we are noisy about decoders returning invalid cropping data */
685  if (frame->crop_left >= INT_MAX - frame->crop_right ||
686  frame->crop_top >= INT_MAX - frame->crop_bottom ||
687  (frame->crop_left + frame->crop_right) >= frame->width ||
688  (frame->crop_top + frame->crop_bottom) >= frame->height) {
689  av_log(avctx, AV_LOG_WARNING,
690  "Invalid cropping information set by a decoder: "
692  "(frame size %dx%d). This is a bug, please report it\n",
693  frame->crop_left, frame->crop_right, frame->crop_top, frame->crop_bottom,
694  frame->width, frame->height);
695  frame->crop_left = 0;
696  frame->crop_right = 0;
697  frame->crop_top = 0;
698  frame->crop_bottom = 0;
699  return 0;
700  }
701 
702  if (!avctx->apply_cropping)
703  return 0;
704 
705  return av_frame_apply_cropping(frame, avctx->flags & AV_CODEC_FLAG_UNALIGNED ?
706  AV_FRAME_CROP_UNALIGNED : 0);
707 }
708 
709 int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
710 {
711  AVCodecInternal *avci = avctx->internal;
712  int ret;
713 
714  av_frame_unref(frame);
715 
716  if (!avcodec_is_open(avctx) || !av_codec_is_decoder(avctx->codec))
717  return AVERROR(EINVAL);
718 
719  ret = bsfs_init(avctx);
720  if (ret < 0)
721  return ret;
722 
723  if (avci->buffer_frame->buf[0]) {
724  av_frame_move_ref(frame, avci->buffer_frame);
725  } else {
726  ret = decode_receive_frame_internal(avctx, frame);
727  if (ret < 0)
728  return ret;
729  }
730 
731  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
732  ret = apply_cropping(avctx, frame);
733  if (ret < 0) {
734  av_frame_unref(frame);
735  return ret;
736  }
737  }
738 
739  avctx->frame_number++;
740 
741  return 0;
742 }
743 
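/* Implementation of the deprecated avcodec_decode_video2()/avcodec_decode_audio4()
 * API on top of avcodec_send_packet()/avcodec_receive_frame(). Only one frame
 * can be returned per call, so decoders that produce more output per packet
 * trigger a warning and may have frames dropped. */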
744 static int compat_decode(AVCodecContext *avctx, AVFrame *frame,
745  int *got_frame, const AVPacket *pkt)
746 {
747  AVCodecInternal *avci = avctx->internal;
748  int ret = 0;
749 
750  av_assert0(avci->compat_decode_consumed == 0);
751 
752  if (avci->draining_done && pkt && pkt->size != 0) {
753  av_log(avctx, AV_LOG_WARNING, "Got unexpected packet after EOF\n");
754  avcodec_flush_buffers(avctx);
755  }
756 
757  *got_frame = 0;
758  avci->compat_decode = 1;
759 
760  if (avci->compat_decode_partial_size > 0 &&
761  avci->compat_decode_partial_size != pkt->size) {
762  av_log(avctx, AV_LOG_ERROR,
763  "Got unexpected packet size after a partial decode\n");
764  ret = AVERROR(EINVAL);
765  goto finish;
766  }
767 
768  if (!avci->compat_decode_partial_size) {
769  ret = avcodec_send_packet(avctx, pkt);
770  if (ret == AVERROR_EOF)
771  ret = 0;
772  else if (ret == AVERROR(EAGAIN)) {
773  /* we fully drain all the output in each decode call, so this should not
774  * ever happen */
775  ret = AVERROR_BUG;
776  goto finish;
777  } else if (ret < 0)
778  goto finish;
779  }
780 
781  while (ret >= 0) {
782  ret = avcodec_receive_frame(avctx, frame);
783  if (ret < 0) {
784  if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
785  ret = 0;
786  goto finish;
787  }
788 
789  if (frame != avci->compat_decode_frame) {
790  if (!avctx->refcounted_frames) {
791  ret = unrefcount_frame(avci, frame);
792  if (ret < 0)
793  goto finish;
794  }
795 
796  *got_frame = 1;
797  frame = avci->compat_decode_frame;
798  } else {
799  if (!avci->compat_decode_warned) {
800  av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_decode_* "
801  "API cannot return all the frames for this decoder. "
802  "Some frames will be dropped. Update your code to the "
803  "new decoding API to fix this.\n");
804  avci->compat_decode_warned = 1;
805  }
806  }
807 
808  if (avci->draining || (!avctx->codec->bsfs && avci->compat_decode_consumed < pkt->size))
809  break;
810  }
811 
812 finish:
813  if (ret == 0) {
814  /* if there are any bsfs then assume full packet is always consumed */
815  if (avctx->codec->bsfs)
816  ret = pkt->size;
817  else
818  ret = FFMIN(avci->compat_decode_consumed, pkt->size);
819  }
820  avci->compat_decode_consumed = 0;
821  avci->compat_decode_partial_size = (ret >= 0) ? pkt->size - ret : 0;
822 
823  return ret;
824 }
825 
826 int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
827  int *got_picture_ptr,
828  const AVPacket *avpkt)
829 {
830  return compat_decode(avctx, picture, got_picture_ptr, avpkt);
831 }
832 
833 int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx,
834  AVFrame *frame,
835  int *got_frame_ptr,
836  const AVPacket *avpkt)
837 {
838  return compat_decode(avctx, frame, got_frame_ptr, avpkt);
839 }
840 
841 static void get_subtitle_defaults(AVSubtitle *sub)
842 {
843  memset(sub, 0, sizeof(*sub));
844  sub->pts = AV_NOPTS_VALUE;
845 }
846 
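/* Recode a subtitle packet to UTF-8 with iconv when the user selected
 * FF_SUB_CHARENC_MODE_PRE_DECODER; without iconv support this is an error. */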
847 #define UTF8_MAX_BYTES 4 /* 5 and 6 bytes sequences should not be used */
848 static int recode_subtitle(AVCodecContext *avctx,
849  AVPacket *outpkt, const AVPacket *inpkt)
850 {
851 #if CONFIG_ICONV
852  iconv_t cd = (iconv_t)-1;
853  int ret = 0;
854  char *inb, *outb;
855  size_t inl, outl;
856  AVPacket tmp;
857 #endif
858 
859  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_PRE_DECODER || inpkt->size == 0)
860  return 0;
861 
862 #if CONFIG_ICONV
863  cd = iconv_open("UTF-8", avctx->sub_charenc);
864  av_assert0(cd != (iconv_t)-1);
865 
866  inb = inpkt->data;
867  inl = inpkt->size;
868 
869  if (inl >= INT_MAX / UTF8_MAX_BYTES - AV_INPUT_BUFFER_PADDING_SIZE) {
870  av_log(avctx, AV_LOG_ERROR, "Subtitles packet is too big for recoding\n");
871  ret = AVERROR(ENOMEM);
872  goto end;
873  }
874 
875  ret = av_new_packet(&tmp, inl * UTF8_MAX_BYTES);
876  if (ret < 0)
877  goto end;
878  outpkt->buf = tmp.buf;
879  outpkt->data = tmp.data;
880  outpkt->size = tmp.size;
881  outb = outpkt->data;
882  outl = outpkt->size;
883 
884  if (iconv(cd, &inb, &inl, &outb, &outl) == (size_t)-1 ||
885  iconv(cd, NULL, NULL, &outb, &outl) == (size_t)-1 ||
886  outl >= outpkt->size || inl != 0) {
887  ret = FFMIN(AVERROR(errno), -1);
888  av_log(avctx, AV_LOG_ERROR, "Unable to recode subtitle event \"%s\" "
889  "from %s to UTF-8\n", inpkt->data, avctx->sub_charenc);
890  av_packet_unref(&tmp);
891  goto end;
892  }
893  outpkt->size -= outl;
894  memset(outpkt->data + outpkt->size, 0, outl);
895 
896 end:
897  if (cd != (iconv_t)-1)
898  iconv_close(cd);
899  return ret;
900 #else
901  av_log(avctx, AV_LOG_ERROR, "requesting subtitles recoding without iconv");
902  return AVERROR(EINVAL);
903 #endif
904 }
905 
906 static int utf8_check(const uint8_t *str)
907 {
908  const uint8_t *byte;
909  uint32_t codepoint, min;
910 
911  while (*str) {
912  byte = str;
913  GET_UTF8(codepoint, *(byte++), return 0;);
914  min = byte - str == 1 ? 0 : byte - str == 2 ? 0x80 :
915  1 << (5 * (byte - str) - 4);
916  if (codepoint < min || codepoint >= 0x110000 ||
917  codepoint == 0xFFFE /* BOM */ ||
918  codepoint >= 0xD800 && codepoint <= 0xDFFF /* surrogates */)
919  return 0;
920  str = byte;
921  }
922  return 1;
923 }
924 
925 #if FF_API_ASS_TIMING
926 static void insert_ts(AVBPrint *buf, int ts)
927 {
928  if (ts == -1) {
929  av_bprintf(buf, "9:59:59.99,");
930  } else {
931  int h, m, s;
932 
933  h = ts/360000; ts -= 360000*h;
934  m = ts/ 6000; ts -= 6000*m;
935  s = ts/ 100; ts -= 100*s;
936  av_bprintf(buf, "%d:%02d:%02d.%02d,", h, m, s, ts);
937  }
938 }
939 
940 static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
941 {
942  int i;
943  AVBPrint buf;
944 
945  av_bprint_init(&buf, 0, AV_BPRINT_SIZE_UNLIMITED);
946 
947  for (i = 0; i < sub->num_rects; i++) {
948  char *final_dialog;
949  const char *dialog;
950  AVSubtitleRect *rect = sub->rects[i];
951  int ts_start, ts_duration = -1;
952  long int layer;
953 
954  if (rect->type != SUBTITLE_ASS || !strncmp(rect->ass, "Dialogue: ", 10))
955  continue;
956 
957  av_bprint_clear(&buf);
958 
959  /* skip ReadOrder */
960  dialog = strchr(rect->ass, ',');
961  if (!dialog)
962  continue;
963  dialog++;
964 
965  /* extract Layer or Marked */
966  layer = strtol(dialog, (char**)&dialog, 10);
967  if (*dialog != ',')
968  continue;
969  dialog++;
970 
971  /* rescale timing to ASS time base (ms) */
972  ts_start = av_rescale_q(pkt->pts, tb, av_make_q(1, 100));
973  if (pkt->duration != -1)
974  ts_duration = av_rescale_q(pkt->duration, tb, av_make_q(1, 100));
975  sub->end_display_time = FFMAX(sub->end_display_time, 10 * ts_duration);
976 
977  /* construct ASS (standalone file form with timestamps) string */
978  av_bprintf(&buf, "Dialogue: %ld,", layer);
979  insert_ts(&buf, ts_start);
980  insert_ts(&buf, ts_duration == -1 ? -1 : ts_start + ts_duration);
981  av_bprintf(&buf, "%s\r\n", dialog);
982 
983  final_dialog = av_strdup(buf.str);
984  if (!av_bprint_is_complete(&buf) || !final_dialog) {
985  av_freep(&final_dialog);
986  av_bprint_finalize(&buf, NULL);
987  return AVERROR(ENOMEM);
988  }
989  av_freep(&rect->ass);
990  rect->ass = final_dialog;
991  }
992 
993  av_bprint_finalize(&buf, NULL);
994  return 0;
995 }
996 #endif
997 
998 int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
999  int *got_sub_ptr,
1000  AVPacket *avpkt)
1001 {
1002  int i, ret = 0;
1003 
1004  if (!avpkt->data && avpkt->size) {
1005  av_log(avctx, AV_LOG_ERROR, "invalid packet: NULL data, size != 0\n");
1006  return AVERROR(EINVAL);
1007  }
1008  if (!avctx->codec)
1009  return AVERROR(EINVAL);
1010  if (avctx->codec->type != AVMEDIA_TYPE_SUBTITLE) {
1011  av_log(avctx, AV_LOG_ERROR, "Invalid media type for subtitles\n");
1012  return AVERROR(EINVAL);
1013  }
1014 
1015  *got_sub_ptr = 0;
1016  get_subtitle_defaults(sub);
1017 
1018  if ((avctx->codec->capabilities & AV_CODEC_CAP_DELAY) || avpkt->size) {
1019  AVPacket pkt_recoded = *avpkt;
1020 
1021  ret = recode_subtitle(avctx, &pkt_recoded, avpkt);
1022  if (ret < 0) {
1023  *got_sub_ptr = 0;
1024  } else {
1025  ret = extract_packet_props(avctx->internal, &pkt_recoded);
1026  if (ret < 0)
1027  return ret;
1028 
1029  if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
1030  sub->pts = av_rescale_q(avpkt->pts,
1031  avctx->pkt_timebase, AV_TIME_BASE_Q);
1032  ret = avctx->codec->decode(avctx, sub, got_sub_ptr, &pkt_recoded);
1033  av_assert1((ret >= 0) >= !!*got_sub_ptr &&
1034  !!*got_sub_ptr >= !!sub->num_rects);
1035 
1036 #if FF_API_ASS_TIMING
1037  if (avctx->sub_text_format == FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
1038  && *got_sub_ptr && sub->num_rects) {
1039  const AVRational tb = avctx->pkt_timebase.num ? avctx->pkt_timebase
1040  : avctx->time_base;
1041  int err = convert_sub_to_old_ass_form(sub, avpkt, tb);
1042  if (err < 0)
1043  ret = err;
1044  }
1045 #endif
1046 
1047  if (sub->num_rects && !sub->end_display_time && avpkt->duration &&
1048  avctx->pkt_timebase.num) {
1049  AVRational ms = { 1, 1000 };
1050  sub->end_display_time = av_rescale_q(avpkt->duration,
1051  avctx->pkt_timebase, ms);
1052  }
1053 
1054  if (avctx->codec_descriptor->props & AV_CODEC_PROP_BITMAP_SUB)
1055  sub->format = 0;
1056  else if (avctx->codec_descriptor->props & AV_CODEC_PROP_TEXT_SUB)
1057  sub->format = 1;
1058 
1059  for (i = 0; i < sub->num_rects; i++) {
1060  if (avctx->sub_charenc_mode != FF_SUB_CHARENC_MODE_IGNORE &&
1061  sub->rects[i]->ass && !utf8_check(sub->rects[i]->ass)) {
1062  av_log(avctx, AV_LOG_ERROR,
1063  "Invalid UTF-8 in decoded subtitles text; "
1064  "maybe missing -sub_charenc option\n");
1065  avsubtitle_free(sub);
1066  ret = AVERROR_INVALIDDATA;
1067  break;
1068  }
1069  }
1070 
1071  if (avpkt->data != pkt_recoded.data) { // did we recode?
1072  /* prevent from destroying side data from original packet */
1073  pkt_recoded.side_data = NULL;
1074  pkt_recoded.side_data_elems = 0;
1075 
1076  av_packet_unref(&pkt_recoded);
1077  }
1078  }
1079 
1080  if (*got_sub_ptr)
1081  avctx->frame_number++;
1082  }
1083 
1084  return ret;
1085 }
1086 
1087 enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx,
1088  const enum AVPixelFormat *fmt)
1089 {
1090  const AVPixFmtDescriptor *desc;
1091  const AVCodecHWConfig *config;
1092  int i, n;
1093 
1094  // If a device was supplied when the codec was opened, assume that the
1095  // user wants to use it.
1096  if (avctx->hw_device_ctx && avctx->codec->hw_configs) {
1097  AVHWDeviceContext *device_ctx =
1098  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1099  for (i = 0;; i++) {
1100  config = &avctx->codec->hw_configs[i]->public;
1101  if (!config)
1102  break;
1103  if (!(config->methods &
1104  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX))
1105  continue;
1106  if (device_ctx->type != config->device_type)
1107  continue;
1108  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1109  if (config->pix_fmt == fmt[n])
1110  return fmt[n];
1111  }
1112  }
1113  }
1114  // No device or other setup, so we have to choose from things which
1115  // don't need any other external information.
1116 
1117  // If the last element of the list is a software format, choose it
1118  // (this should be best software format if any exist).
1119  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1120  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1121  if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
1122  return fmt[n - 1];
1123 
1124  // Finally, traverse the list in order and choose the first entry
1125  // with no external dependencies (if there is no hardware configuration
1126  // information available then this just picks the first entry).
1127  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++) {
1128  for (i = 0;; i++) {
1129  config = avcodec_get_hw_config(avctx->codec, i);
1130  if (!config)
1131  break;
1132  if (config->pix_fmt == fmt[n])
1133  break;
1134  }
1135  if (!config) {
1136  // No specific config available, so the decoder must be able
1137  // to handle this format without any additional setup.
1138  return fmt[n];
1139  }
1140  if (config->methods & AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1141  // Usable with only internal setup.
1142  return fmt[n];
1143  }
1144  }
1145 
1146  // Nothing is usable, give up.
1147  return AV_PIX_FMT_NONE;
1148 }
1149 
1150 int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx,
1151  enum AVHWDeviceType dev_type)
1152 {
1153  AVHWDeviceContext *device_ctx;
1154  AVHWFramesContext *frames_ctx;
1155  int ret;
1156 
1157  if (!avctx->hwaccel)
1158  return AVERROR(ENOSYS);
1159 
1160  if (avctx->hw_frames_ctx)
1161  return 0;
1162  if (!avctx->hw_device_ctx) {
1163  av_log(avctx, AV_LOG_ERROR, "A hardware frames or device context is "
1164  "required for hardware accelerated decoding.\n");
1165  return AVERROR(EINVAL);
1166  }
1167 
1168  device_ctx = (AVHWDeviceContext *)avctx->hw_device_ctx->data;
1169  if (device_ctx->type != dev_type) {
1170  av_log(avctx, AV_LOG_ERROR, "Device type %s expected for hardware "
1171  "decoding, but got %s.\n", av_hwdevice_get_type_name(dev_type),
1172  av_hwdevice_get_type_name(device_ctx->type));
1173  return AVERROR(EINVAL);
1174  }
1175 
1176  ret = avcodec_get_hw_frames_parameters(avctx,
1177  avctx->hw_device_ctx,
1178  avctx->hwaccel->pix_fmt,
1179  &avctx->hw_frames_ctx);
1180  if (ret < 0)
1181  return ret;
1182 
1183  frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1184 
1185 
1186  if (frames_ctx->initial_pool_size) {
1187  // We guarantee 4 base work surfaces. The function above guarantees 1
1188  // (the absolute minimum), so add the missing count.
1189  frames_ctx->initial_pool_size += 3;
1190  }
1191 
1192  ret = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1193  if (ret < 0) {
1194  av_buffer_unref(&avctx->hw_frames_ctx);
1195  return ret;
1196  }
1197 
1198  return 0;
1199 }
1200 
1201 int avcodec_get_hw_frames_parameters(AVCodecContext *avctx,
1202  AVBufferRef *device_ref,
1203  enum AVPixelFormat hw_pix_fmt,
1204  AVBufferRef **out_frames_ref)
1205 {
1206  AVBufferRef *frames_ref = NULL;
1207  const AVCodecHWConfigInternal *hw_config;
1208  const AVHWAccel *hwa;
1209  int i, ret;
1210 
1211  for (i = 0;; i++) {
1212  hw_config = avctx->codec->hw_configs[i];
1213  if (!hw_config)
1214  return AVERROR(ENOENT);
1215  if (hw_config->public.pix_fmt == hw_pix_fmt)
1216  break;
1217  }
1218 
1219  hwa = hw_config->hwaccel;
1220  if (!hwa || !hwa->frame_params)
1221  return AVERROR(ENOENT);
1222 
1223  frames_ref = av_hwframe_ctx_alloc(device_ref);
1224  if (!frames_ref)
1225  return AVERROR(ENOMEM);
1226 
1227  ret = hwa->frame_params(avctx, frames_ref);
1228  if (ret >= 0) {
1229  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)frames_ref->data;
1230 
1231  if (frames_ctx->initial_pool_size) {
1232  // If the user has requested that extra output surfaces be
1233  // available then add them here.
1234  if (avctx->extra_hw_frames > 0)
1235  frames_ctx->initial_pool_size += avctx->extra_hw_frames;
1236 
1237  // If frame threading is enabled then an extra surface per thread
1238  // is also required.
1239  if (avctx->active_thread_type & FF_THREAD_FRAME)
1240  frames_ctx->initial_pool_size += avctx->thread_count;
1241  }
1242 
1243  *out_frames_ref = frames_ref;
1244  } else {
1245  av_buffer_unref(&frames_ref);
1246  }
1247  return ret;
1248 }
1249 
1250 static int hwaccel_init(AVCodecContext *avctx,
1251  const AVCodecHWConfigInternal *hw_config)
1252 {
1253  const AVHWAccel *hwaccel;
1254  int err;
1255 
1256  hwaccel = hw_config->hwaccel;
1257  if (hwaccel->capabilities & AV_HWACCEL_CODEC_CAP_EXPERIMENTAL &&
1258  avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL) {
1259  av_log(avctx, AV_LOG_WARNING, "Ignoring experimental hwaccel: %s\n",
1260  hwaccel->name);
1261  return AVERROR_PATCHWELCOME;
1262  }
1263 
1264  if (hwaccel->priv_data_size) {
1265  avctx->internal->hwaccel_priv_data =
1266  av_mallocz(hwaccel->priv_data_size);
1267  if (!avctx->internal->hwaccel_priv_data)
1268  return AVERROR(ENOMEM);
1269  }
1270 
1271  avctx->hwaccel = hwaccel;
1272  if (hwaccel->init) {
1273  err = hwaccel->init(avctx);
1274  if (err < 0) {
1275  av_log(avctx, AV_LOG_ERROR, "Failed setup for format %s: "
1276  "hwaccel initialisation returned error.\n",
1277  av_get_pix_fmt_name(hw_config->public.pix_fmt));
1278  av_freep(&avctx->internal->hwaccel_priv_data);
1279  avctx->hwaccel = NULL;
1280  return err;
1281  }
1282  }
1283 
1284  return 0;
1285 }
1286 
1287 static void hwaccel_uninit(AVCodecContext *avctx)
1288 {
1289  if (avctx->hwaccel && avctx->hwaccel->uninit)
1290  avctx->hwaccel->uninit(avctx);
1291 
1292  av_freep(&avctx->internal->hwaccel_priv_data);
1293 
1294  avctx->hwaccel = NULL;
1295 
1296  av_buffer_unref(&avctx->hw_frames_ctx);
1297 }
1298 
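/* Negotiate the output pixel format with the caller's get_format() callback,
 * validating the choice against the codec's hardware configurations and
 * initialising a hwaccel when one is required; unusable formats are removed
 * from the candidate list and the callback is invoked again. */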
1299 int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
1300 {
1301  const AVPixFmtDescriptor *desc;
1302  enum AVPixelFormat *choices;
1303  enum AVPixelFormat ret, user_choice;
1304  const AVCodecHWConfigInternal *hw_config;
1305  const AVCodecHWConfig *config;
1306  int i, n, err;
1307 
1308  // Find end of list.
1309  for (n = 0; fmt[n] != AV_PIX_FMT_NONE; n++);
1310  // Must contain at least one entry.
1311  av_assert0(n >= 1);
1312  // If a software format is available, it must be the last entry.
1313  desc = av_pix_fmt_desc_get(fmt[n - 1]);
1314  if (desc->flags & AV_PIX_FMT_FLAG_HWACCEL) {
1315  // No software format is available.
1316  } else {
1317  avctx->sw_pix_fmt = fmt[n - 1];
1318  }
1319 
1320  choices = av_malloc_array(n + 1, sizeof(*choices));
1321  if (!choices)
1322  return AV_PIX_FMT_NONE;
1323 
1324  memcpy(choices, fmt, (n + 1) * sizeof(*choices));
1325 
1326  for (;;) {
1327  // Remove the previous hwaccel, if there was one.
1328  hwaccel_uninit(avctx);
1329 
1330  user_choice = avctx->get_format(avctx, choices);
1331  if (user_choice == AV_PIX_FMT_NONE) {
1332  // Explicitly chose nothing, give up.
1333  ret = AV_PIX_FMT_NONE;
1334  break;
1335  }
1336 
1337  desc = av_pix_fmt_desc_get(user_choice);
1338  if (!desc) {
1339  av_log(avctx, AV_LOG_ERROR, "Invalid format returned by "
1340  "get_format() callback.\n");
1341  ret = AV_PIX_FMT_NONE;
1342  break;
1343  }
1344  av_log(avctx, AV_LOG_DEBUG, "Format %s chosen by get_format().\n",
1345  desc->name);
1346 
1347  for (i = 0; i < n; i++) {
1348  if (choices[i] == user_choice)
1349  break;
1350  }
1351  if (i == n) {
1352  av_log(avctx, AV_LOG_ERROR, "Invalid return from get_format(): "
1353  "%s not in possible list.\n", desc->name);
1354  break;
1355  }
1356 
1357  if (avctx->codec->hw_configs) {
1358  for (i = 0;; i++) {
1359  hw_config = avctx->codec->hw_configs[i];
1360  if (!hw_config)
1361  break;
1362  if (hw_config->public.pix_fmt == user_choice)
1363  break;
1364  }
1365  } else {
1366  hw_config = NULL;
1367  }
1368 
1369  if (!hw_config) {
1370  // No config available, so no extra setup required.
1371  ret = user_choice;
1372  break;
1373  }
1374  config = &hw_config->public;
1375 
1376  if (config->methods &
1377  AV_CODEC_HW_CONFIG_METHOD_HW_FRAMES_CTX &&
1378  avctx->hw_frames_ctx) {
1379  const AVHWFramesContext *frames_ctx =
1380  (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1381  if (frames_ctx->format != user_choice) {
1382  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1383  "does not match the format of the provided frames "
1384  "context.\n", desc->name);
1385  goto try_again;
1386  }
1387  } else if (config->methods &
1388  AV_CODEC_HW_CONFIG_METHOD_HW_DEVICE_CTX &&
1389  avctx->hw_device_ctx) {
1390  const AVHWDeviceContext *device_ctx =
1391  (AVHWDeviceContext*)avctx->hw_device_ctx->data;
1392  if (device_ctx->type != config->device_type) {
1393  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1394  "does not match the type of the provided device "
1395  "context.\n", desc->name);
1396  goto try_again;
1397  }
1398  } else if (config->methods &
1399  AV_CODEC_HW_CONFIG_METHOD_INTERNAL) {
1400  // Internal-only setup, no additional configuration.
1401  } else if (config->methods &
1402  AV_CODEC_HW_CONFIG_METHOD_AD_HOC) {
1403  // Some ad-hoc configuration we can't see and can't check.
1404  } else {
1405  av_log(avctx, AV_LOG_ERROR, "Invalid setup for format %s: "
1406  "missing configuration.\n", desc->name);
1407  goto try_again;
1408  }
1409  if (hw_config->hwaccel) {
1410  av_log(avctx, AV_LOG_DEBUG, "Format %s requires hwaccel "
1411  "initialisation.\n", desc->name);
1412  err = hwaccel_init(avctx, hw_config);
1413  if (err < 0)
1414  goto try_again;
1415  }
1416  ret = user_choice;
1417  break;
1418 
1419  try_again:
1420  av_log(avctx, AV_LOG_DEBUG, "Format %s not usable, retrying "
1421  "get_format() without it.\n", desc->name);
1422  for (i = 0; i < n; i++) {
1423  if (choices[i] == user_choice)
1424  break;
1425  }
1426  for (; i + 1 < n; i++)
1427  choices[i] = choices[i + 1];
1428  --n;
1429  }
1430 
1431  av_freep(&choices);
1432  return ret;
1433 }
1434 
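/* (Re)create the internal buffer pools used by the default get_buffer2()
 * implementation whenever the frame geometry, pixel/sample format or channel
 * count changes. */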
1435 static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
1436 {
1437  FramePool *pool = avctx->internal->pool;
1438  int i, ret;
1439 
1440  switch (avctx->codec_type) {
1441  case AVMEDIA_TYPE_VIDEO: {
1442  uint8_t *data[4];
1443  int linesize[4];
1444  int size[4] = { 0 };
1445  int w = frame->width;
1446  int h = frame->height;
1447  int tmpsize, unaligned;
1448 
1449  if (pool->format == frame->format &&
1450  pool->width == frame->width && pool->height == frame->height)
1451  return 0;
1452 
1453  avcodec_align_dimensions2(avctx, &w, &h, pool->stride_align);
1454 
1455  do {
1456  // NOTE: do not align linesizes individually, this breaks e.g. assumptions
1457  // that linesize[0] == 2*linesize[1] in the MPEG-encoder for 4:2:2
1458  ret = av_image_fill_linesizes(linesize, avctx->pix_fmt, w);
1459  if (ret < 0)
1460  return ret;
1461  // increase alignment of w for next try (rhs gives the lowest bit set in w)
1462  w += w & ~(w - 1);
1463 
1464  unaligned = 0;
1465  for (i = 0; i < 4; i++)
1466  unaligned |= linesize[i] % pool->stride_align[i];
1467  } while (unaligned);
1468 
1469  tmpsize = av_image_fill_pointers(data, avctx->pix_fmt, h,
1470  NULL, linesize);
1471  if (tmpsize < 0)
1472  return -1;
1473 
1474  for (i = 0; i < 3 && data[i + 1]; i++)
1475  size[i] = data[i + 1] - data[i];
1476  size[i] = tmpsize - (data[i] - data[0]);
1477 
1478  for (i = 0; i < 4; i++) {
1479  av_buffer_pool_uninit(&pool->pools[i]);
1480  pool->linesize[i] = linesize[i];
1481  if (size[i]) {
1482  pool->pools[i] = av_buffer_pool_init(size[i] + 16 + STRIDE_ALIGN - 1,
1483  CONFIG_MEMORY_POISONING ?
1484  NULL :
1485  av_buffer_allocz);
1486  if (!pool->pools[i]) {
1487  ret = AVERROR(ENOMEM);
1488  goto fail;
1489  }
1490  }
1491  }
1492  pool->format = frame->format;
1493  pool->width = frame->width;
1494  pool->height = frame->height;
1495 
1496  break;
1497  }
1498  case AVMEDIA_TYPE_AUDIO: {
1499  int ch = frame->channels; //av_get_channel_layout_nb_channels(frame->channel_layout);
1500  int planar = av_sample_fmt_is_planar(frame->format);
1501  int planes = planar ? ch : 1;
1502 
1503  if (pool->format == frame->format && pool->planes == planes &&
1504  pool->channels == ch && frame->nb_samples == pool->samples)
1505  return 0;
1506 
1507  av_buffer_pool_uninit(&pool->pools[0]);
1508  ret = av_samples_get_buffer_size(&pool->linesize[0], ch,
1509  frame->nb_samples, frame->format, 0);
1510  if (ret < 0)
1511  goto fail;
1512 
1513  pool->pools[0] = av_buffer_pool_init(pool->linesize[0], NULL);
1514  if (!pool->pools[0]) {
1515  ret = AVERROR(ENOMEM);
1516  goto fail;
1517  }
1518 
1519  pool->format = frame->format;
1520  pool->planes = planes;
1521  pool->channels = ch;
1522  pool->samples = frame->nb_samples;
1523  break;
1524  }
1525  default: av_assert0(0);
1526  }
1527  return 0;
1528 fail:
1529  for (i = 0; i < 4; i++)
1530  av_buffer_pool_uninit(&pool->pools[i]);
1531  pool->format = -1;
1532  pool->planes = pool->channels = pool->samples = 0;
1533  pool->width = pool->height = 0;
1534  return ret;
1535 }
1536 
1537 static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
1538 {
1539  FramePool *pool = avctx->internal->pool;
1540  int planes = pool->planes;
1541  int i;
1542 
1543  frame->linesize[0] = pool->linesize[0];
1544 
1545  if (planes > AV_NUM_DATA_POINTERS) {
1546  frame->extended_data = av_mallocz_array(planes, sizeof(*frame->extended_data));
1547  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
1548  frame->extended_buf = av_mallocz_array(frame->nb_extended_buf,
1549  sizeof(*frame->extended_buf));
1550  if (!frame->extended_data || !frame->extended_buf) {
1551  av_freep(&frame->extended_data);
1552  av_freep(&frame->extended_buf);
1553  return AVERROR(ENOMEM);
1554  }
1555  } else {
1556  frame->extended_data = frame->data;
1557  av_assert0(frame->nb_extended_buf == 0);
1558  }
1559 
1560  for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
1561  frame->buf[i] = av_buffer_pool_get(pool->pools[0]);
1562  if (!frame->buf[i])
1563  goto fail;
1564  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
1565  }
1566  for (i = 0; i < frame->nb_extended_buf; i++) {
1567  frame->extended_buf[i] = av_buffer_pool_get(pool->pools[0]);
1568  if (!frame->extended_buf[i])
1569  goto fail;
1570  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
1571  }
1572 
1573  if (avctx->debug & FF_DEBUG_BUFFERS)
1574  av_log(avctx, AV_LOG_DEBUG, "default_get_buffer called on frame %p", frame);
1575 
1576  return 0;
1577 fail:
1578  av_frame_unref(frame);
1579  return AVERROR(ENOMEM);
1580 }
1581 
1582 static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
1583 {
1584  FramePool *pool = s->internal->pool;
1585  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pic->format);
1586  int i;
1587 
1588  if (pic->data[0] || pic->data[1] || pic->data[2] || pic->data[3]) {
1589  av_log(s, AV_LOG_ERROR, "pic->data[*]!=NULL in avcodec_default_get_buffer\n");
1590  return -1;
1591  }
1592 
1593  if (!desc) {
1594  av_log(s, AV_LOG_ERROR,
1595  "Unable to get pixel format descriptor for format %s\n",
1596  av_get_pix_fmt_name(pic->format));
1597  return AVERROR(EINVAL);
1598  }
1599 
1600  memset(pic->data, 0, sizeof(pic->data));
1601  pic->extended_data = pic->data;
1602 
1603  for (i = 0; i < 4 && pool->pools[i]; i++) {
1604  pic->linesize[i] = pool->linesize[i];
1605 
1606  pic->buf[i] = av_buffer_pool_get(pool->pools[i]);
1607  if (!pic->buf[i])
1608  goto fail;
1609 
1610  pic->data[i] = pic->buf[i]->data;
1611  }
1612  for (; i < AV_NUM_DATA_POINTERS; i++) {
1613  pic->data[i] = NULL;
1614  pic->linesize[i] = 0;
1615  }
1616  if (desc->flags & AV_PIX_FMT_FLAG_PAL ||
1617  ((desc->flags & FF_PSEUDOPAL) && pic->data[1]))
1618  avpriv_set_systematic_pal2((uint32_t *)pic->data[1], pic->format);
1619 
1620  if (s->debug & FF_DEBUG_BUFFERS)
1621  av_log(s, AV_LOG_DEBUG, "default_get_buffer called on pic %p\n", pic);
1622 
1623  return 0;
1624 fail:
1625  av_frame_unref(pic);
1626  return AVERROR(ENOMEM);
1627 }
1628 
1629 int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
1630 {
1631  int ret;
1632 
1633  if (avctx->hw_frames_ctx) {
1634  ret = av_hwframe_get_buffer(avctx->hw_frames_ctx, frame, 0);
1635  frame->width = avctx->coded_width;
1636  frame->height = avctx->coded_height;
1637  return ret;
1638  }
1639 
1640  if ((ret = update_frame_pool(avctx, frame)) < 0)
1641  return ret;
1642 
1643  switch (avctx->codec_type) {
1644  case AVMEDIA_TYPE_VIDEO:
1645  return video_get_buffer(avctx, frame);
1646  case AVMEDIA_TYPE_AUDIO:
1647  return audio_get_buffer(avctx, frame);
1648  default:
1649  return -1;
1650  }
1651 }
1652 
1653 static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
1654 {
1655  int size;
1656  const uint8_t *side_metadata;
1657 
1658  AVDictionary **frame_md = &frame->metadata;
1659 
1660  side_metadata = av_packet_get_side_data(avpkt,
1661  AV_PKT_DATA_STRINGS_METADATA, &size);
1662  return av_packet_unpack_dictionary(side_metadata, size, frame_md);
1663 }
1664 
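/* Fill timestamps, side data and format-related fields of a frame from the
 * last packet submitted for decoding and from the codec context defaults. */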
1665 int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
1666 {
1667  const AVPacket *pkt = avctx->internal->last_pkt_props;
1668  int i;
1669  static const struct {
1670  enum AVPacketSideDataType packet;
1671  enum AVFrameSideDataType frame;
1672  } sd[] = {
1673  { AV_PKT_DATA_REPLAYGAIN,                 AV_FRAME_DATA_REPLAYGAIN },
1674  { AV_PKT_DATA_DISPLAYMATRIX,              AV_FRAME_DATA_DISPLAYMATRIX },
1675  { AV_PKT_DATA_SPHERICAL,                  AV_FRAME_DATA_SPHERICAL },
1676  { AV_PKT_DATA_STEREO3D,                   AV_FRAME_DATA_STEREO3D },
1677  { AV_PKT_DATA_AUDIO_SERVICE_TYPE,         AV_FRAME_DATA_AUDIO_SERVICE_TYPE },
1678  { AV_PKT_DATA_MASTERING_DISPLAY_METADATA, AV_FRAME_DATA_MASTERING_DISPLAY_METADATA },
1679  { AV_PKT_DATA_CONTENT_LIGHT_LEVEL,        AV_FRAME_DATA_CONTENT_LIGHT_LEVEL },
1680  { AV_PKT_DATA_A53_CC,                     AV_FRAME_DATA_A53_CC },
1681  };
1682 
1683  if (pkt) {
1684  frame->pts = pkt->pts;
1685 #if FF_API_PKT_PTS
1686 FF_DISABLE_DEPRECATION_WARNINGS
1687  frame->pkt_pts = pkt->pts;
1688 FF_ENABLE_DEPRECATION_WARNINGS
1689 #endif
1690  frame->pkt_pos = pkt->pos;
1691  frame->pkt_duration = pkt->duration;
1692  frame->pkt_size = pkt->size;
1693 
1694  for (i = 0; i < FF_ARRAY_ELEMS(sd); i++) {
1695  int size;
1696  uint8_t *packet_sd = av_packet_get_side_data(pkt, sd[i].packet, &size);
1697  if (packet_sd) {
1698  AVFrameSideData *frame_sd = av_frame_new_side_data(frame,
1699  sd[i].frame,
1700  size);
1701  if (!frame_sd)
1702  return AVERROR(ENOMEM);
1703 
1704  memcpy(frame_sd->data, packet_sd, size);
1705  }
1706  }
1707  add_metadata_from_side_data(pkt, frame);
1708 
1709  if (pkt->flags & AV_PKT_FLAG_DISCARD) {
1710  frame->flags |= AV_FRAME_FLAG_DISCARD;
1711  } else {
1712  frame->flags = (frame->flags & ~AV_FRAME_FLAG_DISCARD);
1713  }
1714  }
1715  frame->reordered_opaque = avctx->reordered_opaque;
1716 
1717  if (frame->color_primaries == AVCOL_PRI_UNSPECIFIED)
1718  frame->color_primaries = avctx->color_primaries;
1719  if (frame->color_trc == AVCOL_TRC_UNSPECIFIED)
1720  frame->color_trc = avctx->color_trc;
1721  if (frame->colorspace == AVCOL_SPC_UNSPECIFIED)
1722  frame->colorspace = avctx->colorspace;
1723  if (frame->color_range == AVCOL_RANGE_UNSPECIFIED)
1724  frame->color_range = avctx->color_range;
1725  if (frame->chroma_location == AVCHROMA_LOC_UNSPECIFIED)
1726  frame->chroma_location = avctx->chroma_sample_location;
1727 
1728  switch (avctx->codec->type) {
1729  case AVMEDIA_TYPE_VIDEO:
1730  frame->format = avctx->pix_fmt;
1731  if (!frame->sample_aspect_ratio.num)
1732  frame->sample_aspect_ratio = avctx->sample_aspect_ratio;
1733 
1734  if (frame->width && frame->height &&
1735  av_image_check_sar(frame->width, frame->height,
1736  frame->sample_aspect_ratio) < 0) {
1737  av_log(avctx, AV_LOG_WARNING, "ignoring invalid SAR: %u/%u\n",
1738  frame->sample_aspect_ratio.num,
1739  frame->sample_aspect_ratio.den);
1740  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
1741  }
1742 
1743  break;
1744  case AVMEDIA_TYPE_AUDIO:
1745  if (!frame->sample_rate)
1746  frame->sample_rate = avctx->sample_rate;
1747  if (frame->format < 0)
1748  frame->format = avctx->sample_fmt;
1749  if (!frame->channel_layout) {
1750  if (avctx->channel_layout) {
1751  if (av_get_channel_layout_nb_channels(avctx->channel_layout) !=
1752  avctx->channels) {
1753  av_log(avctx, AV_LOG_ERROR, "Inconsistent channel "
1754  "configuration.\n");
1755  return AVERROR(EINVAL);
1756  }
1757 
1758  frame->channel_layout = avctx->channel_layout;
1759  } else {
1760  if (avctx->channels > FF_SANE_NB_CHANNELS) {
1761  av_log(avctx, AV_LOG_ERROR, "Too many channels: %d.\n",
1762  avctx->channels);
1763  return AVERROR(ENOSYS);
1764  }
1765  }
1766  }
1767  frame->channels = avctx->channels;
1768  break;
1769  }
1770  return 0;
1771 }
1772 
1773 static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
1774 {
1775  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1776  int i;
1777  int num_planes = av_pix_fmt_count_planes(frame->format);
1778  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
1779  int flags = desc ? desc->flags : 0;
1780  if (num_planes == 1 && (flags & AV_PIX_FMT_FLAG_PAL))
1781  num_planes = 2;
1782  if ((flags & FF_PSEUDOPAL) && frame->data[1])
1783  num_planes = 2;
1784  for (i = 0; i < num_planes; i++) {
1785  av_assert0(frame->data[i]);
1786  }
1787  // For formats without data like hwaccel allow unused pointers to be non-NULL.
1788  for (i = num_planes; num_planes > 0 && i < FF_ARRAY_ELEMS(frame->data); i++) {
1789  if (frame->data[i])
1790  av_log(avctx, AV_LOG_ERROR, "Buffer returned by get_buffer2() did not zero unused plane pointers\n");
1791  frame->data[i] = NULL;
1792  }
1793  }
1794 }
1795 
1796 static void decode_data_free(void *opaque, uint8_t *data)
1797 {
1798  FrameDecodeData *fdd = (FrameDecodeData*)data;
1799 
1800  if (fdd->post_process_opaque_free)
1801  fdd->post_process_opaque_free(fdd->post_process_opaque);
1802 
1803  if (fdd->hwaccel_priv_free)
1804  fdd->hwaccel_priv_free(fdd->hwaccel_priv);
1805 
1806  av_freep(&fdd);
1807 }
1808 
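/* Attach a FrameDecodeData struct to frame->private_ref; it carries optional
 * post-processing and hwaccel private data and is released by
 * decode_data_free() when the frame is unreferenced. */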
1809 int ff_attach_decode_data(AVFrame *frame)
1810 {
1811  AVBufferRef *fdd_buf;
1812  FrameDecodeData *fdd;
1813 
1814  av_assert1(!frame->private_ref);
1815  av_buffer_unref(&frame->private_ref);
1816 
1817  fdd = av_mallocz(sizeof(*fdd));
1818  if (!fdd)
1819  return AVERROR(ENOMEM);
1820 
1821  fdd_buf = av_buffer_create((uint8_t*)fdd, sizeof(*fdd), decode_data_free,
1822  NULL, AV_BUFFER_FLAG_READONLY);
1823  if (!fdd_buf) {
1824  av_freep(&fdd);
1825  return AVERROR(ENOMEM);
1826  }
1827 
1828  frame->private_ref = fdd_buf;
1829 
1830  return 0;
1831 }
1832 
1833 static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
1834 {
1835  const AVHWAccel *hwaccel = avctx->hwaccel;
1836  int override_dimensions = 1;
1837  int ret;
1838 
1839  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
1840  if ((ret = av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx)) < 0 || avctx->pix_fmt<0) {
1841  av_log(avctx, AV_LOG_ERROR, "video_get_buffer: image parameters invalid\n");
1842  return AVERROR(EINVAL);
1843  }
1844 
1845  if (frame->width <= 0 || frame->height <= 0) {
1846  frame->width = FFMAX(avctx->width, AV_CEIL_RSHIFT(avctx->coded_width, avctx->lowres));
1847  frame->height = FFMAX(avctx->height, AV_CEIL_RSHIFT(avctx->coded_height, avctx->lowres));
1848  override_dimensions = 0;
1849  }
1850 
1851  if (frame->data[0] || frame->data[1] || frame->data[2] || frame->data[3]) {
1852  av_log(avctx, AV_LOG_ERROR, "pic->data[*]!=NULL in get_buffer_internal\n");
1853  return AVERROR(EINVAL);
1854  }
1855  }
1856  ret = ff_decode_frame_props(avctx, frame);
1857  if (ret < 0)
1858  return ret;
1859 
1860  if (hwaccel) {
1861  if (hwaccel->alloc_frame) {
1862  ret = hwaccel->alloc_frame(avctx, frame);
1863  goto end;
1864  }
1865  } else
1866  avctx->sw_pix_fmt = avctx->pix_fmt;
1867 
1868  ret = avctx->get_buffer2(avctx, frame, flags);
1869  if (ret < 0)
1870  goto end;
1871 
1872  validate_avframe_allocation(avctx, frame);
1873 
1874  ret = ff_attach_decode_data(frame);
1875  if (ret < 0)
1876  goto end;
1877 
1878 end:
1879  if (avctx->codec_type == AVMEDIA_TYPE_VIDEO && !override_dimensions &&
1880  !(avctx->codec->caps_internal & FF_CODEC_CAP_EXPORTS_CROPPING)) {
1881  frame->width = avctx->width;
1882  frame->height = avctx->height;
1883  }
1884 
1885  if (ret < 0)
1886  av_frame_unref(frame);
1887 
1888  return ret;
1889 }
1890 
1891 int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
1892 {
1893  int ret = get_buffer_internal(avctx, frame, flags);
1894  if (ret < 0) {
1895  av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
1896  frame->width = frame->height = 0;
1897  }
1898  return ret;
1899 }
1900 
1901 static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame)
1902 {
1903  AVFrame *tmp;
1904  int ret;
1905 
1906  av_assert0(avctx->codec_type == AVMEDIA_TYPE_VIDEO);
1907 
1908  if (frame->data[0] && (frame->width != avctx->width || frame->height != avctx->height || frame->format != avctx->pix_fmt)) {
1909  av_log(avctx, AV_LOG_WARNING, "Picture changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s in reget buffer()\n",
1910  frame->width, frame->height, av_get_pix_fmt_name(frame->format), avctx->width, avctx->height, av_get_pix_fmt_name(avctx->pix_fmt));
1911  av_frame_unref(frame);
1912  }
1913 
1914  if (!frame->data[0])
1915  return ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1916 
1917  if (av_frame_is_writable(frame))
1918  return ff_decode_frame_props(avctx, frame);
1919 
1920  tmp = av_frame_alloc();
1921  if (!tmp)
1922  return AVERROR(ENOMEM);
1923 
1924  av_frame_move_ref(tmp, frame);
1925 
1926  ret = ff_get_buffer(avctx, frame, AV_GET_BUFFER_FLAG_REF);
1927  if (ret < 0) {
1928  av_frame_free(&tmp);
1929  return ret;
1930  }
1931 
1932  av_frame_copy(frame, tmp);
1933  av_frame_free(&tmp);
1934 
1935  return 0;
1936 }
1937 
1938 int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
1939 {
1940  int ret = reget_buffer_internal(avctx, frame);
1941  if (ret < 0)
1942  av_log(avctx, AV_LOG_ERROR, "reget_buffer() failed\n");
1943  return ret;
1944 }
1945 
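/* Reset the decoder state between seeks: drop buffered packets and frames,
 * clear the draining flags, flush the codec (or the frame threads) and tear
 * down the bitstream-filter chain. */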
1946 void avcodec_flush_buffers(AVCodecContext *avctx)
1947 {
1948  avctx->internal->draining = 0;
1949  avctx->internal->draining_done = 0;
1950  avctx->internal->nb_draining_errors = 0;
1951  av_frame_unref(avctx->internal->buffer_frame);
1952  av_frame_unref(avctx->internal->compat_decode_frame);
1953  av_packet_unref(avctx->internal->buffer_pkt);
1954  avctx->internal->buffer_pkt_valid = 0;
1955 
1956  av_packet_unref(avctx->internal->ds.in_pkt);
1957 
1958  if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME)
1959  ff_thread_flush(avctx);
1960  else if (avctx->codec->flush)
1961  avctx->codec->flush(avctx);
1962 
1963  avctx->pts_correction_last_pts =
1964  avctx->pts_correction_last_dts = INT64_MIN;
1965 
1966  ff_decode_bsfs_uninit(avctx);
1967 
1968  if (!avctx->refcounted_frames)
1969  av_frame_unref(avctx->internal->to_free);
1970 }
1971 
1972 void ff_decode_bsfs_uninit(AVCodecContext *avctx)
1973 {
1974  DecodeFilterContext *s = &avctx->internal->filter;
1975  int i;
1976 
1977  for (i = 0; i < s->nb_bsfs; i++)
1978  av_bsf_free(&s->bsfs[i]);
1979  av_freep(&s->bsfs);
1980  s->nb_bsfs = 0;
1981 }
#define AV_PIX_FMT_FLAG_PAL
Pixel format has a palette in data[1], values are indexes in this palette.
Definition: pixdesc.h:132
#define FF_SANE_NB_CHANNELS
Definition: internal.h:86
This struct aggregates all the (hardware/vendor-specific) "high-level" state, i.e.
Definition: hwcontext.h:60
#define FF_COMPLIANCE_EXPERIMENTAL
Allow nonstandardized experimental things.
Definition: avcodec.h:2581
enum AVPixelFormat(* get_format)(struct AVCodecContext *s, const enum AVPixelFormat *fmt)
callback to negotiate the pixelFormat
Definition: avcodec.h:1769
AVCodecHWConfig public
This is the structure which will be returned to the user by avcodec_get_hw_config().
Definition: hwaccel.h:34
int nb_draining_errors
Definition: internal.h:220
#define FF_SUB_CHARENC_MODE_PRE_DECODER
the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv ...
Definition: avcodec.h:3098
void av_bsf_free(AVBSFContext **ctx)
Free a bitstream filter context and everything associated with it; write NULL into the supplied point...
Definition: bsf.c:35
#define NULL
Definition: coverity.c:32
int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Select the (possibly hardware accelerated) pixel format.
Definition: decode.c:1299
const struct AVCodec * codec
Definition: avcodec.h:1527
AVRational framerate
Definition: avcodec.h:3040
const char const char void * val
Definition: avisynth_c.h:771
const AVCodecDescriptor * codec_descriptor
AVCodecDescriptor.
Definition: avcodec.h:3061
const char * s
Definition: avisynth_c.h:768
#define AVERROR_INVALIDDATA
Invalid data found when processing input.
Definition: error.h:59
#define AV_NUM_DATA_POINTERS
Definition: frame.h:219
AVCodecParameters * par_out
Parameters of the output stream.
Definition: avcodec.h:5721
void av_bprintf(AVBPrint *buf, const char *fmt,...)
Definition: bprint.c:94
AVPacketSideDataType
Definition: avcodec.h:1143
int64_t pts_correction_num_faulty_dts
Number of incorrect PTS values so far.
Definition: avcodec.h:3078
void av_buffer_unref(AVBufferRef **buf)
Free a given reference and automatically free the buffer if there are no more references to it...
Definition: buffer.c:125
#define GET_UTF8(val, GET_BYTE, ERROR)
Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
Definition: common.h:385
int size
const AVPixFmtDescriptor * av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2363
This structure describes decoded (raw) audio or video data.
Definition: frame.h:218
int stride_align[AV_NUM_DATA_POINTERS]
Definition: internal.h:112
This side data must be associated with an audio frame and corresponds to enum AVAudioServiceType defi...
Definition: frame.h:113
int apply_cropping
Video decoding only.
Definition: avcodec.h:3285
static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:602
const struct AVCodecHWConfigInternal ** hw_configs
Array of pointers to hardware configurations supported by the codec, or NULL if no hardware supported...
Definition: avcodec.h:3549
#define AV_CODEC_FLAG2_SKIP_MANUAL
Do not skip samples and export skip information as frame side data.
Definition: avcodec.h:937
int coded_width
Bitstream width / height, may be different from width/height e.g.
Definition: avcodec.h:1705
int64_t pkt_pos
reordered pos from the last AVPacket that has been input into the decoder
Definition: frame.h:490
int capabilities
Hardware accelerated codec capabilities.
Definition: avcodec.h:3609
const char * fmt
Definition: avisynth_c.h:769
AVPacket * last_pkt_props
Properties (timestamps+side data) extracted from the last packet passed for decoding.
Definition: internal.h:172
misc image utilities
#define AV_LOG_WARNING
Something somehow does not look correct.
Definition: log.h:182
static int convert_sub_to_old_ass_form(AVSubtitle *sub, const AVPacket *pkt, AVRational tb)
Definition: decode.c:940
int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt)
Definition: pixdesc.c:2403
AVFrame * to_free
Definition: internal.h:159
The codec supports this format by some internal method.
Definition: avcodec.h:3370
int64_t pos
byte position in stream, -1 if unknown
Definition: avcodec.h:1450
static void get_subtitle_defaults(AVSubtitle *sub)
Definition: decode.c:841
int ff_set_dimensions(AVCodecContext *s, int width, int height)
Check that the provided frame dimensions are valid and set them on the codec context.
Definition: utils.c:104
AVBufferRef * buf[AV_NUM_DATA_POINTERS]
AVBuffer references backing the data for this frame.
Definition: frame.h:410
static int decode_simple_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:589
const char * desc
Definition: nvenc.c:65
int width
Definition: internal.h:111
This side data should be associated with a video stream and contains Stereoscopic 3D information in f...
Definition: avcodec.h:1217
ATSC A53 Part 4 Closed Captions.
Definition: avcodec.h:1345
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: avcodec.h:2148
int nb_extended_buf
Number of elements in extended_buf.
Definition: frame.h:428
Content light level (based on CTA-861.3).
Definition: frame.h:136
int num
Numerator.
Definition: rational.h:59
The bitstream filter state.
Definition: avcodec.h:5687
int size
Definition: avcodec.h:1431
const AVBitStreamFilter * av_bsf_get_by_name(const char *name)
enum AVPixelFormat pix_fmt
Supported pixel format.
Definition: avcodec.h:3603
AVRational sample_aspect_ratio
sample aspect ratio (0 if unknown) That is the width of a pixel divided by the height of the pixel...
Definition: avcodec.h:1896
void(* hwaccel_priv_free)(void *priv)
Definition: decode.h:53
#define AV_CODEC_PROP_TEXT_SUB
Subtitle codec is text based.
Definition: avcodec.h:763
enum AVPixelFormat pix_fmt
Pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:1727
int samples
Definition: internal.h:116
static int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:367
int attribute_align_arg avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, int *got_frame_ptr, const AVPacket *avpkt)
Decode the audio frame of size avpkt->size from avpkt->data into frame.
Definition: decode.c:833
enum AVPixelFormat format
The pixel format identifying the underlying HW surface type.
Definition: hwcontext.h:208
Mastering display metadata associated with a video frame.
Definition: frame.h:119
unsigned num_rects
Definition: avcodec.h:3864
void av_frame_move_ref(AVFrame *dst, AVFrame *src)
Move everything contained in src to dst and reset src.
Definition: frame.c:580
enum AVMediaType type
Definition: avcodec.h:3421
static int apply_param_change(AVCodecContext *avctx, const AVPacket *avpkt)
Definition: decode.c:47
static int get_buffer_internal(AVCodecContext *avctx, AVFrame *frame, int flags)
Definition: decode.c:1833
static int recode_subtitle(AVCodecContext *avctx, AVPacket *outpkt, const AVPacket *inpkt)
Definition: decode.c:848
AVBufferPool * pools[4]
Pools for each data plane.
Definition: internal.h:105
int ff_decode_frame_props(AVCodecContext *avctx, AVFrame *frame)
Set various frame properties from the codec context / packet data.
Definition: decode.c:1665
size_t crop_bottom
Definition: frame.h:578
static AVPacket pkt
int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, int *got_sub_ptr, AVPacket *avpkt)
Decode a subtitle message.
Definition: decode.c:998
const struct AVHWAccel * hwaccel
Hardware accelerator in use.
Definition: avcodec.h:2674
int(* alloc_frame)(AVCodecContext *avctx, AVFrame *frame)
Allocate a custom buffer.
Definition: avcodec.h:3622
static int utf8_check(const uint8_t *str)
Definition: decode.c:906
int av_bsf_init(AVBSFContext *ctx)
Prepare the filter for use, after all the parameters and options have been set.
Definition: bsf.c:134
int av_bprint_finalize(AVBPrint *buf, char **ret_str)
Finalize a print buffer.
Definition: bprint.c:235
int ff_reget_buffer(AVCodecContext *avctx, AVFrame *frame)
Identical in function to av_frame_make_writable(), except it uses ff_get_buffer() to allocate the buf...
Definition: decode.c:1938
static int apply_cropping(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:682
void ff_decode_bsfs_uninit(AVCodecContext *avctx)
Definition: decode.c:1972
Mastering display metadata (based on SMPTE-2086:2014).
Definition: avcodec.h:1325
AVRational time_base
This is the fundamental unit of time (in seconds) in terms of which frame timestamps are represented...
Definition: avcodec.h:1640
int av_get_channel_layout_nb_channels(uint64_t channel_layout)
Return the number of channels in the channel layout.
AVSubtitleRect ** rects
Definition: avcodec.h:3865
int av_codec_is_decoder(const AVCodec *codec)
Definition: utils.c:99
int(* uninit)(AVCodecContext *avctx)
Uninitialize the hwaccel private data.
Definition: avcodec.h:3714
int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **ctx)
Allocate a context for a given bitstream filter.
Definition: bsf.c:81
#define AV_CODEC_CAP_DELAY
Encoder or decoder requires flushing with NULL input at the end in order to give the complete and cor...
Definition: avcodec.h:984
#define av_assert0(cond)
assert() equivalent, that is always enabled.
Definition: avassert.h:37
int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt)
Retrieve a filtered packet.
Definition: bsf.c:201
static void filter(int16_t *output, ptrdiff_t out_stride, int16_t *low, ptrdiff_t low_stride, int16_t *high, ptrdiff_t high_stride, int len, int clip)
Definition: cfhd.c:114
DecodeFilterContext filter
Definition: internal.h:166
int height
Definition: internal.h:111
enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *avctx, const enum AVPixelFormat *fmt)
Definition: decode.c:1087
enum AVSampleFormat sample_fmt
audio sample format
Definition: avcodec.h:2181
uint8_t
#define av_malloc(s)
AVFrame * av_frame_alloc(void)
Allocate an AVFrame and set its fields to default values.
Definition: frame.c:189
int ff_attach_decode_data(AVFrame *frame)
Definition: decode.c:1809
int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict)
Unpack a dictionary from side_data.
Definition: avpacket.c:517
static int64_t guess_correct_pts(AVCodecContext *ctx, int64_t reordered_pts, int64_t dts)
Attempt to guess proper monotonic timestamps for decoded video frames which might have incorrect time...
Definition: decode.c:335
size_t crop_left
Definition: frame.h:579
int avpriv_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
Definition: imgutils.c:152
static av_cold int end(AVCodecContext *avctx)
Definition: avrndec.c:90
int64_t duration
Duration of this packet in AVStream->time_base units, 0 if unknown.
Definition: avcodec.h:1448
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: avcodec.h:1211
#define AV_CODEC_FLAG_UNALIGNED
Allow decoders to produce frames with data planes that are not aligned to CPU requirements (e...
Definition: avcodec.h:829
#define AV_WL8(p, d)
Definition: intreadwrite.h:399
Multithreading support functions.
AVBufferRef * private_ref
AVBufferRef for internal use by a single libav* library.
Definition: frame.h:596
int ff_decode_get_packet(AVCodecContext *avctx, AVPacket *pkt)
Called by decoders to get the next packet for decoding.
Definition: decode.c:294
#define emms_c()
Definition: internal.h:55
int64_t pts
Presentation timestamp in time_base units (time when frame should be shown to user).
Definition: frame.h:311
enum AVPixelFormat pix_fmt
A hardware pixel format which the codec can use.
Definition: avcodec.h:3386
static AVFrame * frame
int planes
Definition: internal.h:114
const char data[16]
Definition: mxf.c:90
Structure to hold side data for an AVFrame.
Definition: frame.h:180
int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar)
Check if the given sample aspect ratio of an image is valid.
Definition: imgutils.c:287
size_t compat_decode_consumed
Definition: internal.h:209
static void finish(void)
Definition: movenc.c:345
uint8_t * data
Definition: avcodec.h:1430
static int flags
Definition: log.c:55
#define AVERROR_EOF
End of file.
Definition: error.h:55
AVDictionary * metadata
metadata.
Definition: frame.h:505
int lowres
low resolution decoding, 1-> 1/2 size, 2->1/4 size
Definition: avcodec.h:2749
#define AV_BUFFER_FLAG_READONLY
Always treat the buffer as read-only, even when it has only one reference.
Definition: buffer.h:113
int(* init)(AVCodecContext *avctx)
Initialize the hwaccel private data.
Definition: avcodec.h:3706
The data represents the AVSphericalMapping structure defined in libavutil/spherical.h.
Definition: frame.h:130
void * av_realloc_array(void *ptr, size_t nmemb, size_t size)
Allocate, reallocate, or free an array.
Definition: mem.c:198
static int bsfs_poll(AVCodecContext *avctx, AVPacket *pkt)
Definition: decode.c:256
enum AVChromaLocation chroma_sample_location
This defines the location of chroma samples.
Definition: avcodec.h:2155
#define av_log(a,...)
const char * name
Definition: pixdesc.h:82
int av_packet_ref(AVPacket *dst, const AVPacket *src)
Setup a new reference to the data described by a given packet.
Definition: avpacket.c:601
int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt)
Check if the sample format is planar.
Definition: samplefmt.c:112
FramePool * pool
Definition: internal.h:161
void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, int linesize_align[AV_NUM_DATA_POINTERS])
Modify width and height values so that they will result in a memory buffer that is acceptable for the...
Definition: utils.c:154
int ff_thread_decode_frame(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, AVPacket *avpkt)
Submit a new frame to a decoding thread.
int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq)
Rescale a 64-bit integer by 2 rational numbers.
Definition: mathematics.c:142
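av_rescale_q(a, bq, cq) computes a*bq/cq with wide intermediates, which is how timestamps are moved between time bases throughout this file. A tiny illustrative helper (names ours):

#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

/* Convert a timestamp counted in 1/90000 s (e.g. MPEG-TS) to milliseconds. */
static int64_t ts90k_to_ms(int64_t ts)
{
    return av_rescale_q(ts, (AVRational){ 1, 90000 }, (AVRational){ 1, 1000 });
}
/* ts90k_to_ms(90000) == 1000, i.e. one second. */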
#define AV_RL8(x)
Definition: intreadwrite.h:398
int av_new_packet(AVPacket *pkt, int size)
Allocate the payload of a packet and initialize its fields with default values.
Definition: avpacket.c:86
AVRational pkt_timebase
Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
Definition: avcodec.h:3054
int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src)
Copy the contents of src to dst.
Definition: utils.c:2003
int width
Definition: frame.h:276
#define AV_LOG_ERROR
Something went wrong and cannot losslessly be recovered.
Definition: log.h:176
int has_b_frames
Size of the frame reordering buffer in the decoder.
Definition: avcodec.h:1807
void * post_process_opaque
Definition: decode.h:46
#define AV_BPRINT_SIZE_UNLIMITED
static int hwaccel_init(AVCodecContext *avctx, const AVCodecHWConfigInternal *hw_config)
Definition: decode.c:1250
static void validate_avframe_allocation(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1773
#define AVERROR(e)
Definition: error.h:43
An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
Definition: avcodec.h:1175
int av_frame_apply_cropping(AVFrame *frame, int flags)
Crop the given video AVFrame according to its crop_left/crop_top/crop_right/ crop_bottom fields...
Definition: frame.c:873
uint8_t * av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size)
Get side information from packet.
Definition: avpacket.c:350
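A typical caller-side use of av_packet_get_side_data() is to probe a packet for one specific side-data type and fall back gracefully when it is absent. A hedged sketch that reads the leading "samples to skip" field of AV_PKT_DATA_SKIP_SAMPLES (the helper name is ours; the side data begins with a little-endian 32-bit sample count):

#include <libavcodec/avcodec.h>
#include <libavutil/intreadwrite.h>

/* Return how many leading samples the packet asks the caller to skip, or 0. */
static uint32_t packet_skip_samples(const AVPacket *pkt)
{
    int size = 0;
    uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, &size);
    if (!sd || size < 4)
        return 0;
    return AV_RL32(sd);  /* first field: samples to skip at the packet start */
}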
void av_frame_free(AVFrame **frame)
Free the frame and any dynamically allocated objects in it, e.g.
Definition: frame.c:202
int64_t pts_correction_last_pts
PTS of the last frame.
Definition: avcodec.h:3079
int active_thread_type
Which multithreading methods are in use by the codec.
Definition: avcodec.h:2788
int methods
Bit set of AV_CODEC_HW_CONFIG_METHOD_* flags, describing the possible setup methods which can be used...
Definition: avcodec.h:3391
void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max)
Definition: bprint.c:69
int avcodec_is_open(AVCodecContext *s)
Definition: utils.c:1861
int attribute_align_arg avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame)
Return decoded output data from a decoder.
Definition: decode.c:709
static int extract_packet_props(AVCodecInternal *avci, const AVPacket *pkt)
Definition: decode.c:125
AVFrame * buffer_frame
Definition: internal.h:202
int capabilities
Codec capabilities.
Definition: avcodec.h:3427
enum AVColorRange color_range
MPEG vs JPEG YUV range.
Definition: frame.h:463
#define AV_LOG_DEBUG
Stuff which is only useful for libav* developers.
Definition: log.h:197
ATSC A53 Part 4 Closed Captions.
Definition: frame.h:58
AVBufferRef * buf
A reference to the reference-counted buffer where the packet data is stored.
Definition: avcodec.h:1413
int flags
AV_CODEC_FLAG_*.
Definition: avcodec.h:1598
enum AVColorSpace colorspace
YUV colorspace type.
Definition: frame.h:474
AVRational time_base_in
The timebase used for the timestamps of the input packets.
Definition: avcodec.h:5727
#define AV_PIX_FMT_FLAG_HWACCEL
Pixel format is an HW accelerated format.
Definition: pixdesc.h:140
simple assert() macros that are a bit more flexible than ISO C assert().
void * av_mallocz(size_t size)
Allocate a memory block with alignment suitable for all memory accesses (including vectors if availab...
Definition: mem.c:236
int side_data_elems
Definition: avcodec.h:1442
AVBufferRef * av_buffer_create(uint8_t *data, int size, void(*free)(void *opaque, uint8_t *data), void *opaque, int flags)
Create an AVBuffer from an existing array.
Definition: buffer.c:28
int64_t max_pixels
The number of pixels per image to maximally accept.
Definition: avcodec.h:3227
int av_hwframe_ctx_init(AVBufferRef *ref)
Finalize the context before use.
Definition: hwcontext.c:329
static int reget_buffer_internal(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1901
enum AVHWDeviceType type
This field identifies the underlying API used for hardware access.
Definition: hwcontext.h:78
#define FFMAX(a, b)
Definition: common.h:94
int av_hwframe_get_buffer(AVBufferRef *hwframe_ref, AVFrame *frame, int flags)
Allocate a new frame attached to the given AVHWFramesContext.
Definition: hwcontext.c:465
#define fail()
Definition: checkasm.h:116
char * av_get_token(const char **buf, const char *term)
Unescape the given string until a non escaped terminating char, and return the token corresponding to...
Definition: avstring.c:149
int av_frame_copy(AVFrame *dst, const AVFrame *src)
Copy the frame data from src to dst.
Definition: frame.c:790
const AVHWAccel * hwaccel
If this configuration uses a hwaccel, a pointer to it.
Definition: hwaccel.h:39
#define FF_CODEC_CAP_EXPORTS_CROPPING
The decoder sets the cropping fields in the output frames manually.
Definition: internal.h:66
int priv_data_size
Size of the private data to allocate in AVCodecInternal.hwaccel_priv_data.
Definition: avcodec.h:3720
int flags
A combination of AV_PKT_FLAG values.
Definition: avcodec.h:1436
reference-counted frame API
uint64_t channel_layout
Audio channel layout.
Definition: avcodec.h:2224
The codec supports this format by some ad-hoc method.
Definition: avcodec.h:3379
uint32_t end_display_time
Definition: avcodec.h:3863
int64_t pts
Same as packet pts, in AV_TIME_BASE.
Definition: avcodec.h:3866
uint64_t channel_layout
Channel layout of the audio data.
Definition: frame.h:396
int props
Codec properties, a combination of AV_CODEC_PROP_* flags.
Definition: avcodec.h:715
size_t crop_top
Definition: frame.h:577
common internal API header
uint64_t flags
Combination of AV_PIX_FMT_FLAG_...
Definition: pixdesc.h:106
int initial_pool_size
Initial size of the frame pool.
Definition: hwcontext.h:198
int av_packet_copy_props(AVPacket *dst, const AVPacket *src)
Copy only "properties" fields from src to dst.
Definition: avpacket.c:558
int channels
number of audio channels, only used for audio.
Definition: frame.h:523
int flags
Frame flags, a combination of AV_FRAME_FLAGS.
Definition: frame.h:456
int err_recognition
Error recognition; may misdetect some more or less valid parts as errors.
Definition: avcodec.h:2642
#define av_assert1(cond)
assert() equivalent, that does not lie in speed critical code.
Definition: avassert.h:53
#define FF_THREAD_FRAME
Decode more than one frame at once.
Definition: avcodec.h:2780
const char * name
Name of the hardware accelerated codec.
Definition: avcodec.h:3582
#define FFMIN(a, b)
Definition: common.h:96
AVBufferRef ** extended_buf
For planar audio which requires more than AV_NUM_DATA_POINTERS AVBufferRef pointers, this array will hold all the references which cannot fit into AVFrame.buf.
Definition: frame.h:424
int channels
Definition: internal.h:115
AVFrame * compat_decode_frame
Definition: internal.h:213
int width
picture width / height.
Definition: avcodec.h:1690
uint8_t w
Definition: llviddspenc.c:38
AVBufferRef * hw_frames_ctx
A reference to the AVHWFramesContext describing the input (for encoding) or output (decoding) frames...
Definition: avcodec.h:3197
static int add_metadata_from_side_data(const AVPacket *avpkt, AVFrame *frame)
Definition: decode.c:1653
AVRational time_base_out
The timebase used for the timestamps of the output packets.
Definition: avcodec.h:5733
static int compat_decode(AVCodecContext *avctx, AVFrame *frame, int *got_frame, const AVPacket *pkt)
Definition: decode.c:744
AVPacket * in_pkt
Definition: internal.h:120
This side data should be associated with a video stream and corresponds to the AVSphericalMapping str...
Definition: avcodec.h:1331
int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt)
Submit a packet for filtering.
Definition: bsf.c:175
AVFormatContext * ctx
Definition: movenc.c:48
enum AVColorPrimaries color_primaries
Chromaticity coordinates of the source primaries.
Definition: avcodec.h:2127
AVFrameSideDataType
Definition: frame.h:48
uint16_t format
Definition: avcodec.h:3861
#define FF_DEBUG_BUFFERS
Definition: avcodec.h:2620
#define AV_RL32
Definition: intreadwrite.h:146
int64_t reordered_opaque
opaque 64-bit number (generally a PTS) that will be reordered and output in AVFrame.reordered_opaque
Definition: avcodec.h:2667
#define AV_EF_EXPLODE
abort decoding on minor error detection
Definition: avcodec.h:2653
int n
Definition: avisynth_c.h:684
static int av_bprint_is_complete(const AVBPrint *buf)
Test if the print buffer is complete (not truncated).
Definition: bprint.h:185
const char * bsfs
Decoding only, a comma-separated list of bitstream filters to apply to packets before decoding...
Definition: avcodec.h:3540
DecodeSimpleContext ds
Definition: internal.h:165
char * sub_charenc
Character encoding of the input subtitles file.
Definition: avcodec.h:3087
static int audio_get_buffer(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1537
if(ret< 0)
Definition: vf_mcdeint.c:279
int draining
checks API usage: after codec draining, flush is required to resume operation
Definition: internal.h:195
#define FF_ARRAY_ELEMS(a)
int thread_count
thread count is used to decide how many independent tasks should be passed to execute() ...
Definition: avcodec.h:2769
int linesize[4]
Definition: internal.h:113
int sub_charenc_mode
Subtitles character encoding mode.
Definition: avcodec.h:3095
void avcodec_flush_buffers(AVCodecContext *avctx)
Reset the internal decoder state / flush internal buffers.
Definition: decode.c:1946
#define AVERROR_PATCHWELCOME
Not yet implemented in FFmpeg, patches welcome.
Definition: error.h:62
int(* post_process)(void *logctx, AVFrame *frame)
The callback to perform some delayed processing on the frame right before it is returned to the calle...
Definition: decode.h:45
int format
format of the frame, -1 if unknown or unset Values correspond to enum AVPixelFormat for video frames...
Definition: frame.h:291
Content light level (based on CTA-861.3).
Definition: avcodec.h:1338
int attribute_align_arg avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt)
Supply raw packet data as input to a decoder.
Definition: decode.c:642
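Together with avcodec_receive_frame() this forms the decoupled decode loop: send one packet, then drain every frame it makes available before sending the next. A minimal sketch (names illustrative, error handling abbreviated; passing a NULL packet enters draining mode):

#include <libavcodec/avcodec.h>
#include <libavutil/frame.h>

static int decode_packet(AVCodecContext *dec_ctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(dec_ctx, pkt);
    if (ret < 0)
        return ret;

    while (ret >= 0) {
        ret = avcodec_receive_frame(dec_ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;          /* needs more input, or fully drained */
        if (ret < 0)
            return ret;        /* a real decoding error */
        /* ... use the decoded frame here ... */
        av_frame_unref(frame);
    }
    return 0;
}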
#define attribute_align_arg
Definition: internal.h:62
void(* post_process_opaque_free)(void *opaque)
Definition: decode.h:47
char * av_strdup(const char *s)
Duplicate a string.
Definition: mem.c:251
int avcodec_default_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
The default callback for AVCodecContext.get_buffer2().
Definition: decode.c:1629
int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, uint8_t *ptr, const int linesizes[4])
Fill plane data pointers for an image with pixel format pix_fmt and height height.
Definition: imgutils.c:111
Libavcodec external API header.
enum AVMediaType codec_type
Definition: avcodec.h:1526
int compat_decode_warned
Definition: internal.h:206
int64_t pkt_duration
duration of the corresponding packet, expressed in AVStream->time_base units, 0 if unknown...
Definition: frame.h:498
A list of zero terminated key/value strings.
Definition: avcodec.h:1275
int attribute_align_arg avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, int *got_picture_ptr, const AVPacket *avpkt)
Decode the video frame of size avpkt->size from avpkt->data into picture.
Definition: decode.c:826
This side data contains a 3x3 transformation matrix describing an affine transformation that needs to...
Definition: frame.h:84
#define AV_TIME_BASE_Q
Internal time base represented as fractional value.
Definition: avutil.h:260
int av_frame_is_writable(AVFrame *frame)
Check if the frame data is writable.
Definition: frame.c:592
Apply the maximum possible cropping, even if it requires setting the AVFrame.data[] entries to unalig...
Definition: frame.h:863
int sample_rate
samples per second
Definition: avcodec.h:2173
int linesize[AV_NUM_DATA_POINTERS]
For video, size in bytes of each picture line.
Definition: frame.h:249
int debug
debug
Definition: avcodec.h:2598
static AVRational av_make_q(int num, int den)
Create an AVRational.
Definition: rational.h:71
Descriptor that unambiguously describes how the bits of a pixel are stored in the up to 4 data planes...
Definition: pixdesc.h:81
const AVCodecHWConfig * avcodec_get_hw_config(const AVCodec *codec, int index)
Retrieve supported hardware configurations for a codec.
Definition: utils.c:1747
main external API structure.
Definition: avcodec.h:1518
int(* receive_frame)(AVCodecContext *avctx, AVFrame *frame)
Decode API with decoupled packet/frame dataflow.
Definition: avcodec.h:3524
void av_packet_unref(AVPacket *pkt)
Wipe the packet.
Definition: avpacket.c:592
int skip_samples_multiplier
Definition: internal.h:217
uint8_t * data
The data buffer.
Definition: buffer.h:89
void avsubtitle_free(AVSubtitle *sub)
Free all allocated data in the given subtitle struct.
Definition: utils.c:1042
AVRational sample_aspect_ratio
Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
Definition: frame.h:306
int ff_get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
Get a buffer for a frame.
Definition: decode.c:1891
uint8_t * data
Definition: frame.h:182
#define AV_CODEC_PROP_BITMAP_SUB
Subtitle codec is bitmap based. Decoded AVSubtitle data can be read from the AVSubtitleRect->pict fiel...
Definition: avcodec.h:758
int av_samples_copy(uint8_t **dst, uint8_t *const *src, int dst_offset, int src_offset, int nb_samples, int nb_channels, enum AVSampleFormat sample_fmt)
Copy samples from src to dst.
Definition: samplefmt.c:213
void * buf
Definition: avisynth_c.h:690
size_t crop_right
Definition: frame.h:580
AVBufferRef * av_buffer_allocz(int size)
Same as av_buffer_alloc(), except the returned buffer will be initialized to zero.
Definition: buffer.c:83
#define AVERROR_BUG
Internal bug, also see AVERROR_BUG2.
Definition: error.h:50
int coded_height
Definition: avcodec.h:1705
int64_t reordered_opaque
reordered opaque 64 bits (generally an integer or a double precision float PTS but can be anything)...
Definition: frame.h:386
int sample_rate
Sample rate of the audio data.
Definition: frame.h:391
int(* get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags)
This callback is called at the beginning of each frame to get data buffer(s) for it.
Definition: avcodec.h:2328
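A custom get_buffer2() that only needs to observe or wrap allocations can simply delegate to avcodec_default_get_buffer2(); a hedged sketch (the callback name and log message are ours):

#include <libavcodec/avcodec.h>
#include <libavutil/log.h>

/* Pass-through get_buffer2(): let libavcodec allocate, but log each request. */
static int my_get_buffer2(AVCodecContext *avctx, AVFrame *frame, int flags)
{
    av_log(avctx, AV_LOG_DEBUG, "buffer request: %dx%d / %d samples\n",
           frame->width, frame->height, frame->nb_samples);
    return avcodec_default_get_buffer2(avctx, frame, flags);
}
/* Installed before avcodec_open2() with: avctx->get_buffer2 = my_get_buffer2; */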
int showed_multi_packet_warning
Definition: internal.h:215
Definition: f_ebur128.c:91
int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width)
Fill plane linesizes for an image with pixel format pix_fmt and width width.
Definition: imgutils.c:89
AVFrameSideData * av_frame_new_side_data(AVFrame *frame, enum AVFrameSideDataType type, int size)
Add a new side data to a frame.
Definition: frame.c:720
const char * av_hwdevice_get_type_name(enum AVHWDeviceType type)
Get the string name of an AVHWDeviceType.
Definition: hwcontext.c:88
#define AV_CODEC_CAP_SUBFRAMES
Codec can output multiple frames per AVPacket. Normally demuxers return one frame at a time...
Definition: avcodec.h:1002
void av_buffer_pool_uninit(AVBufferPool **ppool)
Mark the pool as being available for freeing.
Definition: buffer.c:275
int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, enum AVSampleFormat sample_fmt, int align)
Get the required buffer size for the given audio parameters.
Definition: samplefmt.c:119
enum AVColorSpace colorspace
YUV colorspace type.
Definition: avcodec.h:2141
Rational number (pair of numerator and denominator).
Definition: rational.h:58
enum AVColorTransferCharacteristic color_trc
Color Transfer Characteristic.
Definition: avcodec.h:2134
#define CONFIG_MEMORY_POISONING
Definition: config.h:562
int avcodec_parameters_from_context(AVCodecParameters *par, const AVCodecContext *codec)
Fill the parameters struct based on the values from the supplied codec context.
Definition: utils.c:2021
Recommends skipping the specified number of samples.
Definition: avcodec.h:1259
This struct describes a set or pool of "hardware" frames (i.e.
Definition: hwcontext.h:123
int sub_text_format
Control the form of AVSubtitle.rects[N]->ass.
Definition: avcodec.h:3204
int buffer_pkt_valid
Definition: internal.h:201
int skip_samples
Number of audio samples to skip at the start of the next decoded frame.
Definition: internal.h:185
#define STRIDE_ALIGN
Definition: internal.h:97
enum AVChromaLocation chroma_location
Definition: frame.h:476
int(* frame_params)(AVCodecContext *avctx, AVBufferRef *hw_frames_ctx)
Fill the given hw_frames context with current codec parameters.
Definition: avcodec.h:3735
int64_t best_effort_timestamp
frame timestamp estimated using various heuristics, in stream time base
Definition: frame.h:483
attribute_deprecated int refcounted_frames
If non-zero, the decoded audio and video frames returned from avcodec_decode_video2() and avcodec_dec...
Definition: avcodec.h:2344
int size
Size of data in bytes.
Definition: buffer.h:93
void av_frame_unref(AVFrame *frame)
Unreference all the buffers referenced by frame and reset the frame fields.
Definition: frame.c:551
static int64_t pts
#define SIZE_SPECIFIER
Definition: internal.h:262
This side data should be associated with an audio stream and contains ReplayGain information in form ...
Definition: avcodec.h:1202
#define FF_CODEC_CAP_SETS_PKT_DTS
Decoders marked with FF_CODEC_CAP_SETS_PKT_DTS want to set AVFrame.pkt_dts manually.
Definition: internal.h:55
int ff_decode_get_hw_frames_ctx(AVCodecContext *avctx, enum AVHWDeviceType dev_type)
Make sure avctx.hw_frames_ctx is set.
Definition: decode.c:1150
uint8_t * data[AV_NUM_DATA_POINTERS]
pointer to the picture/channel planes.
Definition: frame.h:232
static void decode_data_free(void *opaque, uint8_t *data)
Definition: decode.c:1796
#define UTF8_MAX_BYTES
Definition: decode.c:847
attribute_deprecated int64_t pkt_pts
PTS copied from the AVPacket that was decoded to produce this frame.
Definition: frame.h:319
void av_bprint_clear(AVBPrint *buf)
Reset the string to "" but keep internal allocated data.
Definition: bprint.c:227
static av_always_inline AVRational av_inv_q(AVRational q)
Invert a rational.
Definition: rational.h:159
AVPacket * buffer_pkt
buffers for using new encode/decode API through legacy API
Definition: internal.h:200
int64_t pkt_dts
DTS copied from the AVPacket that triggered returning this frame.
Definition: frame.h:327
A reference to a data buffer.
Definition: buffer.h:81
int extra_hw_frames
Definition: avcodec.h:3299
static int unrefcount_frame(AVCodecInternal *avci, AVFrame *frame)
Definition: decode.c:138
AVPacketSideData * side_data
Additional packet data that can be provided by the container.
Definition: avcodec.h:1441
This struct stores per-frame lavc-internal data and is attached to it via private_ref.
Definition: decode.h:34
int avcodec_get_hw_frames_parameters(AVCodecContext *avctx, AVBufferRef *device_ref, enum AVPixelFormat hw_pix_fmt, AVBufferRef **out_frames_ref)
Create and return a AVHWFramesContext with values adequate for hardware decoding. ...
Definition: decode.c:1201
static enum AVPixelFormat hw_pix_fmt
Definition: hw_decode.c:44
#define AV_PKT_FLAG_DISCARD
Flag is used to discard packets which are required to maintain valid decoder state but are not requir...
Definition: avcodec.h:1469
#define FF_DISABLE_DEPRECATION_WARNINGS
Definition: internal.h:84
int(* decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt)
Definition: avcodec.h:3506
common internal api header.
common internal and external API header
AVBufferPool * av_buffer_pool_init(int size, AVBufferRef *(*alloc)(int size))
Allocate and initialize a buffer pool.
Definition: buffer.c:238
#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL
HWAccel is experimental and is thus avoided in favor of non experimental codecs.
Definition: avcodec.h:3742
#define AV_CODEC_CAP_PARAM_CHANGE
Codec supports changed parameters at any point.
Definition: avcodec.h:1023
void(* flush)(AVCodecContext *)
Flush buffers.
Definition: avcodec.h:3529
AVBufferRef * av_hwframe_ctx_alloc(AVBufferRef *device_ref_in)
Allocate an AVHWFramesContext tied to a given device context.
Definition: hwcontext.c:243
static void insert_ts(AVBPrint *buf, int ts)
Definition: decode.c:926
void * hwaccel_priv_data
hwaccel-specific private data
Definition: internal.h:190
int caps_internal
Internal codec capabilities.
Definition: avcodec.h:3534
int den
Denominator.
Definition: rational.h:60
int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx)
Check if the given dimension of an image is valid, meaning that all bytes of a plane of an image with...
Definition: imgutils.c:253
static const struct @272 planes[]
#define AV_INPUT_BUFFER_PADDING_SIZE
Required number of additionally allocated bytes at the end of the input bitstream for decoding...
Definition: avcodec.h:773
AVBSFContext ** bsfs
Definition: internal.h:125
static int video_get_buffer(AVCodecContext *s, AVFrame *pic)
Definition: decode.c:1582
Formatted text, the ass field must be set by the decoder and is authoritative.
Definition: avcodec.h:3820
#define FF_PSEUDOPAL
Definition: internal.h:367
AVHWDeviceType
Definition: hwcontext.h:27
void ff_thread_flush(AVCodecContext *avctx)
Wait for decoding threads to finish and reset internal state.
#define FF_ENABLE_DEPRECATION_WARNINGS
Definition: internal.h:85
The codec supports this format via the hw_frames_ctx interface.
Definition: avcodec.h:3363
int channels
number of audio channels
Definition: avcodec.h:2174
struct AVCodecInternal * internal
Private context used for internal data.
Definition: avcodec.h:1553
char * ass
0 terminated ASS/SSA compatible event line.
Definition: avcodec.h:3855
#define AV_FRAME_FLAG_DISCARD
A flag to mark the frames which need to be decoded, but shouldn't be output.
Definition: frame.h:448
int flags2
AV_CODEC_FLAG2_*.
Definition: avcodec.h:1605
enum AVColorPrimaries color_primaries
Definition: frame.h:465
#define HAVE_THREADS
Definition: config.h:270
int64_t dts
Decompression timestamp in AVStream->time_base units; the time at which the packet is decompressed...
Definition: avcodec.h:1429
AVRational av_mul_q(AVRational b, AVRational c)
Multiply two rationals.
Definition: rational.c:80
int64_t pts_correction_last_dts
DTS of the last frame.
Definition: avcodec.h:3080
size_t compat_decode_partial_size
Definition: internal.h:212
#define AV_CODEC_FLAG_TRUNCATED
Input bitstream might be truncated at a random location instead of only at frame boundaries.
Definition: avcodec.h:870
int frame_number
Frame counter, set by libavcodec.
Definition: avcodec.h:2204
static int update_frame_pool(AVCodecContext *avctx, AVFrame *frame)
Definition: decode.c:1435
int height
Definition: frame.h:276
#define av_freep(p)
int64_t pts_correction_num_faulty_pts
Number of incorrect PTS values so far.
Definition: avcodec.h:3077
enum AVColorTransferCharacteristic color_trc
Definition: frame.h:467
AVBufferRef * av_buffer_pool_get(AVBufferPool *pool)
Allocate a new AVBuffer, reusing an old buffer from the pool when available.
Definition: buffer.c:334
Recommends skipping the specified number of samples.
Definition: frame.h:108
void * hwaccel_priv
Per-frame private data for hwaccels.
Definition: decode.h:52
#define av_malloc_array(a, b)
enum AVHWDeviceType device_type
The device type associated with the configuration.
Definition: avcodec.h:3398
#define FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS
Definition: avcodec.h:3207
#define FF_SUB_CHARENC_MODE_IGNORE
neither convert the subtitles, nor check them for valid UTF-8
Definition: avcodec.h:3099
const char * av_get_pix_fmt_name(enum AVPixelFormat pix_fmt)
Return the short name for a pixel format, NULL in case pix_fmt is unknown.
Definition: pixdesc.c:2279
ReplayGain information in the form of the AVReplayGain struct.
Definition: frame.h:76
enum AVSubtitleType type
Definition: avcodec.h:3846
uint8_t ** extended_data
pointers to the data planes/channels.
Definition: frame.h:265
int format
Definition: internal.h:110
AVBufferRef * hw_device_ctx
A reference to the AVHWDeviceContext describing the device which will be used by a hardware encoder/d...
Definition: avcodec.h:3249
int pkt_size
size of the corresponding packet containing the compressed frame.
Definition: frame.h:532
float min
Stereoscopic 3d metadata.
Definition: frame.h:63
AVPixelFormat
Pixel format.
Definition: pixfmt.h:60
The codec supports this format via the hw_device_ctx interface.
Definition: avcodec.h:3354
This structure stores compressed data.
Definition: avcodec.h:1407
AVCodecParameters * par_in
Parameters of the input stream.
Definition: avcodec.h:5715
#define AV_GET_BUFFER_FLAG_REF
The decoder will keep a reference to the frame and may reuse it later.
Definition: avcodec.h:1135
int nb_samples
number of audio samples (per channel) described by this frame
Definition: frame.h:284
int strict_std_compliance
strictly follow the standard (MPEG-4, ...).
Definition: avcodec.h:2576
#define AV_CODEC_CAP_DR1
Codec uses get_buffer() for allocating buffers and supports custom allocators.
Definition: avcodec.h:959
int64_t pts
Presentation timestamp in AVStream->time_base units; the time at which the decompressed packet will b...
Definition: avcodec.h:1423
enum AVPixelFormat sw_pix_fmt
Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
Definition: avcodec.h:3047
for(j=16;j >0;--j)
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
Copy only "metadata" fields from src to dst.
Definition: frame.c:652
#define AV_NOPTS_VALUE
Undefined timestamp value.
Definition: avutil.h:248
static void hwaccel_uninit(AVCodecContext *avctx)
Definition: decode.c:1287
#define tb
Definition: regdef.h:68
#define AV_WL32(p, v)
Definition: intreadwrite.h:426
static int bsfs_init(AVCodecContext *avctx)
Definition: decode.c:184
#define AV_CEIL_RSHIFT(a, b)
Definition: common.h:58
void * av_mallocz_array(size_t nmemb, size_t size)
Allocate a memory block for an array with av_mallocz().
Definition: mem.c:191
This side data should be associated with an audio stream and corresponds to enum AVAudioServiceType.
Definition: avcodec.h:1223
static uint8_t tmp[11]
Definition: aes_ctr.c:26