FFmpeg  4.0
frame.c
1 /*
2  * This file is part of FFmpeg.
3  *
4  * FFmpeg is free software; you can redistribute it and/or
5  * modify it under the terms of the GNU Lesser General Public
6  * License as published by the Free Software Foundation; either
7  * version 2.1 of the License, or (at your option) any later version.
8  *
9  * FFmpeg is distributed in the hope that it will be useful,
10  * but WITHOUT ANY WARRANTY; without even the implied warranty of
11  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
12  * Lesser General Public License for more details.
13  *
14  * You should have received a copy of the GNU Lesser General Public
15  * License along with FFmpeg; if not, write to the Free Software
16  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
17  */
18 
19 #include "channel_layout.h"
20 #include "avassert.h"
21 #include "buffer.h"
22 #include "common.h"
23 #include "dict.h"
24 #include "frame.h"
25 #include "imgutils.h"
26 #include "mem.h"
27 #include "samplefmt.h"
28 
29 #if FF_API_FRAME_GET_SET
30 MAKE_ACCESSORS(AVFrame, frame, int64_t, best_effort_timestamp)
31 MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_duration)
32 MAKE_ACCESSORS(AVFrame, frame, int64_t, pkt_pos)
33 MAKE_ACCESSORS(AVFrame, frame, int64_t, channel_layout)
34 MAKE_ACCESSORS(AVFrame, frame, int, channels)
35 MAKE_ACCESSORS(AVFrame, frame, int, sample_rate)
36 MAKE_ACCESSORS(AVFrame, frame, AVDictionary *, metadata)
37 MAKE_ACCESSORS(AVFrame, frame, int, decode_error_flags)
38 MAKE_ACCESSORS(AVFrame, frame, int, pkt_size)
39 MAKE_ACCESSORS(AVFrame, frame, enum AVColorSpace, colorspace)
40 MAKE_ACCESSORS(AVFrame, frame, enum AVColorRange, color_range)
41 #endif
42 
43 #define CHECK_CHANNELS_CONSISTENCY(frame) \
44  av_assert2(!(frame)->channel_layout || \
45  (frame)->channels == \
46  av_get_channel_layout_nb_channels((frame)->channel_layout))
47 
48 #if FF_API_FRAME_QP
49 struct qp_properties {
50  int stride;
51  int type;
52 };
53 
54 int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int qp_type)
55 {
56  struct qp_properties *p;
57  AVFrameSideData *sd;
58  AVBufferRef *ref;
59 
60 FF_DISABLE_DEPRECATION_WARNINGS
61  av_buffer_unref(&f->qp_table_buf);
62 
63  f->qp_table_buf = buf;
64  f->qscale_table = buf->data;
65  f->qstride = stride;
66  f->qscale_type = qp_type;
67 FF_ENABLE_DEPRECATION_WARNINGS
68 
69  av_frame_remove_side_data(f, AV_FRAME_DATA_QP_TABLE_PROPERTIES);
70  av_frame_remove_side_data(f, AV_FRAME_DATA_QP_TABLE_DATA);
71 
72  ref = av_buffer_ref(buf);
73  if (!av_frame_new_side_data_from_buf(f, AV_FRAME_DATA_QP_TABLE_DATA, ref)) {
74  av_buffer_unref(&ref);
75  return AVERROR(ENOMEM);
76  }
77 
78  sd = av_frame_new_side_data(f, AV_FRAME_DATA_QP_TABLE_PROPERTIES,
79  sizeof(struct qp_properties));
80  if (!sd)
81  return AVERROR(ENOMEM);
82 
83  p = (struct qp_properties *)sd->data;
84  p->stride = stride;
85  p->type = qp_type;
86 
87  return 0;
88 }
89 
90 int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type)
91 {
92  AVBufferRef *buf = NULL;
93 
94  *stride = 0;
95  *type = 0;
96 
97 FF_DISABLE_DEPRECATION_WARNINGS
98  if (f->qp_table_buf) {
99  *stride = f->qstride;
100  *type = f->qscale_type;
101  buf = f->qp_table_buf;
102 FF_ENABLE_DEPRECATION_WARNINGS
103  } else {
104  AVFrameSideData *sd;
105  struct qp_properties *p;
106  sd = av_frame_get_side_data(f, AV_FRAME_DATA_QP_TABLE_PROPERTIES);
107  if (!sd)
108  return NULL;
109  p = (struct qp_properties *)sd->data;
110  sd = av_frame_get_side_data(f, AV_FRAME_DATA_QP_TABLE_DATA);
111  if (!sd)
112  return NULL;
113  *stride = p->stride;
114  *type = p->type;
115  buf = sd->buf;
116  }
117 
118  return buf ? buf->data : NULL;
119 }
120 #endif
121 
122 const char *av_get_colorspace_name(enum AVColorSpace val)
123 {
124  static const char * const name[] = {
125  [AVCOL_SPC_RGB] = "GBR",
126  [AVCOL_SPC_BT709] = "bt709",
127  [AVCOL_SPC_FCC] = "fcc",
128  [AVCOL_SPC_BT470BG] = "bt470bg",
129  [AVCOL_SPC_SMPTE170M] = "smpte170m",
130  [AVCOL_SPC_SMPTE240M] = "smpte240m",
131  [AVCOL_SPC_YCOCG] = "YCgCo",
132  };
133  if ((unsigned)val >= FF_ARRAY_ELEMS(name))
134  return NULL;
135  return name[val];
136 }
137 
138 static void get_frame_defaults(AVFrame *frame)
139 {
140  if (frame->extended_data != frame->data)
141  av_freep(&frame->extended_data);
142 
143  memset(frame, 0, sizeof(*frame));
144 
145  frame->pts =
146  frame->pkt_dts = AV_NOPTS_VALUE;
147 #if FF_API_PKT_PTS
148 FF_DISABLE_DEPRECATION_WARNINGS
149  frame->pkt_pts = AV_NOPTS_VALUE;
150 FF_ENABLE_DEPRECATION_WARNINGS
151 #endif
152  frame->best_effort_timestamp = AV_NOPTS_VALUE;
153  frame->pkt_duration = 0;
154  frame->pkt_pos = -1;
155  frame->pkt_size = -1;
156  frame->key_frame = 1;
157  frame->sample_aspect_ratio = (AVRational){ 0, 1 };
158  frame->format = -1; /* unknown */
159  frame->extended_data = frame->data;
160  frame->color_primaries = AVCOL_PRI_UNSPECIFIED;
161  frame->color_trc = AVCOL_TRC_UNSPECIFIED;
162  frame->colorspace = AVCOL_SPC_UNSPECIFIED;
163  frame->color_range = AVCOL_RANGE_UNSPECIFIED;
164  frame->chroma_location = AVCHROMA_LOC_UNSPECIFIED;
165  frame->flags = 0;
166 }
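/* Note: get_frame_defaults() is the common reset path; it is called from
 * av_frame_alloc(), av_frame_unref() and av_frame_move_ref() below, so any
 * AVFrame field with a non-zero default has to be initialized here. */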
167 
168 static void free_side_data(AVFrameSideData **ptr_sd)
169 {
170  AVFrameSideData *sd = *ptr_sd;
171 
172  av_buffer_unref(&sd->buf);
173  av_dict_free(&sd->metadata);
174  av_freep(ptr_sd);
175 }
176 
177 static void wipe_side_data(AVFrame *frame)
178 {
179  int i;
180 
181  for (i = 0; i < frame->nb_side_data; i++) {
182  free_side_data(&frame->side_data[i]);
183  }
184  frame->nb_side_data = 0;
185 
186  av_freep(&frame->side_data);
187 }
188 
189 AVFrame *av_frame_alloc(void)
190 {
191  AVFrame *frame = av_mallocz(sizeof(*frame));
192 
193  if (!frame)
194  return NULL;
195 
196  frame->extended_data = NULL;
197  get_frame_defaults(frame);
198 
199  return frame;
200 }
201 
202 void av_frame_free(AVFrame **frame)
203 {
204  if (!frame || !*frame)
205  return;
206 
207  av_frame_unref(*frame);
208  av_freep(frame);
209 }
210 
211 static int get_video_buffer(AVFrame *frame, int align)
212 {
213  const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(frame->format);
214  int ret, i;
215 
216  if (!desc)
217  return AVERROR(EINVAL);
218 
219  if ((ret = av_image_check_size(frame->width, frame->height, 0, NULL)) < 0)
220  return ret;
221 
222  if (!frame->linesize[0]) {
223  if (align <= 0)
224  align = 32; /* STRIDE_ALIGN. Should be av_cpu_max_align() */
225 
226  for(i=1; i<=align; i+=i) {
227  ret = av_image_fill_linesizes(frame->linesize, frame->format,
228  FFALIGN(frame->width, i));
229  if (ret < 0)
230  return ret;
231  if (!(frame->linesize[0] & (align-1)))
232  break;
233  }
234 
235  for (i = 0; i < 4 && frame->linesize[i]; i++)
236  frame->linesize[i] = FFALIGN(frame->linesize[i], align);
237  }
238 
239  for (i = 0; i < 4 && frame->linesize[i]; i++) {
240  int h = FFALIGN(frame->height, 32);
241  if (i == 1 || i == 2)
242  h = AV_CEIL_RSHIFT(h, desc->log2_chroma_h);
243 
244  frame->buf[i] = av_buffer_alloc(frame->linesize[i] * h + 16 + 16/*STRIDE_ALIGN*/ - 1);
245  if (!frame->buf[i])
246  goto fail;
247 
248  frame->data[i] = frame->buf[i]->data;
249  }
250  if (desc->flags & AV_PIX_FMT_FLAG_PAL || desc->flags & FF_PSEUDOPAL) {
251  av_buffer_unref(&frame->buf[1]);
252  frame->buf[1] = av_buffer_alloc(AVPALETTE_SIZE);
253  if (!frame->buf[1])
254  goto fail;
255  frame->data[1] = frame->buf[1]->data;
256  }
257 
258  frame->extended_data = frame->data;
259 
260  return 0;
261 fail:
262  av_frame_unref(frame);
263  return AVERROR(ENOMEM);
264 }
265 
266 static int get_audio_buffer(AVFrame *frame, int align)
267 {
268  int channels;
269  int planar = av_sample_fmt_is_planar(frame->format);
270  int planes;
271  int ret, i;
272 
273  if (!frame->channels)
274  return AVERROR(EINVAL);
275 
276  channels = frame->channels;
277  planes = planar ? channels : 1;
278 
279  CHECK_CHANNELS_CONSISTENCY(frame);
280  if (!frame->linesize[0]) {
281  ret = av_samples_get_buffer_size(&frame->linesize[0], channels,
282  frame->nb_samples, frame->format,
283  align);
284  if (ret < 0)
285  return ret;
286  }
287 
288  if (planes > AV_NUM_DATA_POINTERS) {
289  frame->extended_data = av_mallocz_array(planes,
290  sizeof(*frame->extended_data));
291  frame->extended_buf = av_mallocz_array(planes - AV_NUM_DATA_POINTERS,
292  sizeof(*frame->extended_buf));
293  if (!frame->extended_data || !frame->extended_buf) {
294  av_freep(&frame->extended_data);
295  av_freep(&frame->extended_buf);
296  return AVERROR(ENOMEM);
297  }
298  frame->nb_extended_buf = planes - AV_NUM_DATA_POINTERS;
299  } else
300  frame->extended_data = frame->data;
301 
302  for (i = 0; i < FFMIN(planes, AV_NUM_DATA_POINTERS); i++) {
303  frame->buf[i] = av_buffer_alloc(frame->linesize[0]);
304  if (!frame->buf[i]) {
305  av_frame_unref(frame);
306  return AVERROR(ENOMEM);
307  }
308  frame->extended_data[i] = frame->data[i] = frame->buf[i]->data;
309  }
310  for (i = 0; i < planes - AV_NUM_DATA_POINTERS; i++) {
311  frame->extended_buf[i] = av_buffer_alloc(frame->linesize[0]);
312  if (!frame->extended_buf[i]) {
313  av_frame_unref(frame);
314  return AVERROR(ENOMEM);
315  }
316  frame->extended_data[i + AV_NUM_DATA_POINTERS] = frame->extended_buf[i]->data;
317  }
318  return 0;
319 
320 }
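/* Note: for planar audio with more than AV_NUM_DATA_POINTERS (8) channels the
 * extra plane pointers live in extended_data/extended_buf above, so audio
 * planes should always be accessed through frame->extended_data. */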
321 
322 int av_frame_get_buffer(AVFrame *frame, int align)
323 {
324  if (frame->format < 0)
325  return AVERROR(EINVAL);
326 
327  if (frame->width > 0 && frame->height > 0)
328  return get_video_buffer(frame, align);
329  else if (frame->nb_samples > 0 && (frame->channel_layout || frame->channels > 0))
330  return get_audio_buffer(frame, align);
331 
332  return AVERROR(EINVAL);
333 }
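/* Usage sketch (illustrative only; assumes the public AVFrame API from frame.h):
 *
 *     AVFrame *f = av_frame_alloc();
 *     if (f) {
 *         f->format = AV_PIX_FMT_YUV420P;
 *         f->width  = 640;
 *         f->height = 480;
 *         if (av_frame_get_buffer(f, 0) < 0)   // 0 selects the default alignment
 *             av_frame_free(&f);
 *     }
 */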
334 
335 static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
336 {
337  int i;
338 
339  dst->key_frame = src->key_frame;
340  dst->pict_type = src->pict_type;
341  dst->sample_aspect_ratio = src->sample_aspect_ratio;
342  dst->crop_top = src->crop_top;
343  dst->crop_bottom = src->crop_bottom;
344  dst->crop_left = src->crop_left;
345  dst->crop_right = src->crop_right;
346  dst->pts = src->pts;
347  dst->repeat_pict = src->repeat_pict;
348  dst->interlaced_frame = src->interlaced_frame;
349  dst->top_field_first = src->top_field_first;
350  dst->palette_has_changed = src->palette_has_changed;
351  dst->sample_rate = src->sample_rate;
352  dst->opaque = src->opaque;
353 #if FF_API_PKT_PTS
354 FF_DISABLE_DEPRECATION_WARNINGS
355  dst->pkt_pts = src->pkt_pts;
356 FF_ENABLE_DEPRECATION_WARNINGS
357 #endif
358  dst->pkt_dts = src->pkt_dts;
359  dst->pkt_pos = src->pkt_pos;
360  dst->pkt_size = src->pkt_size;
361  dst->pkt_duration = src->pkt_duration;
362  dst->reordered_opaque = src->reordered_opaque;
363  dst->quality = src->quality;
364  dst->best_effort_timestamp = src->best_effort_timestamp;
365  dst->coded_picture_number = src->coded_picture_number;
366  dst->display_picture_number = src->display_picture_number;
367  dst->flags = src->flags;
368  dst->decode_error_flags = src->decode_error_flags;
369  dst->color_primaries = src->color_primaries;
370  dst->color_trc = src->color_trc;
371  dst->colorspace = src->colorspace;
372  dst->color_range = src->color_range;
373  dst->chroma_location = src->chroma_location;
374 
375  av_dict_copy(&dst->metadata, src->metadata, 0);
376 
377 #if FF_API_ERROR_FRAME
378 FF_DISABLE_DEPRECATION_WARNINGS
379  memcpy(dst->error, src->error, sizeof(dst->error));
380 FF_ENABLE_DEPRECATION_WARNINGS
381 #endif
382 
383  for (i = 0; i < src->nb_side_data; i++) {
384  const AVFrameSideData *sd_src = src->side_data[i];
385  AVFrameSideData *sd_dst;
386  if ( sd_src->type == AV_FRAME_DATA_PANSCAN
387  && (src->width != dst->width || src->height != dst->height))
388  continue;
389  if (force_copy) {
390  sd_dst = av_frame_new_side_data(dst, sd_src->type,
391  sd_src->size);
392  if (!sd_dst) {
393  wipe_side_data(dst);
394  return AVERROR(ENOMEM);
395  }
396  memcpy(sd_dst->data, sd_src->data, sd_src->size);
397  } else {
398  AVBufferRef *ref = av_buffer_ref(sd_src->buf);
399  sd_dst = av_frame_new_side_data_from_buf(dst, sd_src->type, ref);
400  if (!sd_dst) {
401  av_buffer_unref(&ref);
402  wipe_side_data(dst);
403  return AVERROR(ENOMEM);
404  }
405  }
406  av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
407  }
408 
409 #if FF_API_FRAME_QP
410 FF_DISABLE_DEPRECATION_WARNINGS
411  dst->qscale_table = NULL;
412  dst->qstride = 0;
413  dst->qscale_type = 0;
414  av_buffer_unref(&dst->qp_table_buf);
415  if (src->qp_table_buf) {
416  dst->qp_table_buf = av_buffer_ref(src->qp_table_buf);
417  if (dst->qp_table_buf) {
418  dst->qscale_table = dst->qp_table_buf->data;
419  dst->qstride = src->qstride;
420  dst->qscale_type = src->qscale_type;
421  }
422  }
423 FF_ENABLE_DEPRECATION_WARNINGS
424 #endif
425 
426  av_buffer_unref(&dst->opaque_ref);
427  av_buffer_unref(&dst->private_ref);
428  if (src->opaque_ref) {
429  dst->opaque_ref = av_buffer_ref(src->opaque_ref);
430  if (!dst->opaque_ref)
431  return AVERROR(ENOMEM);
432  }
433  if (src->private_ref) {
434  dst->private_ref = av_buffer_ref(src->private_ref);
435  if (!dst->private_ref)
436  return AVERROR(ENOMEM);
437  }
438  return 0;
439 }
440 
441 int av_frame_ref(AVFrame *dst, const AVFrame *src)
442 {
443  int i, ret = 0;
444 
445  av_assert1(dst->width == 0 && dst->height == 0);
446  av_assert1(dst->channels == 0);
447 
448  dst->format = src->format;
449  dst->width = src->width;
450  dst->height = src->height;
451  dst->channels = src->channels;
452  dst->channel_layout = src->channel_layout;
453  dst->nb_samples = src->nb_samples;
454 
455  ret = frame_copy_props(dst, src, 0);
456  if (ret < 0)
457  return ret;
458 
459  /* duplicate the frame data if it's not refcounted */
460  if (!src->buf[0]) {
461  ret = av_frame_get_buffer(dst, 32);
462  if (ret < 0)
463  return ret;
464 
465  ret = av_frame_copy(dst, src);
466  if (ret < 0)
467  av_frame_unref(dst);
468 
469  return ret;
470  }
471 
472  /* ref the buffers */
473  for (i = 0; i < FF_ARRAY_ELEMS(src->buf); i++) {
474  if (!src->buf[i])
475  continue;
476  dst->buf[i] = av_buffer_ref(src->buf[i]);
477  if (!dst->buf[i]) {
478  ret = AVERROR(ENOMEM);
479  goto fail;
480  }
481  }
482 
483  if (src->extended_buf) {
484  dst->extended_buf = av_mallocz_array(sizeof(*dst->extended_buf),
485  src->nb_extended_buf);
486  if (!dst->extended_buf) {
487  ret = AVERROR(ENOMEM);
488  goto fail;
489  }
490  dst->nb_extended_buf = src->nb_extended_buf;
491 
492  for (i = 0; i < src->nb_extended_buf; i++) {
493  dst->extended_buf[i] = av_buffer_ref(src->extended_buf[i]);
494  if (!dst->extended_buf[i]) {
495  ret = AVERROR(ENOMEM);
496  goto fail;
497  }
498  }
499  }
500 
501  if (src->hw_frames_ctx) {
502  dst->hw_frames_ctx = av_buffer_ref(src->hw_frames_ctx);
503  if (!dst->hw_frames_ctx) {
504  ret = AVERROR(ENOMEM);
505  goto fail;
506  }
507  }
508 
509  /* duplicate extended data */
510  if (src->extended_data != src->data) {
511  int ch = src->channels;
512 
513  if (!ch) {
514  ret = AVERROR(EINVAL);
515  goto fail;
516  }
517  CHECK_CHANNELS_CONSISTENCY(src);
518 
519  dst->extended_data = av_malloc_array(sizeof(*dst->extended_data), ch);
520  if (!dst->extended_data) {
521  ret = AVERROR(ENOMEM);
522  goto fail;
523  }
524  memcpy(dst->extended_data, src->extended_data, sizeof(*src->extended_data) * ch);
525  } else
526  dst->extended_data = dst->data;
527 
528  memcpy(dst->data, src->data, sizeof(src->data));
529  memcpy(dst->linesize, src->linesize, sizeof(src->linesize));
530 
531  return 0;
532 
533 fail:
534  av_frame_unref(dst);
535  return ret;
536 }
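/* Note: av_frame_ref() only takes new buffer references when src is refcounted
 * (src->buf[0] set); for non-refcounted frames it allocates buffers and copies
 * the data, so dst remains valid after src is unreferenced either way. */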
537 
538 AVFrame *av_frame_clone(const AVFrame *src)
539 {
540  AVFrame *ret = av_frame_alloc();
541 
542  if (!ret)
543  return NULL;
544 
545  if (av_frame_ref(ret, src) < 0)
546  av_frame_free(&ret);
547 
548  return ret;
549 }
550 
551 void av_frame_unref(AVFrame *frame)
552 {
553  int i;
554 
555  if (!frame)
556  return;
557 
558  wipe_side_data(frame);
559 
560  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
561  av_buffer_unref(&frame->buf[i]);
562  for (i = 0; i < frame->nb_extended_buf; i++)
563  av_buffer_unref(&frame->extended_buf[i]);
564  av_freep(&frame->extended_buf);
565  av_dict_free(&frame->metadata);
566 #if FF_API_FRAME_QP
567 FF_DISABLE_DEPRECATION_WARNINGS
568  av_buffer_unref(&frame->qp_table_buf);
569 FF_ENABLE_DEPRECATION_WARNINGS
570 #endif
571 
572  av_buffer_unref(&frame->hw_frames_ctx);
573 
574  av_buffer_unref(&frame->opaque_ref);
575  av_buffer_unref(&frame->private_ref);
576 
577  get_frame_defaults(frame);
578 }
579 
580 void av_frame_move_ref(AVFrame *dst, AVFrame *src)
581 {
582  av_assert1(dst->width == 0 && dst->height == 0);
583  av_assert1(dst->channels == 0);
584 
585  *dst = *src;
586  if (src->extended_data == src->data)
587  dst->extended_data = dst->data;
588  memset(src, 0, sizeof(*src));
589  get_frame_defaults(src);
590 }
591 
592 int av_frame_is_writable(AVFrame *frame)
593 {
594  int i, ret = 1;
595 
596  /* assume non-refcounted frames are not writable */
597  if (!frame->buf[0])
598  return 0;
599 
600  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf); i++)
601  if (frame->buf[i])
602  ret &= !!av_buffer_is_writable(frame->buf[i]);
603  for (i = 0; i < frame->nb_extended_buf; i++)
604  ret &= !!av_buffer_is_writable(frame->extended_buf[i]);
605 
606  return ret;
607 }
608 
609 int av_frame_make_writable(AVFrame *frame)
610 {
611  AVFrame tmp;
612  int ret;
613 
614  if (!frame->buf[0])
615  return AVERROR(EINVAL);
616 
617  if (av_frame_is_writable(frame))
618  return 0;
619 
620  memset(&tmp, 0, sizeof(tmp));
621  tmp.format = frame->format;
622  tmp.width = frame->width;
623  tmp.height = frame->height;
624  tmp.channels = frame->channels;
625  tmp.channel_layout = frame->channel_layout;
626  tmp.nb_samples = frame->nb_samples;
627  ret = av_frame_get_buffer(&tmp, 32);
628  if (ret < 0)
629  return ret;
630 
631  ret = av_frame_copy(&tmp, frame);
632  if (ret < 0) {
633  av_frame_unref(&tmp);
634  return ret;
635  }
636 
637  ret = av_frame_copy_props(&tmp, frame);
638  if (ret < 0) {
639  av_frame_unref(&tmp);
640  return ret;
641  }
642 
643  av_frame_unref(frame);
644 
645  *frame = tmp;
646  if (tmp.data == tmp.extended_data)
647  frame->extended_data = frame->data;
648 
649  return 0;
650 }
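/* Copy-on-write sketch (illustrative): before modifying plane data that might
 * be shared with other references, a caller would do
 *
 *     int ret = av_frame_make_writable(frame);
 *     if (ret < 0)
 *         return ret;
 *
 * which reallocates and copies only when some underlying buffer is shared. */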
651 
652 int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
653 {
654  return frame_copy_props(dst, src, 1);
655 }
656 
657 AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane)
658 {
659  uint8_t *data;
660  int planes, i;
661 
662  if (frame->nb_samples) {
663  int channels = frame->channels;
664  if (!channels)
665  return NULL;
666  CHECK_CHANNELS_CONSISTENCY(frame);
667  planes = av_sample_fmt_is_planar(frame->format) ? channels : 1;
668  } else
669  planes = 4;
670 
671  if (plane < 0 || plane >= planes || !frame->extended_data[plane])
672  return NULL;
673  data = frame->extended_data[plane];
674 
675  for (i = 0; i < FF_ARRAY_ELEMS(frame->buf) && frame->buf[i]; i++) {
676  AVBufferRef *buf = frame->buf[i];
677  if (data >= buf->data && data < buf->data + buf->size)
678  return buf;
679  }
680  for (i = 0; i < frame->nb_extended_buf; i++) {
681  AVBufferRef *buf = frame->extended_buf[i];
682  if (data >= buf->data && data < buf->data + buf->size)
683  return buf;
684  }
685  return NULL;
686 }
687 
688 AVFrameSideData *av_frame_new_side_data_from_buf(AVFrame *frame,
689  enum AVFrameSideDataType type,
690  AVBufferRef *buf)
691 {
692  AVFrameSideData *ret, **tmp;
693 
694  if (!buf)
695  return NULL;
696 
697  if (frame->nb_side_data > INT_MAX / sizeof(*frame->side_data) - 1)
698  return NULL;
699 
700  tmp = av_realloc(frame->side_data,
701  (frame->nb_side_data + 1) * sizeof(*frame->side_data));
702  if (!tmp)
703  return NULL;
704  frame->side_data = tmp;
705 
706  ret = av_mallocz(sizeof(*ret));
707  if (!ret)
708  return NULL;
709 
710  ret->buf = buf;
711  ret->data = ret->buf->data;
712  ret->size = buf->size;
713  ret->type = type;
714 
715  frame->side_data[frame->nb_side_data++] = ret;
716 
717  return ret;
718 }
719 
720 AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
721  enum AVFrameSideDataType type,
722  int size)
723 {
724  AVFrameSideData *ret;
725  AVBufferRef *buf = av_buffer_alloc(size);
726  ret = av_frame_new_side_data_from_buf(frame, type, buf);
727  if (!ret)
728  av_buffer_unref(&buf);
729  return ret;
730 }
731 
732 AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
733  enum AVFrameSideDataType type)
734 {
735  int i;
736 
737  for (i = 0; i < frame->nb_side_data; i++) {
738  if (frame->side_data[i]->type == type)
739  return frame->side_data[i];
740  }
741  return NULL;
742 }
743 
744 static int frame_copy_video(AVFrame *dst, const AVFrame *src)
745 {
746  const uint8_t *src_data[4];
747  int i, planes;
748 
749  if (dst->width < src->width ||
750  dst->height < src->height)
751  return AVERROR(EINVAL);
752 
753  planes = av_pix_fmt_count_planes(dst->format);
754  for (i = 0; i < planes; i++)
755  if (!dst->data[i] || !src->data[i])
756  return AVERROR(EINVAL);
757 
758  memcpy(src_data, src->data, sizeof(src_data));
759  av_image_copy(dst->data, dst->linesize,
760  src_data, src->linesize,
761  dst->format, src->width, src->height);
762 
763  return 0;
764 }
765 
766 static int frame_copy_audio(AVFrame *dst, const AVFrame *src)
767 {
768  int planar = av_sample_fmt_is_planar(dst->format);
769  int channels = dst->channels;
770  int planes = planar ? channels : 1;
771  int i;
772 
773  if (dst->nb_samples != src->nb_samples ||
774  dst->channels != src->channels ||
775  dst->channel_layout != src->channel_layout)
776  return AVERROR(EINVAL);
777 
778  CHECK_CHANNELS_CONSISTENCY(dst);
779 
780  for (i = 0; i < planes; i++)
781  if (!dst->extended_data[i] || !src->extended_data[i])
782  return AVERROR(EINVAL);
783 
784  av_samples_copy(dst->extended_data, src->extended_data, 0, 0,
785  dst->nb_samples, channels, dst->format);
786 
787  return 0;
788 }
789 
790 int av_frame_copy(AVFrame *dst, const AVFrame *src)
791 {
792  if (dst->format != src->format || dst->format < 0)
793  return AVERROR(EINVAL);
794 
795  if (dst->width > 0 && dst->height > 0)
796  return frame_copy_video(dst, src);
797  else if (dst->nb_samples > 0 && dst->channels > 0)
798  return frame_copy_audio(dst, src);
799 
800  return AVERROR(EINVAL);
801 }
802 
803 void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type)
804 {
805  int i;
806 
807  for (i = 0; i < frame->nb_side_data; i++) {
808  AVFrameSideData *sd = frame->side_data[i];
809  if (sd->type == type) {
810  free_side_data(&frame->side_data[i]);
811  frame->side_data[i] = frame->side_data[frame->nb_side_data - 1];
812  frame->nb_side_data--;
813  }
814  }
815 }
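/* Note: removal swaps the last side-data entry into the freed slot, so the
 * relative order of the remaining entries is not preserved. */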
816 
817 const char *av_frame_side_data_name(enum AVFrameSideDataType type)
818 {
819  switch(type) {
820  case AV_FRAME_DATA_PANSCAN: return "AVPanScan";
821  case AV_FRAME_DATA_A53_CC: return "ATSC A53 Part 4 Closed Captions";
822  case AV_FRAME_DATA_STEREO3D: return "Stereoscopic 3d metadata";
823  case AV_FRAME_DATA_MATRIXENCODING: return "AVMatrixEncoding";
824  case AV_FRAME_DATA_DOWNMIX_INFO: return "Metadata relevant to a downmix procedure";
825  case AV_FRAME_DATA_REPLAYGAIN: return "AVReplayGain";
826  case AV_FRAME_DATA_DISPLAYMATRIX: return "3x3 displaymatrix";
827  case AV_FRAME_DATA_AFD: return "Active format description";
828  case AV_FRAME_DATA_MOTION_VECTORS: return "Motion vectors";
829  case AV_FRAME_DATA_SKIP_SAMPLES: return "Skip samples";
830  case AV_FRAME_DATA_AUDIO_SERVICE_TYPE: return "Audio service type";
831  case AV_FRAME_DATA_MASTERING_DISPLAY_METADATA: return "Mastering display metadata";
832  case AV_FRAME_DATA_CONTENT_LIGHT_LEVEL: return "Content light level metadata";
833  case AV_FRAME_DATA_GOP_TIMECODE: return "GOP timecode";
834  case AV_FRAME_DATA_ICC_PROFILE: return "ICC profile";
835  case AV_FRAME_DATA_QP_TABLE_PROPERTIES: return "QP table properties";
836  case AV_FRAME_DATA_QP_TABLE_DATA: return "QP table data";
837  }
838  return NULL;
839 }
840 
841 static int calc_cropping_offsets(size_t offsets[4], const AVFrame *frame,
842  const AVPixFmtDescriptor *desc)
843 {
844  int i, j;
845 
846  for (i = 0; frame->data[i]; i++) {
847  const AVComponentDescriptor *comp = NULL;
848  int shift_x = (i == 1 || i == 2) ? desc->log2_chroma_w : 0;
849  int shift_y = (i == 1 || i == 2) ? desc->log2_chroma_h : 0;
850 
851  if (desc->flags & (AV_PIX_FMT_FLAG_PAL | FF_PSEUDOPAL) && i == 1) {
852  offsets[i] = 0;
853  break;
854  }
855 
856  /* find any component descriptor for this plane */
857  for (j = 0; j < desc->nb_components; j++) {
858  if (desc->comp[j].plane == i) {
859  comp = &desc->comp[j];
860  break;
861  }
862  }
863  if (!comp)
864  return AVERROR_BUG;
865 
866  offsets[i] = (frame->crop_top >> shift_y) * frame->linesize[i] +
867  (frame->crop_left >> shift_x) * comp->step;
868  }
869 
870  return 0;
871 }
872 
873 int av_frame_apply_cropping(AVFrame *frame, int flags)
874 {
875  const AVPixFmtDescriptor *desc;
876  size_t offsets[4];
877  int i;
878 
879  if (!(frame->width > 0 && frame->height > 0))
880  return AVERROR(EINVAL);
881 
882  if (frame->crop_left >= INT_MAX - frame->crop_right ||
883  frame->crop_top >= INT_MAX - frame->crop_bottom ||
884  (frame->crop_left + frame->crop_right) >= frame->width ||
885  (frame->crop_top + frame->crop_bottom) >= frame->height)
886  return AVERROR(ERANGE);
887 
888  desc = av_pix_fmt_desc_get(frame->format);
889  if (!desc)
890  return AVERROR_BUG;
891 
892  /* Apply just the right/bottom cropping for hwaccel formats. Bitstream
893  * formats cannot be easily handled here either (and corresponding decoders
894  * should not export any cropping anyway), so do the same for those as well.
895  * */
896  if (desc->flags & (AV_PIX_FMT_FLAG_BITSTREAM | AV_PIX_FMT_FLAG_HWACCEL)) {
897  frame->width -= frame->crop_right;
898  frame->height -= frame->crop_bottom;
899  frame->crop_right = 0;
900  frame->crop_bottom = 0;
901  return 0;
902  }
903 
904  /* calculate the offsets for each plane */
905  calc_cropping_offsets(offsets, frame, desc);
906 
907  /* adjust the offsets to avoid breaking alignment */
908  if (!(flags & AV_FRAME_CROP_UNALIGNED)) {
909  int log2_crop_align = frame->crop_left ? ff_ctz(frame->crop_left) : INT_MAX;
910  int min_log2_align = INT_MAX;
911 
912  for (i = 0; frame->data[i]; i++) {
913  int log2_align = offsets[i] ? ff_ctz(offsets[i]) : INT_MAX;
914  min_log2_align = FFMIN(log2_align, min_log2_align);
915  }
916 
917  /* we assume, and it should always be true, that the data alignment is
918  * related to the cropping alignment by a constant power-of-2 factor */
919  if (log2_crop_align < min_log2_align)
920  return AVERROR_BUG;
921 
922  if (min_log2_align < 5) {
923  frame->crop_left &= ~((1 << (5 + log2_crop_align - min_log2_align)) - 1);
924  calc_cropping_offsets(offsets, frame, desc);
925  }
926  }
927 
928  for (i = 0; frame->data[i]; i++)
929  frame->data[i] += offsets[i];
930 
931  frame->width -= (frame->crop_left + frame->crop_right);
932  frame->height -= (frame->crop_top + frame->crop_bottom);
933  frame->crop_left = 0;
934  frame->crop_right = 0;
935  frame->crop_top = 0;
936  frame->crop_bottom = 0;
937 
938  return 0;
939 }