FFmpeg 4.0
videotoolbox.c
1 /*
2  * Videotoolbox hardware acceleration
3  *
4  * copyright (c) 2012 Sebastien Zwickert
5  *
6  * This file is part of FFmpeg.
7  *
8  * FFmpeg is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU Lesser General Public
10  * License as published by the Free Software Foundation; either
11  * version 2.1 of the License, or (at your option) any later version.
12  *
13  * FFmpeg is distributed in the hope that it will be useful,
14  * but WITHOUT ANY WARRANTY; without even the implied warranty of
15  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  * Lesser General Public License for more details.
17  *
18  * You should have received a copy of the GNU Lesser General Public
19  * License along with FFmpeg; if not, write to the Free Software
20  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
21  */
22 
23 #include "config.h"
24 #include "videotoolbox.h"
25 #include "libavutil/hwcontext_videotoolbox.h"
26 #include "vt_internal.h"
27 #include "libavutil/avutil.h"
28 #include "libavutil/hwcontext.h"
29 #include "bytestream.h"
30 #include "decode.h"
31 #include "h264dec.h"
32 #include "hevcdec.h"
33 #include "mpegvideo.h"
34 #include <TargetConditionals.h>
35 
36 #ifndef kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder
37 # define kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder CFSTR("RequireHardwareAcceleratedVideoDecoder")
38 #endif
39 
40 #if !HAVE_KCMVIDEOCODECTYPE_HEVC
41 enum { kCMVideoCodecType_HEVC = 'hvc1' };
42 #endif
43 
44 #define VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING 12
45 
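/* AVBufferRef free callback used below: the buffer's data holds a
 * CVPixelBufferRef, so release the pixel buffer before freeing the wrapper. */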
46 static void videotoolbox_buffer_release(void *opaque, uint8_t *data)
47 {
48  CVPixelBufferRef cv_buffer = *(CVPixelBufferRef *)data;
49  CVPixelBufferRelease(cv_buffer);
50 
51  av_free(data);
52 }
53 
54 static int videotoolbox_buffer_copy(VTContext *vtctx,
55  const uint8_t *buffer,
56  uint32_t size)
57 {
58  void *tmp;
59 
60  tmp = av_fast_realloc(vtctx->bitstream,
61  &vtctx->allocated_size,
62  size);
63 
64  if (!tmp)
65  return AVERROR(ENOMEM);
66 
67  vtctx->bitstream = tmp;
68  memcpy(vtctx->bitstream, buffer, size);
69  vtctx->bitstream_size = size;
70 
71  return 0;
72 }
73 
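/* Post-process hook run right before a frame is returned to the caller:
 * expose the decoded CVPixelBufferRef through data[3], as expected for
 * AV_PIX_FMT_VIDEOTOOLBOX frames. */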
74 static int videotoolbox_postproc_frame(void *avctx, AVFrame *frame)
75 {
76  CVPixelBufferRef ref = *(CVPixelBufferRef *)frame->buf[0]->data;
77 
78  if (!ref) {
79  av_log(avctx, AV_LOG_ERROR, "No frame decoded?\n");
80  av_frame_unref(frame);
81  return AVERROR_EXTERNAL;
82  }
83 
84  frame->data[3] = (uint8_t*)ref;
85 
86  return 0;
87 }
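/* ff_videotoolbox_alloc_frame() below allocates a placeholder frame: buf[0]
 * wraps storage for a CVPixelBufferRef (filled in later by
 * videotoolbox_set_frame) and the post-process hook above is registered. */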
88 
89 int ff_videotoolbox_alloc_frame(AVCodecContext *avctx, AVFrame *frame)
90 {
91  size_t size = sizeof(CVPixelBufferRef);
92  uint8_t *data = NULL;
93  AVBufferRef *buf = NULL;
94  int ret = ff_attach_decode_data(frame);
95  FrameDecodeData *fdd;
96  if (ret < 0)
97  return ret;
98 
99  data = av_mallocz(size);
100  if (!data)
101  return AVERROR(ENOMEM);
102  buf = av_buffer_create(data, size, videotoolbox_buffer_release, NULL, 0);
103  if (!buf) {
104  av_freep(&data);
105  return AVERROR(ENOMEM);
106  }
107  frame->buf[0] = buf;
108 
109  fdd = (FrameDecodeData*)frame->private_ref->data;
110  fdd->post_process = videotoolbox_postproc_frame;
111 
112  frame->width = avctx->width;
113  frame->height = avctx->height;
114  frame->format = avctx->pix_fmt;
115 
116  return 0;
117 }
118 
119 #define AV_W8(p, v) *(p) = (v)
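/* ff_videotoolbox_avcc_extradata_create() builds an ISO/IEC 14496-15
 * AVCDecoderConfigurationRecord from the active SPS/PPS; it is attached as
 * the "avcC" sample description extension when the format description is
 * created. */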
120 
121 CFDataRef ff_videotoolbox_avcc_extradata_create(AVCodecContext *avctx)
122 {
123  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
124  H264Context *h = avctx->priv_data;
125  CFDataRef data = NULL;
126  uint8_t *p;
127  int vt_extradata_size = 6 + 2 + h->ps.sps->data_size + 3 + h->ps.pps->data_size;
128  uint8_t *vt_extradata = av_malloc(vt_extradata_size);
129  if (!vt_extradata)
130  return NULL;
131 
132  p = vt_extradata;
133 
134  AV_W8(p + 0, 1); /* version */
135  AV_W8(p + 1, h->ps.sps->data[1]); /* profile */
136  AV_W8(p + 2, h->ps.sps->data[2]); /* profile compat */
137  AV_W8(p + 3, h->ps.sps->data[3]); /* level */
138  AV_W8(p + 4, 0xff); /* 6 bits reserved (111111) + 2 bits nal size length - 3 (11) */
139  AV_W8(p + 5, 0xe1); /* 3 bits reserved (111) + 5 bits number of sps (00001) */
140  AV_WB16(p + 6, h->ps.sps->data_size);
141  memcpy(p + 8, h->ps.sps->data, h->ps.sps->data_size);
142  p += 8 + h->ps.sps->data_size;
143  AV_W8(p + 0, 1); /* number of pps */
144  AV_WB16(p + 1, h->ps.pps->data_size);
145  memcpy(p + 3, h->ps.pps->data, h->ps.pps->data_size);
146 
147  p += 3 + h->ps.pps->data_size;
148  av_assert0(p - vt_extradata == vt_extradata_size);
149 
150  // save sps header (profile/level) used to create decoder session,
151  // so we can detect changes and recreate it.
152  if (vtctx)
153  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
154 
155  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
156  av_free(vt_extradata);
157  return data;
158 }
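/* ff_videotoolbox_hvcc_extradata_create() builds an
 * HEVCDecoderConfigurationRecord from the active VPS/SPS and every available
 * PPS; it is attached as the "hvcC" sample description extension. */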
159 
160 CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
161 {
162  HEVCContext *h = avctx->priv_data;
163  const HEVCVPS *vps = (const HEVCVPS *)h->ps.vps_list[0]->data;
164  const HEVCSPS *sps = (const HEVCSPS *)h->ps.sps_list[0]->data;
165  int i, num_pps = 0;
166  const HEVCPPS *pps = h->ps.pps;
167  PTLCommon ptlc = vps->ptl.general_ptl;
168  VUI vui = sps->vui;
169  uint8_t parallelismType;
170  CFDataRef data = NULL;
171  uint8_t *p;
172  int vt_extradata_size = 23 + 5 + vps->data_size + 5 + sps->data_size + 3;
173  uint8_t *vt_extradata;
174 
175  for (i = 0; i < HEVC_MAX_PPS_COUNT; i++) {
176  if (h->ps.pps_list[i]) {
177  const HEVCPPS *pps = (const HEVCPPS *)h->ps.pps_list[i]->data;
178  vt_extradata_size += 2 + pps->data_size;
179  num_pps++;
180  }
181  }
182 
183  vt_extradata = av_malloc(vt_extradata_size);
184  if (!vt_extradata)
185  return NULL;
186  p = vt_extradata;
187 
188  /* unsigned int(8) configurationVersion = 1; */
189  AV_W8(p + 0, 1);
190 
191  /*
192  * unsigned int(2) general_profile_space;
193  * unsigned int(1) general_tier_flag;
194  * unsigned int(5) general_profile_idc;
195  */
196  AV_W8(p + 1, ptlc.profile_space << 6 |
197  ptlc.tier_flag << 5 |
198  ptlc.profile_idc);
199 
200  /* unsigned int(32) general_profile_compatibility_flags; */
201  memcpy(p + 2, ptlc.profile_compatibility_flag, 4);
202 
203  /* unsigned int(48) general_constraint_indicator_flags; */
204  AV_W8(p + 6, ptlc.progressive_source_flag << 7 |
205  ptlc.interlaced_source_flag << 6 |
206  ptlc.non_packed_constraint_flag << 5 |
207  ptlc.frame_only_constraint_flag << 4);
208  AV_W8(p + 7, 0);
209  AV_WN32(p + 8, 0);
210 
211  /* unsigned int(8) general_level_idc; */
212  AV_W8(p + 12, ptlc.level_idc);
213 
214  /*
215  * bit(4) reserved = ‘1111’b;
216  * unsigned int(12) min_spatial_segmentation_idc;
217  */
218  AV_W8(p + 13, 0xf0 | (vui.min_spatial_segmentation_idc >> 4));
219  AV_W8(p + 14, vui.min_spatial_segmentation_idc & 0xff);
220 
221  /*
222  * bit(6) reserved = ‘111111’b;
223  * unsigned int(2) parallelismType;
224  */
225  if (!pps)
226  parallelismType = 0;
227  else if (pps->entropy_coding_sync_enabled_flag && pps->tiles_enabled_flag)
228  parallelismType = 0;
229  else if (pps->entropy_coding_sync_enabled_flag)
230  parallelismType = 3;
231  else if (pps->tiles_enabled_flag)
232  parallelismType = 2;
233  else
234  parallelismType = 1;
235  AV_W8(p + 15, 0xfc | parallelismType);
236 
237  /*
238  * bit(6) reserved = ‘111111’b;
239  * unsigned int(2) chromaFormat;
240  */
241  AV_W8(p + 16, sps->chroma_format_idc | 0xfc);
242 
243  /*
244  * bit(5) reserved = ‘11111’b;
245  * unsigned int(3) bitDepthLumaMinus8;
246  */
247  AV_W8(p + 17, (sps->bit_depth - 8) | 0xfc);
248 
249  /*
250  * bit(5) reserved = ‘11111’b;
251  * unsigned int(3) bitDepthChromaMinus8;
252  */
253  AV_W8(p + 18, (sps->bit_depth_chroma - 8) | 0xfc);
254 
255  /* bit(16) avgFrameRate; */
256  AV_WB16(p + 19, 0);
257 
258  /*
259  * bit(2) constantFrameRate;
260  * bit(3) numTemporalLayers;
261  * bit(1) temporalIdNested;
262  * unsigned int(2) lengthSizeMinusOne;
263  */
264  AV_W8(p + 21, 0 << 6 |
265  sps->max_sub_layers << 3 |
266  sps->temporal_id_nesting_flag << 2 |
267  3);
268 
269  /* unsigned int(8) numOfArrays; */
270  AV_W8(p + 22, 3);
271 
272  p += 23;
273  /* vps */
274  /*
275  * bit(1) array_completeness;
276  * unsigned int(1) reserved = 0;
277  * unsigned int(6) NAL_unit_type;
278  */
279  AV_W8(p, 1 << 7 |
280  HEVC_NAL_VPS & 0x3f);
281  /* unsigned int(16) numNalus; */
282  AV_WB16(p + 1, 1);
283  /* unsigned int(16) nalUnitLength; */
284  AV_WB16(p + 3, vps->data_size);
285  /* bit(8*nalUnitLength) nalUnit; */
286  memcpy(p + 5, vps->data, vps->data_size);
287  p += 5 + vps->data_size;
288 
289  /* sps */
290  AV_W8(p, 1 << 7 |
291  HEVC_NAL_SPS & 0x3f);
292  AV_WB16(p + 1, 1);
293  AV_WB16(p + 3, sps->data_size);
294  memcpy(p + 5, sps->data, sps->data_size);
295  p += 5 + sps->data_size;
296 
297  /* pps */
298  AV_W8(p, 1 << 7 |
299  HEVC_NAL_PPS & 0x3f);
300  AV_WB16(p + 1, num_pps);
301  p += 3;
302  for (i = 0; i < HEVC_MAX_PPS_COUNT; i++) {
303  if (h->ps.pps_list[i]) {
304  const HEVCPPS *pps = (const HEVCPPS *)h->ps.pps_list[i]->data;
305  AV_WB16(p, pps->data_size);
306  memcpy(p + 2, pps->data, pps->data_size);
307  p += 2 + pps->data_size;
308  }
309  }
310 
311  av_assert0(p - vt_extradata == vt_extradata_size);
312 
313  data = CFDataCreate(kCFAllocatorDefault, vt_extradata, vt_extradata_size);
314  av_free(vt_extradata);
315  return data;
316 }
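/* videotoolbox_set_frame() moves ownership of the most recently decoded
 * CVPixelBufferRef from the hwaccel context into the placeholder buffer
 * allocated in ff_videotoolbox_alloc_frame(). */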
317 
318 static int videotoolbox_set_frame(AVCodecContext *avctx, AVFrame *frame)
319 {
320  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
321  if (!frame->buf[0] || frame->data[3]) {
322  av_log(avctx, AV_LOG_ERROR, "videotoolbox: invalid state\n");
323  av_frame_unref(frame);
324  return AVERROR_EXTERNAL;
325  }
326 
327  CVPixelBufferRef *ref = (CVPixelBufferRef *)frame->buf[0]->data;
328 
329  if (*ref)
330  CVPixelBufferRelease(*ref);
331 
332  *ref = vtctx->frame;
333  vtctx->frame = NULL;
334 
335  return 0;
336 }
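/* ff_videotoolbox_h264_start_frame(): AVC (length-prefixed) packets can be
 * copied verbatim; Annex B input is instead reassembled NAL by NAL in
 * ff_videotoolbox_h264_decode_slice(). */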
337 
338 int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
339  const uint8_t *buffer,
340  uint32_t size)
341 {
342  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
343  H264Context *h = avctx->priv_data;
344 
345  if (h->is_avc == 1) {
346  return videotoolbox_buffer_copy(vtctx, buffer, size);
347  }
348 
349  return 0;
350 }
351 
352 static int videotoolbox_h264_decode_params(AVCodecContext *avctx,
353  int type,
354  const uint8_t *buffer,
355  uint32_t size)
356 {
357  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
358  H264Context *h = avctx->priv_data;
359 
360  // save sps header (profile/level) used to create decoder session
361  if (!vtctx->sps[0])
362  memcpy(vtctx->sps, h->ps.sps->data + 1, 3);
363 
364  if (type == H264_NAL_SPS) {
365  if (size > 4 && memcmp(vtctx->sps, buffer + 1, 3) != 0) {
366  vtctx->reconfig_needed = true;
367  memcpy(vtctx->sps, buffer + 1, 3);
368  }
369  }
370 
371  // pass-through SPS/PPS changes to the decoder
372  return ff_videotoolbox_h264_decode_slice(avctx, buffer, size);
373 }
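/* ff_videotoolbox_h264_decode_slice() appends one NAL unit to the bitstream
 * buffer, converting Annex B input to the 4-byte length-prefixed form that
 * VideoToolbox expects. */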
374 
375 int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
376  const uint8_t *buffer,
377  uint32_t size)
378 {
379  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
380  H264Context *h = avctx->priv_data;
381  void *tmp;
382 
383  if (h->is_avc == 1)
384  return 0;
385 
386  tmp = av_fast_realloc(vtctx->bitstream,
387  &vtctx->allocated_size,
388  vtctx->bitstream_size+size+4);
389  if (!tmp)
390  return AVERROR(ENOMEM);
391 
392  vtctx->bitstream = tmp;
393 
394  AV_WB32(vtctx->bitstream + vtctx->bitstream_size, size);
395  memcpy(vtctx->bitstream + vtctx->bitstream_size + 4, buffer, size);
396 
397  vtctx->bitstream_size += size + 4;
398 
399  return 0;
400 }
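/* ff_videotoolbox_uninit(): common hwaccel teardown, dropping the reassembled
 * bitstream and any pending output pixel buffer. */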
401 
402 int ff_videotoolbox_uninit(AVCodecContext *avctx)
403 {
404  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
405  if (vtctx) {
406  av_freep(&vtctx->bitstream);
407  if (vtctx->frame)
408  CVPixelBufferRelease(vtctx->frame);
409  }
410 
411  return 0;
412 }
413 
414 #if CONFIG_VIDEOTOOLBOX
415 // Return the AVVideotoolboxContext that matters currently. Where it comes from
416 // depends on the API used.
417 static AVVideotoolboxContext *videotoolbox_get_context(AVCodecContext *avctx)
418 {
419  // Somewhat tricky because the user can call av_videotoolbox_default_free()
420  // at any time, even when the codec is closed.
421  if (avctx->internal && avctx->internal->hwaccel_priv_data) {
422  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
423  if (vtctx->vt_ctx)
424  return vtctx->vt_ctx;
425  }
426  return avctx->hwaccel_context;
427 }
428 
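/* Attach the decoded pixel buffer to the frame and, on the hw_frames_ctx code
 * path, recreate the cached AVHWFramesContext whenever the surface format or
 * dimensions reported by CoreVideo change. */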
429 static int videotoolbox_buffer_create(AVCodecContext *avctx, AVFrame *frame)
430 {
431  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
432  CVPixelBufferRef pixbuf = (CVPixelBufferRef)vtctx->frame;
433  OSType pixel_format = CVPixelBufferGetPixelFormatType(pixbuf);
434  enum AVPixelFormat sw_format = av_map_videotoolbox_format_to_pixfmt(pixel_format);
435  int width = CVPixelBufferGetWidth(pixbuf);
436  int height = CVPixelBufferGetHeight(pixbuf);
437  AVHWFramesContext *cached_frames;
438  int ret;
439 
440  ret = videotoolbox_set_frame(avctx, frame);
441  if (ret < 0)
442  return ret;
443 
444  // Old API code path.
445  if (!vtctx->cached_hw_frames_ctx)
446  return 0;
447 
448  cached_frames = (AVHWFramesContext*)vtctx->cached_hw_frames_ctx->data;
449 
450  if (cached_frames->sw_format != sw_format ||
451  cached_frames->width != width ||
452  cached_frames->height != height) {
453  AVBufferRef *hw_frames_ctx = av_hwframe_ctx_alloc(cached_frames->device_ref);
454  AVHWFramesContext *hw_frames;
455  if (!hw_frames_ctx)
456  return AVERROR(ENOMEM);
457 
458  hw_frames = (AVHWFramesContext*)hw_frames_ctx->data;
459  hw_frames->format = cached_frames->format;
460  hw_frames->sw_format = sw_format;
461  hw_frames->width = width;
462  hw_frames->height = height;
463 
464  ret = av_hwframe_ctx_init(hw_frames_ctx);
465  if (ret < 0) {
466  av_buffer_unref(&hw_frames_ctx);
467  return ret;
468  }
469 
470  av_buffer_unref(&vtctx->cached_hw_frames_ctx);
471  vtctx->cached_hw_frames_ctx = hw_frames_ctx;
472  }
473 
474  av_buffer_unref(&frame->hw_frames_ctx);
475  frame->hw_frames_ctx = av_buffer_ref(vtctx->cached_hw_frames_ctx);
476  if (!frame->hw_frames_ctx)
477  return AVERROR(ENOMEM);
478 
479  return 0;
480 }
481 
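/* Write an MPEG-4 descriptor length as four bytes carrying 7 bits of payload
 * each, with the continuation bit set on all but the last byte; used inside
 * the esds extradata below. */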
482 static void videotoolbox_write_mp4_descr_length(PutByteContext *pb, int length)
483 {
484  int i;
485  uint8_t b;
486 
487  for (i = 3; i >= 0; i--) {
488  b = (length >> (i * 7)) & 0x7F;
489  if (i != 0)
490  b |= 0x80;
491 
492  bytestream2_put_byteu(pb, b);
493  }
494 }
495 
496 static CFDataRef videotoolbox_esds_extradata_create(AVCodecContext *avctx)
497 {
498  CFDataRef data;
499  uint8_t *rw_extradata;
500  PutByteContext pb;
501  int full_size = 3 + 5 + 13 + 5 + avctx->extradata_size + 3;
502  // ES_DescrTag data + DecoderConfigDescrTag + data + DecSpecificInfoTag + size + SLConfigDescriptor
503  int config_size = 13 + 5 + avctx->extradata_size;
504  int s;
505 
506  if (!(rw_extradata = av_mallocz(full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING)))
507  return NULL;
508 
509  bytestream2_init_writer(&pb, rw_extradata, full_size + VIDEOTOOLBOX_ESDS_EXTRADATA_PADDING);
510  bytestream2_put_byteu(&pb, 0); // version
511  bytestream2_put_ne24(&pb, 0); // flags
512 
513  // elementary stream descriptor
514  bytestream2_put_byteu(&pb, 0x03); // ES_DescrTag
515  videotoolbox_write_mp4_descr_length(&pb, full_size);
516  bytestream2_put_ne16(&pb, 0); // esid
517  bytestream2_put_byteu(&pb, 0); // stream priority (0-32)
518 
519  // decoder configuration descriptor
520  bytestream2_put_byteu(&pb, 0x04); // DecoderConfigDescrTag
521  videotoolbox_write_mp4_descr_length(&pb, config_size);
522  bytestream2_put_byteu(&pb, 32); // object type indication. 32 = AV_CODEC_ID_MPEG4
523  bytestream2_put_byteu(&pb, 0x11); // stream type
524  bytestream2_put_ne24(&pb, 0); // buffer size
525  bytestream2_put_ne32(&pb, 0); // max bitrate
526  bytestream2_put_ne32(&pb, 0); // avg bitrate
527 
528  // decoder specific descriptor
529  bytestream2_put_byteu(&pb, 0x05); ///< DecSpecificInfoTag
530  videotoolbox_write_mp4_descr_length(&pb, avctx->extradata_size);
531 
532  bytestream2_put_buffer(&pb, avctx->extradata, avctx->extradata_size);
533 
534  // SLConfigDescriptor
535  bytestream2_put_byteu(&pb, 0x06); // SLConfigDescrTag
536  bytestream2_put_byteu(&pb, 0x01); // length
537  bytestream2_put_byteu(&pb, 0x02); //
538 
539  s = bytestream2_size_p(&pb);
540 
541  data = CFDataCreate(kCFAllocatorDefault, rw_extradata, s);
542 
543  av_freep(&rw_extradata);
544  return data;
545 }
546 
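/* Wrap the reassembled bitstream in a CMBlockBuffer/CMSampleBuffer pair for
 * submission to the decompression session; the memory is referenced, not
 * copied (kCFAllocatorNull block allocator). */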
547 static CMSampleBufferRef videotoolbox_sample_buffer_create(CMFormatDescriptionRef fmt_desc,
548  void *buffer,
549  int size)
550 {
551  OSStatus status;
552  CMBlockBufferRef block_buf;
553  CMSampleBufferRef sample_buf;
554 
555  block_buf = NULL;
556  sample_buf = NULL;
557 
558  status = CMBlockBufferCreateWithMemoryBlock(kCFAllocatorDefault,// structureAllocator
559  buffer, // memoryBlock
560  size, // blockLength
561  kCFAllocatorNull, // blockAllocator
562  NULL, // customBlockSource
563  0, // offsetToData
564  size, // dataLength
565  0, // flags
566  &block_buf);
567 
568  if (!status) {
569  status = CMSampleBufferCreate(kCFAllocatorDefault, // allocator
570  block_buf, // dataBuffer
571  TRUE, // dataReady
572  0, // makeDataReadyCallback
573  0, // makeDataReadyRefcon
574  fmt_desc, // formatDescription
575  1, // numSamples
576  0, // numSampleTimingEntries
577  NULL, // sampleTimingArray
578  0, // numSampleSizeEntries
579  NULL, // sampleSizeArray
580  &sample_buf);
581  }
582 
583  if (block_buf)
584  CFRelease(block_buf);
585 
586  return sample_buf;
587 }
588 
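/* Decompression output callback: retain the produced image buffer in the
 * hwaccel context so that end_frame can hand it to the caller. */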
589 static void videotoolbox_decoder_callback(void *opaque,
590  void *sourceFrameRefCon,
591  OSStatus status,
592  VTDecodeInfoFlags flags,
593  CVImageBufferRef image_buffer,
594  CMTime pts,
595  CMTime duration)
596 {
597  AVCodecContext *avctx = opaque;
598  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
599 
600  if (vtctx->frame) {
601  CVPixelBufferRelease(vtctx->frame);
602  vtctx->frame = NULL;
603  }
604 
605  if (!image_buffer) {
606  av_log(NULL, AV_LOG_DEBUG, "vt decoder cb: output image buffer is null\n");
607  return;
608  }
609 
610  vtctx->frame = CVPixelBufferRetain(image_buffer);
611 }
612 
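/* Submit the buffered access unit to the session and wait for the
 * asynchronous decode to complete before returning its status. */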
613 static OSStatus videotoolbox_session_decode_frame(AVCodecContext *avctx)
614 {
615  OSStatus status;
616  CMSampleBufferRef sample_buf;
617  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
618  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
619 
620  sample_buf = videotoolbox_sample_buffer_create(videotoolbox->cm_fmt_desc,
621  vtctx->bitstream,
622  vtctx->bitstream_size);
623 
624  if (!sample_buf)
625  return -1;
626 
627  status = VTDecompressionSessionDecodeFrame(videotoolbox->session,
628  sample_buf,
629  0, // decodeFlags
630  NULL, // sourceFrameRefCon
631  0); // infoFlagsOut
632  if (status == noErr)
633  status = VTDecompressionSessionWaitForAsynchronousFrames(videotoolbox->session);
634 
635  CFRelease(sample_buf);
636 
637  return status;
638 }
639 
640 static CMVideoFormatDescriptionRef videotoolbox_format_desc_create(CMVideoCodecType codec_type,
641  CFDictionaryRef decoder_spec,
642  int width,
643  int height)
644 {
645  CMFormatDescriptionRef cm_fmt_desc;
646  OSStatus status;
647 
648  status = CMVideoFormatDescriptionCreate(kCFAllocatorDefault,
649  codec_type,
650  width,
651  height,
652  decoder_spec, // Dictionary of extension
653  &cm_fmt_desc);
654 
655  if (status)
656  return NULL;
657 
658  return cm_fmt_desc;
659 }
660 
661 static CFDictionaryRef videotoolbox_buffer_attributes_create(int width,
662  int height,
663  OSType pix_fmt)
664 {
665  CFMutableDictionaryRef buffer_attributes;
666  CFMutableDictionaryRef io_surface_properties;
667  CFNumberRef cv_pix_fmt;
668  CFNumberRef w;
669  CFNumberRef h;
670 
671  w = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &width);
672  h = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &height);
673  cv_pix_fmt = CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt32Type, &pix_fmt);
674 
675  buffer_attributes = CFDictionaryCreateMutable(kCFAllocatorDefault,
676  4,
677  &kCFTypeDictionaryKeyCallBacks,
678  &kCFTypeDictionaryValueCallBacks);
679  io_surface_properties = CFDictionaryCreateMutable(kCFAllocatorDefault,
680  0,
681  &kCFTypeDictionaryKeyCallBacks,
682  &kCFTypeDictionaryValueCallBacks);
683 
684  if (pix_fmt)
685  CFDictionarySetValue(buffer_attributes, kCVPixelBufferPixelFormatTypeKey, cv_pix_fmt);
686  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfacePropertiesKey, io_surface_properties);
687  CFDictionarySetValue(buffer_attributes, kCVPixelBufferWidthKey, w);
688  CFDictionarySetValue(buffer_attributes, kCVPixelBufferHeightKey, h);
689 #if TARGET_OS_IPHONE
690  CFDictionarySetValue(buffer_attributes, kCVPixelBufferOpenGLESCompatibilityKey, kCFBooleanTrue);
691 #else
692  CFDictionarySetValue(buffer_attributes, kCVPixelBufferIOSurfaceOpenGLTextureCompatibilityKey, kCFBooleanTrue);
693 #endif
694 
695  CFRelease(io_surface_properties);
696  CFRelease(cv_pix_fmt);
697  CFRelease(w);
698  CFRelease(h);
699 
700  return buffer_attributes;
701 }
702 
703 static CFDictionaryRef videotoolbox_decoder_config_create(CMVideoCodecType codec_type,
704  AVCodecContext *avctx)
705 {
706  CFMutableDictionaryRef config_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
707  0,
708  &kCFTypeDictionaryKeyCallBacks,
709  &kCFTypeDictionaryValueCallBacks);
710 
711  CFDictionarySetValue(config_info,
712  kVTVideoDecoderSpecification_RequireHardwareAcceleratedVideoDecoder,
713  kCFBooleanTrue);
714 
715  CFMutableDictionaryRef avc_info;
716  CFDataRef data = NULL;
717 
718  avc_info = CFDictionaryCreateMutable(kCFAllocatorDefault,
719  1,
720  &kCFTypeDictionaryKeyCallBacks,
721  &kCFTypeDictionaryValueCallBacks);
722 
723  switch (codec_type) {
724  case kCMVideoCodecType_MPEG4Video :
725  if (avctx->extradata_size)
726  data = videotoolbox_esds_extradata_create(avctx);
727  if (data)
728  CFDictionarySetValue(avc_info, CFSTR("esds"), data);
729  break;
730  case kCMVideoCodecType_H264 :
731  data = ff_videotoolbox_avcc_extradata_create(avctx);
732  if (data)
733  CFDictionarySetValue(avc_info, CFSTR("avcC"), data);
734  break;
735  case kCMVideoCodecType_HEVC :
736  data = ff_videotoolbox_hvcc_extradata_create(avctx);
737  if (data)
738  CFDictionarySetValue(avc_info, CFSTR("hvcC"), data);
739  break;
740  default:
741  break;
742  }
743 
744  CFDictionarySetValue(config_info,
745  kCMFormatDescriptionExtension_SampleDescriptionExtensionAtoms,
746  avc_info);
747 
748  if (data)
749  CFRelease(data);
750 
751  CFRelease(avc_info);
752  return config_info;
753 }
754 
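/* videotoolbox_start() creates the decompression session: it maps the codec
 * ID to a CoreMedia codec type, builds the format description (including the
 * avcC/hvcC/esds extradata) and destination buffer attributes, and translates
 * VideoToolbox status codes into AVERROR values. */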
755 static int videotoolbox_start(AVCodecContext *avctx)
756 {
757  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
758  OSStatus status;
759  VTDecompressionOutputCallbackRecord decoder_cb;
760  CFDictionaryRef decoder_spec;
761  CFDictionaryRef buf_attr;
762 
763  if (!videotoolbox) {
764  av_log(avctx, AV_LOG_ERROR, "hwaccel context is not set\n");
765  return -1;
766  }
767 
768  switch( avctx->codec_id ) {
769  case AV_CODEC_ID_H263 :
770  videotoolbox->cm_codec_type = kCMVideoCodecType_H263;
771  break;
772  case AV_CODEC_ID_H264 :
773  videotoolbox->cm_codec_type = kCMVideoCodecType_H264;
774  break;
775  case AV_CODEC_ID_HEVC :
776  videotoolbox->cm_codec_type = kCMVideoCodecType_HEVC;
777  break;
778  case AV_CODEC_ID_MPEG1VIDEO :
779  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG1Video;
780  break;
781  case AV_CODEC_ID_MPEG2VIDEO :
782  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG2Video;
783  break;
784  case AV_CODEC_ID_MPEG4 :
785  videotoolbox->cm_codec_type = kCMVideoCodecType_MPEG4Video;
786  break;
787  default :
788  break;
789  }
790 
791  decoder_spec = videotoolbox_decoder_config_create(videotoolbox->cm_codec_type, avctx);
792 
793  if (!decoder_spec) {
794  av_log(avctx, AV_LOG_ERROR, "decoder specification creation failed\n");
795  return -1;
796  }
797 
798  videotoolbox->cm_fmt_desc = videotoolbox_format_desc_create(videotoolbox->cm_codec_type,
799  decoder_spec,
800  avctx->width,
801  avctx->height);
802  if (!videotoolbox->cm_fmt_desc) {
803  if (decoder_spec)
804  CFRelease(decoder_spec);
805 
806  av_log(avctx, AV_LOG_ERROR, "format description creation failed\n");
807  return -1;
808  }
809 
810  buf_attr = videotoolbox_buffer_attributes_create(avctx->width,
811  avctx->height,
812  videotoolbox->cv_pix_fmt_type);
813 
814  decoder_cb.decompressionOutputCallback = videotoolbox_decoder_callback;
815  decoder_cb.decompressionOutputRefCon = avctx;
816 
817  status = VTDecompressionSessionCreate(NULL, // allocator
818  videotoolbox->cm_fmt_desc, // videoFormatDescription
819  decoder_spec, // videoDecoderSpecification
820  buf_attr, // destinationImageBufferAttributes
821  &decoder_cb, // outputCallback
822  &videotoolbox->session); // decompressionSessionOut
823 
824  if (decoder_spec)
825  CFRelease(decoder_spec);
826  if (buf_attr)
827  CFRelease(buf_attr);
828 
829  switch (status) {
830  case kVTVideoDecoderNotAvailableNowErr:
831  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox session not available.\n");
832  return AVERROR(ENOSYS);
833  case kVTVideoDecoderUnsupportedDataFormatErr:
834  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox does not support this format.\n");
835  return AVERROR(ENOSYS);
836  case kVTVideoDecoderMalfunctionErr:
837  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox malfunction.\n");
838  return AVERROR(EINVAL);
839  case kVTVideoDecoderBadDataErr:
840  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox reported invalid data.\n");
841  return AVERROR_INVALIDDATA;
842  case 0:
843  return 0;
844  default:
845  av_log(avctx, AV_LOG_VERBOSE, "Unknown VideoToolbox session creation error %u\n", (unsigned)status);
846  return AVERROR_UNKNOWN;
847  }
848 }
849 
850 static void videotoolbox_stop(AVCodecContext *avctx)
851 {
852  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
853  if (!videotoolbox)
854  return;
855 
856  if (videotoolbox->cm_fmt_desc) {
857  CFRelease(videotoolbox->cm_fmt_desc);
858  videotoolbox->cm_fmt_desc = NULL;
859  }
860 
861  if (videotoolbox->session) {
862  VTDecompressionSessionInvalidate(videotoolbox->session);
863  CFRelease(videotoolbox->session);
864  videotoolbox->session = NULL;
865  }
866 }
867 
868 static const char *videotoolbox_error_string(OSStatus status)
869 {
870  switch (status) {
871  case kVTVideoDecoderBadDataErr:
872  return "bad data";
873  case kVTVideoDecoderMalfunctionErr:
874  return "decoder malfunction";
875  case kVTInvalidSessionErr:
876  return "invalid session";
877  }
878  return "unknown";
879 }
880 
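/* Shared end_frame path: restart the session if a reconfiguration was flagged
 * (e.g. after an SPS change or a decoder malfunction), decode the buffered
 * access unit and attach the resulting pixel buffer to the output frame. */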
881 static int videotoolbox_common_end_frame(AVCodecContext *avctx, AVFrame *frame)
882 {
883  OSStatus status;
884  AVVideotoolboxContext *videotoolbox = videotoolbox_get_context(avctx);
885  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
886 
887  if (vtctx->reconfig_needed == true) {
888  vtctx->reconfig_needed = false;
889  av_log(avctx, AV_LOG_VERBOSE, "VideoToolbox decoder needs reconfig, restarting..\n");
890  videotoolbox_stop(avctx);
891  if (videotoolbox_start(avctx) != 0) {
892  return AVERROR_EXTERNAL;
893  }
894  }
895 
896  if (!videotoolbox->session || !vtctx->bitstream || !vtctx->bitstream_size)
897  return AVERROR_INVALIDDATA;
898 
899  status = videotoolbox_session_decode_frame(avctx);
900  if (status != noErr) {
901  if (status == kVTVideoDecoderMalfunctionErr || status == kVTInvalidSessionErr)
902  vtctx->reconfig_needed = true;
903  av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%s, %d)\n", videotoolbox_error_string(status), (int)status);
904  return AVERROR_UNKNOWN;
905  }
906 
907  if (!vtctx->frame) {
908  vtctx->reconfig_needed = true;
909  return AVERROR_UNKNOWN;
910  }
911 
912  return videotoolbox_buffer_create(avctx, frame);
913 }
914 
915 static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
916 {
917  H264Context *h = avctx->priv_data;
918  AVFrame *frame = h->cur_pic_ptr->f;
919  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
920  int ret = videotoolbox_common_end_frame(avctx, frame);
921  vtctx->bitstream_size = 0;
922  return ret;
923 }
924 
925 static int videotoolbox_hevc_decode_params(AVCodecContext *avctx,
926  int type,
927  const uint8_t *buffer,
928  uint32_t size)
929 {
930  return ff_videotoolbox_h264_decode_slice(avctx, buffer, size);
931 }
932 
933 static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
934 {
935  HEVCContext *h = avctx->priv_data;
936  AVFrame *frame = h->ref->frame;
937  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
938  int ret;
939 
940  ret = videotoolbox_common_end_frame(avctx, frame);
941  vtctx->bitstream_size = 0;
942  return ret;
943 }
944 
945 static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
946  const uint8_t *buffer,
947  uint32_t size)
948 {
949  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
950 
951  return videotoolbox_buffer_copy(vtctx, buffer, size);
952 }
953 
954 static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
955  const uint8_t *buffer,
956  uint32_t size)
957 {
958  return 0;
959 }
960 
961 static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
962 {
963  MpegEncContext *s = avctx->priv_data;
964  AVFrame *frame = s->current_picture_ptr->f;
965 
966  return videotoolbox_common_end_frame(avctx, frame);
967 }
968 
969 static int videotoolbox_uninit(AVCodecContext *avctx)
970 {
971  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
972  if (!vtctx)
973  return 0;
974 
975  ff_videotoolbox_uninit(avctx);
976 
977  if (vtctx->vt_ctx)
978  videotoolbox_stop(avctx);
979 
981  av_freep(&vtctx->vt_ctx);
982 
983  return 0;
984 }
985 
986 static int videotoolbox_common_init(AVCodecContext *avctx)
987 {
988  VTContext *vtctx = avctx->internal->hwaccel_priv_data;
989  AVHWFramesContext *hw_frames;
990  int err;
991 
992  // Old API - do nothing.
993  if (avctx->hwaccel_context)
994  return 0;
995 
996  if (!avctx->hw_frames_ctx && !avctx->hw_device_ctx) {
997  av_log(avctx, AV_LOG_ERROR,
998  "Either hw_frames_ctx or hw_device_ctx must be set.\n");
999  return AVERROR(EINVAL);
1000  }
1001 
1003  if (!vtctx->vt_ctx) {
1004  err = AVERROR(ENOMEM);
1005  goto fail;
1006  }
1007 
1008  if (avctx->hw_frames_ctx) {
1009  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1010  } else {
1011  avctx->hw_frames_ctx = av_hwframe_ctx_alloc(avctx->hw_device_ctx);
1012  if (!avctx->hw_frames_ctx) {
1013  err = AVERROR(ENOMEM);
1014  goto fail;
1015  }
1016 
1017  hw_frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
1018  hw_frames->format = AV_PIX_FMT_VIDEOTOOLBOX;
1019  hw_frames->sw_format = AV_PIX_FMT_NV12; // same as av_videotoolbox_alloc_context()
1020  hw_frames->width = avctx->width;
1021  hw_frames->height = avctx->height;
1022 
1023  err = av_hwframe_ctx_init(avctx->hw_frames_ctx);
1024  if (err < 0) {
1025  av_buffer_unref(&avctx->hw_frames_ctx);
1026  goto fail;
1027  }
1028  }
1029 
1030  vtctx->cached_hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
1031  if (!vtctx->cached_hw_frames_ctx) {
1032  err = AVERROR(ENOMEM);
1033  goto fail;
1034  }
1035 
1036  vtctx->vt_ctx->cv_pix_fmt_type =
1037  av_map_videotoolbox_format_from_pixfmt(hw_frames->sw_format);
1038  if (!vtctx->vt_ctx->cv_pix_fmt_type) {
1039  av_log(avctx, AV_LOG_ERROR, "Unknown sw_format.\n");
1040  err = AVERROR(EINVAL);
1041  goto fail;
1042  }
1043 
1044  err = videotoolbox_start(avctx);
1045  if (err < 0)
1046  goto fail;
1047 
1048  return 0;
1049 
1050 fail:
1051  videotoolbox_uninit(avctx);
1052  return err;
1053 }
1054 
1055 static int videotoolbox_frame_params(AVCodecContext *avctx,
1056  AVBufferRef *hw_frames_ctx)
1057 {
1058  AVHWFramesContext *frames_ctx = (AVHWFramesContext*)hw_frames_ctx->data;
1059 
1060  frames_ctx->format = AV_PIX_FMT_VIDEOTOOLBOX;
1061  frames_ctx->width = avctx->coded_width;
1062  frames_ctx->height = avctx->coded_height;
1063  frames_ctx->sw_format = AV_PIX_FMT_NV12;
1064 
1065  return 0;
1066 }
1067 
1068 const AVHWAccel ff_h263_videotoolbox_hwaccel = {
1069  .name = "h263_videotoolbox",
1070  .type = AVMEDIA_TYPE_VIDEO,
1071  .id = AV_CODEC_ID_H263,
1072  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1073  .alloc_frame = ff_videotoolbox_alloc_frame,
1074  .start_frame = videotoolbox_mpeg_start_frame,
1075  .decode_slice = videotoolbox_mpeg_decode_slice,
1076  .end_frame = videotoolbox_mpeg_end_frame,
1077  .frame_params = videotoolbox_frame_params,
1078  .init = videotoolbox_common_init,
1079  .uninit = videotoolbox_uninit,
1080  .priv_data_size = sizeof(VTContext),
1081 };
1082 
1083 const AVHWAccel ff_hevc_videotoolbox_hwaccel = {
1084  .name = "hevc_videotoolbox",
1085  .type = AVMEDIA_TYPE_VIDEO,
1086  .id = AV_CODEC_ID_HEVC,
1087  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1088  .alloc_frame = ff_videotoolbox_alloc_frame,
1089  .start_frame = ff_videotoolbox_h264_start_frame,
1090  .decode_slice = ff_videotoolbox_h264_decode_slice,
1091  .decode_params = videotoolbox_hevc_decode_params,
1092  .end_frame = videotoolbox_hevc_end_frame,
1093  .frame_params = videotoolbox_frame_params,
1094  .init = videotoolbox_common_init,
1095  .uninit = ff_videotoolbox_uninit,
1096  .priv_data_size = sizeof(VTContext),
1097 };
1098 
1099 const AVHWAccel ff_h264_videotoolbox_hwaccel = {
1100  .name = "h264_videotoolbox",
1101  .type = AVMEDIA_TYPE_VIDEO,
1102  .id = AV_CODEC_ID_H264,
1103  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1104  .alloc_frame = ff_videotoolbox_alloc_frame,
1105  .start_frame = ff_videotoolbox_h264_start_frame,
1106  .decode_slice = ff_videotoolbox_h264_decode_slice,
1107  .decode_params = videotoolbox_h264_decode_params,
1108  .end_frame = videotoolbox_h264_end_frame,
1109  .frame_params = videotoolbox_frame_params,
1110  .init = videotoolbox_common_init,
1111  .uninit = videotoolbox_uninit,
1112  .priv_data_size = sizeof(VTContext),
1113 };
1114 
1115 const AVHWAccel ff_mpeg1_videotoolbox_hwaccel = {
1116  .name = "mpeg1_videotoolbox",
1117  .type = AVMEDIA_TYPE_VIDEO,
1118  .id = AV_CODEC_ID_MPEG1VIDEO,
1119  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1120  .alloc_frame = ff_videotoolbox_alloc_frame,
1121  .start_frame = videotoolbox_mpeg_start_frame,
1122  .decode_slice = videotoolbox_mpeg_decode_slice,
1123  .end_frame = videotoolbox_mpeg_end_frame,
1124  .frame_params = videotoolbox_frame_params,
1125  .init = videotoolbox_common_init,
1126  .uninit = videotoolbox_uninit,
1127  .priv_data_size = sizeof(VTContext),
1128 };
1129 
1130 const AVHWAccel ff_mpeg2_videotoolbox_hwaccel = {
1131  .name = "mpeg2_videotoolbox",
1132  .type = AVMEDIA_TYPE_VIDEO,
1133  .id = AV_CODEC_ID_MPEG2VIDEO,
1134  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1135  .alloc_frame = ff_videotoolbox_alloc_frame,
1136  .start_frame = videotoolbox_mpeg_start_frame,
1137  .decode_slice = videotoolbox_mpeg_decode_slice,
1138  .end_frame = videotoolbox_mpeg_end_frame,
1139  .frame_params = videotoolbox_frame_params,
1140  .init = videotoolbox_common_init,
1141  .uninit = videotoolbox_uninit,
1142  .priv_data_size = sizeof(VTContext),
1143 };
1144 
1145 const AVHWAccel ff_mpeg4_videotoolbox_hwaccel = {
1146  .name = "mpeg4_videotoolbox",
1147  .type = AVMEDIA_TYPE_VIDEO,
1148  .id = AV_CODEC_ID_MPEG4,
1149  .pix_fmt = AV_PIX_FMT_VIDEOTOOLBOX,
1150  .alloc_frame = ff_videotoolbox_alloc_frame,
1151  .start_frame = videotoolbox_mpeg_start_frame,
1152  .decode_slice = videotoolbox_mpeg_decode_slice,
1153  .end_frame = videotoolbox_mpeg_end_frame,
1154  .frame_params = videotoolbox_frame_params,
1155  .init = videotoolbox_common_init,
1156  .uninit = videotoolbox_uninit,
1157  .priv_data_size = sizeof(VTContext),
1158 };
1159 
1160 AVVideotoolboxContext *av_videotoolbox_alloc_context(void)
1161 {
1162  AVVideotoolboxContext *ret = av_mallocz(sizeof(*ret));
1163 
1164  if (ret) {
1165  ret->output_callback = videotoolbox_decoder_callback;
1166  ret->cv_pix_fmt_type = kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange;
1167  }
1168 
1169  return ret;
1170 }
1171 
1172 int av_videotoolbox_default_init(AVCodecContext *avctx)
1173 {
1174  return av_videotoolbox_default_init2(avctx, NULL);
1175 }
1176 
1177 int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx)
1178 {
1179  avctx->hwaccel_context = vtctx ?: av_videotoolbox_alloc_context();
1180  if (!avctx->hwaccel_context)
1181  return AVERROR(ENOMEM);
1182  return videotoolbox_start(avctx);
1183 }
1184 
1185 void av_videotoolbox_default_free(AVCodecContext *avctx)
1186 {
1187 
1188  videotoolbox_stop(avctx);
1189  av_freep(&avctx->hwaccel_context);
1190 }
1191 #endif /* CONFIG_VIDEOTOOLBOX */
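With the hw_device_ctx API, the hwaccels defined in this file are selected simply by attaching a VideoToolbox device to the decoder; the convenience functions above cover the older hwaccel_context path. A minimal caller-side sketch follows (not part of this file): the helper names pick_videotoolbox and open_vt_decoder are hypothetical, and error handling is reduced to NULL returns.

#include <libavcodec/avcodec.h>
#include <libavutil/hwcontext.h>

/* Hypothetical get_format callback: pick the VideoToolbox pixel format when
 * the decoder offers it. */
static enum AVPixelFormat pick_videotoolbox(AVCodecContext *avctx,
                                            const enum AVPixelFormat *fmts)
{
    for (; *fmts != AV_PIX_FMT_NONE; fmts++)
        if (*fmts == AV_PIX_FMT_VIDEOTOOLBOX)
            return *fmts;
    return AV_PIX_FMT_NONE; /* a real caller would fall back to software here */
}

/* Hypothetical helper: open a decoder for the given stream parameters with a
 * VideoToolbox hardware device attached. */
static AVCodecContext *open_vt_decoder(const AVCodecParameters *par)
{
    const AVCodec *dec  = avcodec_find_decoder(par->codec_id);
    AVCodecContext *ctx = dec ? avcodec_alloc_context3(dec) : NULL;
    AVBufferRef *device = NULL;

    if (!ctx)
        return NULL;
    if (avcodec_parameters_to_context(ctx, par) < 0 ||
        av_hwdevice_ctx_create(&device, AV_HWDEVICE_TYPE_VIDEOTOOLBOX,
                               NULL, NULL, 0) < 0) {
        avcodec_free_context(&ctx);
        return NULL;
    }
    ctx->hw_device_ctx = device; /* the codec context owns this reference now */
    ctx->get_format    = pick_videotoolbox;

    if (avcodec_open2(ctx, dec, NULL) < 0) {
        avcodec_free_context(&ctx);
        return NULL;
    }
    /* Frames returned by avcodec_receive_frame() will have
     * frame->format == AV_PIX_FMT_VIDEOTOOLBOX and carry the decoded
     * CVPixelBufferRef in frame->data[3] (set by videotoolbox_postproc_frame). */
    return ctx;
}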