AVFoundation.AVCaptureSession Class
Coordinates a recording session.

See Also: AVCaptureSession Members

Syntax

[Foundation.Register("AVCaptureSession", true)]
[ObjCRuntime.Availability(Introduced=ObjCRuntime.Platform.iOS_4_0)]
public class AVCaptureSession : Foundation.NSObject

Remarks

The AVCaptureSession object coordinates the capture of video or audio input and the delivery of the captured data to one or more output objects. As the iOS hardware line has advanced, devices have gained multiple capture devices (in particular, multiple cameras). Application developers can locate a capture device with AVCaptureDevice.DefaultDeviceWithMediaType or enumerate all matching devices with AVCaptureDevice.DevicesWithMediaType, passing in the constants defined in AVFoundation.AVMediaType.
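
For instance, the following sketch (illustrative only; it assumes a using System.Linq directive for FirstOrDefault) enumerates the available cameras and prefers the front-facing one when present:

C# Example

// Enumerate the video-capable capture devices and prefer the front camera, if any
var cameras = AVCaptureDevice.DevicesWithMediaType (AVMediaType.Video);
foreach (var device in cameras)
        Console.WriteLine ("{0} ({1})", device.LocalizedName, device.Position);

var preferredCamera = cameras.FirstOrDefault (d => d.Position == AVCaptureDevicePosition.Front)
        ?? AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Video);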

Configuring capture consists of adding AVFoundation.AVCaptureInput and AVFoundation.AVCaptureOutput objects to the AVFoundation.AVCaptureSession; the current configuration is exposed by the AVCaptureSession.Inputs and AVCaptureSession.Outputs properties. A session may have multiple inputs and outputs. For instance, to capture both audio and video, one would use two capture inputs:

C# Example

var session = new AVCaptureSession();

var camera = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Video);
var mic = AVCaptureDevice.DefaultDeviceWithMediaType(AVMediaType.Audio);
if(camera == null || mic == null){
    throw new Exception("Can't find devices");
}

// Wrap the devices in AVCaptureDeviceInput objects before adding them to the session
var cameraInput = AVCaptureDeviceInput.FromDevice(camera);
var micInput = AVCaptureDeviceInput.FromDevice(mic);

if(session.CanAddInput(cameraInput)){
    session.AddInput(cameraInput);
}
if(session.CanAddInput(micInput)){
    session.AddInput(micInput);
}

Video can be captured directly to a file with AVFoundation.AVCaptureMovieFileOutput. However, this class produces no displayable data and cannot be used simultaneously with AVFoundation.AVCaptureVideoDataOutput. Instead, application developers can use it in combination with an AVFoundation.AVCaptureVideoPreviewLayer, as shown in the following example:

C# Example

var layer = new AVCaptureVideoPreviewLayer(session);
layer.LayerVideoGravity = AVLayerVideoGravity.ResizeAspectFill;

var cameraView = new UIView();
// Size the preview layer to match the hosting view
layer.Frame = cameraView.Bounds;
cameraView.Layer.AddSublayer(layer);

var filePath = Path.Combine(Path.GetTempPath(), "temporary.mov");
var fileUrl = NSUrl.FromFilename(filePath);

var movieFileOutput = new AVCaptureMovieFileOutput();
var recordingDelegate = new MyRecordingDelegate();
session.AddOutput(movieFileOutput);

movieFileOutput.StartRecordingToOutputFile(fileUrl, recordingDelegate);

Application developers should note that the AVCaptureMovieFileOutput.StopRecording method is asynchronous; developers should wait until the AVCaptureFileOutputRecordingDelegate.FinishedRecording delegate method has been called before manipulating the file (for instance, before saving it to the Photos album with UIKit.UIVideo.SaveToPhotosAlbum or AssetsLibrary.ALAssetsLibrary.WriteVideoToSavedPhotosAlbumAsync).

C# Example

public class MyRecordingDelegate : AVCaptureFileOutputRecordingDelegate {
        public override void FinishedRecording (AVCaptureFileOutput captureOutput, NSUrl outputFileUrl, NSObject[] connections, NSError error)
        {
                if (UIVideo.IsCompatibleWithSavedPhotosAlbum (outputFileUrl.Path)) {
                        var library = new ALAssetsLibrary ();
                        library.WriteVideoToSavedPhotosAlbum (outputFileUrl, (path, e2) => {
                                if (e2 != null) {
                                        new UIAlertView ("Error", e2.ToString (), null, "OK", null).Show ();
                                } else {
                                        new UIAlertView ("Saved", "Saved to Photos", null, "OK", null).Show ();
                                        File.Delete (outputFileUrl.Path);
                                }
                        });
                } else {
                        new UIAlertView ("Incompatible", "Incompatible", null, "OK", null).Show ();
                }
        }
}

Application developers can configure one or more output ports for the captured data: still frames, video frames with timing information, audio samples, QuickTime movie files, or frames rendered directly into a Core Animation layer.
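
For instance, still frames can be captured by adding an AVFoundation.AVCaptureStillImageOutput to the session. The following is a minimal sketch, assuming the session configured above is already running and that the code executes inside an async method:

C# Example

// Add a still image output that produces JPEG-encoded frames
var stillImageOutput = new AVCaptureStillImageOutput () {
        OutputSettings = new NSDictionary (AVVideo.CodecKey, AVVideo.CodecJPEG)
};
if (session.CanAddOutput (stillImageOutput))
        session.AddOutput (stillImageOutput);

// Later, grab a single frame and convert it to JPEG data
var connection = stillImageOutput.ConnectionFromMediaType (AVMediaType.Video);
var sampleBuffer = await stillImageOutput.CaptureStillImageTaskAsync (connection);
var jpegData = AVCaptureStillImageOutput.JpegStillToNSData (sampleBuffer);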

Once the input and output components of the session are set, processing begins by calling the AVCaptureSession.StartRunning() method, as in the following example:

C# Example


bool SetupCapture ()
{
        // configure the capture session for low resolution, change this if your code
        // can cope with more data or volume
        session = new AVCaptureSession () {
                SessionPreset = AVCaptureSession.PresetMedium
        };

        // create a device input and attach it to the session
        var captureDevice = AVCaptureDevice.DefaultDeviceWithMediaType (AVMediaType.Video);
        var input = AVCaptureDeviceInput.FromDevice (captureDevice);
        if (input == null){
                Console.WriteLine ("No video input device");
                return false;
        }
        session.AddInput (input);

        // create a VideoDataOutput and add it to the session
        var output = new AVCaptureVideoDataOutput () {
                VideoSettings = new AVVideoSettings (CVPixelFormatType.CV32BGRA),

                // If you want to cap the frame rate at a given speed, in this sample: 15 frames per second
                MinFrameDuration = new CMTime (1, 15)
        };

        // configure the output
        queue = new CoreFoundation.DispatchQueue ("myQueue");
        outputRecorder = new OutputRecorder ();
        output.SetSampleBufferDelegateAndQueue (outputRecorder, queue);
        session.AddOutput (output);

        session.StartRunning ();
        return true;
}

public class OutputRecorder : AVCaptureVideoDataOutputSampleBufferDelegate {
        public override void DidOutputSampleBuffer (AVCaptureOutput captureOutput, CMSampleBuffer sampleBuffer, AVCaptureConnection connection)
        {
                try {
                        var image = ImageFromSampleBuffer (sampleBuffer);

                        // Do something with the image, we just stuff it in our main view.
                        AppDelegate.ImageView.BeginInvokeOnMainThread (delegate {
                                AppDelegate.ImageView.Image = image;
                        });

                        //
                        // Although this looks innocent "Oh, he is just optimizing this case away"
                        // this is incredibly important to call on this callback, because the AVFoundation
                        // has a fixed number of buffers and if it runs out of free buffers, it will stop
                        // delivering frames.
                        //
                        sampleBuffer.Dispose ();
                } catch (Exception e){
                        Console.WriteLine (e);
                }
        }

        UIImage ImageFromSampleBuffer (CMSampleBuffer sampleBuffer)
        {
                // Get the CoreVideo image
                using (var pixelBuffer = sampleBuffer.GetImageBuffer () as CVPixelBuffer){
                        // Lock the base address
                        pixelBuffer.Lock (0);
                        // Get the image geometry from the pixel buffer
                        var baseAddress = pixelBuffer.BaseAddress;
                        var bytesPerRow = pixelBuffer.BytesPerRow;
                        var width = pixelBuffer.Width;
                        var height = pixelBuffer.Height;
                        var flags = CGBitmapFlags.PremultipliedFirst | CGBitmapFlags.ByteOrder32Little;
                        // Create a CGImage on the RGB colorspace from the configured parameters above
                        using (var cs = CGColorSpace.CreateDeviceRGB ())
                        using (var context = new CGBitmapContext (baseAddress, width, height, 8, bytesPerRow, cs, (CGImageAlphaInfo) flags))
                        using (var cgImage = context.ToImage ()){
                                pixelBuffer.Unlock (0);
                                return UIImage.FromImage (cgImage);
                        }
                }
        }
}
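
When capture is no longer needed, the session should be stopped so that the camera and its capture buffers are released. A minimal teardown sketch, assuming the session field from the example above:

C# Example

void TearDownCapture ()
{
        // Stop frame delivery; counterpart to the StartRunning call in SetupCapture
        session.StopRunning ();
        session.Dispose ();
        session = null;
}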


Requirements

Namespace: AVFoundation
Assembly: Xamarin.iOS (in Xamarin.iOS.dll)
Assembly Versions: 0.0.0.0