Commit 5fad5f1: Added comments
sayaleepote committed Jul 4, 2018 (1 parent: b34fe82)

Showing 1 changed file with 17 additions and 4 deletions:
CustomVisionMicrosoftToCoreML/ViewController.swift
@@ -31,17 +31,23 @@ class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate
}

func configureCamera() {

    // Create the capture session
    let captureSession = AVCaptureSession()
    captureSession.sessionPreset = .photo

    // Add input for capture
    guard let captureDevice = AVCaptureDevice.default(for: .video) else { return }
    guard let captureInput = try? AVCaptureDeviceInput(device: captureDevice) else { return }
    captureSession.addInput(captureInput)

    // Add preview layer
    let previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)
    view.layer.addSublayer(previewLayer)
    previewLayer.frame = view.frame

    // Add output for capture
    let dataOutput = AVCaptureVideoDataOutput()
    dataOutput.setSampleBufferDelegate(self, queue: DispatchQueue(label: "videoQueue"))
    captureSession.addOutput(dataOutput)

    // Start the session only after inputs and outputs are attached
    captureSession.startRunning()
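
A note on an assumption the snippet makes (not part of this commit): the session only delivers frames if the app has camera permission, which requires an NSCameraUsageDescription entry in Info.plist. A minimal sketch of requesting access before running the setup above, where configureCamera() is the method shown here:

AVCaptureDevice.requestAccess(for: .video) { granted in
    // Bail out if the user denied camera access
    guard granted else { return }
    // Session and preview setup back on the main queue
    DispatchQueue.main.async {
        self.configureCamera()
    }
}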
@@ -50,13 +56,19 @@ class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate
// MARK: - AVCaptureVideoDataOutputSampleBufferDelegate

func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {

    // Initialise CVPixelBuffer from sample buffer
    guard let pixelBuffer: CVPixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }

    // Initialise the Core ML model
    guard let handSignsModel = try? VNCoreMLModel(for: HandSigns().model) else { return }

    // Create a Core ML Vision request
    let request = VNCoreMLRequest(model: handSignsModel) { (finishedRequest, err) in

        // Handle the results of the Core ML Vision request
        guard let results = finishedRequest.results as? [VNClassificationObservation] else { return }

        guard let firstResult = results.first else { return }
        var predictionString = ""
        DispatchQueue.main.async {
@@ -75,7 +87,8 @@ class ViewController: UIViewController, AVCaptureVideoDataOutputSampleBufferDelegate
            self.predictionLabel.text = predictionString + "(\(firstResult.confidence))"
        }
    }

    // Perform the above request using a Vision image request handler
    try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer, options: [:]).perform([request])
}
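
One detail worth flagging (an aside, not part of the commit): VNImageRequestHandler assumes an .up orientation by default, while the camera delivers landscape-oriented pixel buffers, so portrait frames may classify worse than expected. A hedged variant of the call above that passes the orientation explicitly:

// .right maps the rear camera's landscape buffer to an upright portrait image
try? VNImageRequestHandler(cvPixelBuffer: pixelBuffer,
                           orientation: .right,
                           options: [:]).perform([request])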
