iOS camera face tracking (Swift 3, Xcode 8)


I'm trying to make a simple camera app in which the front camera can detect faces.

This should be simple enough:

  • Create a CameraView class that inherits from UIImageView and place it in the UI. Make sure it implements AVCaptureVideoDataOutputSampleBufferDelegate so it can process frames from the camera in real time.

    class CameraView: UIImageView, AVCaptureVideoDataOutputSampleBufferDelegate 
    
  • Within a function handleCamera, called when the CameraView is instantiated, set up an AVCaptureSession. Add input from the camera.

    override init(frame: CGRect) {
        super.init(frame: frame)
        handleCamera()
    }

    // Required once init(frame:) is overridden; this view is only created in code
    required init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }
    
    func handleCamera () {
        camera = AVCaptureDevice.defaultDevice(withDeviceType: .builtInWideAngleCamera,
                                               mediaType: AVMediaTypeVideo, position: .front)
        session = AVCaptureSession()
    
        // Set recovered camera as an input device for the capture session
        // Set recovered camera as an input device for the capture session
        do {
            input = try AVCaptureDeviceInput(device: camera)
        } catch {
            print("ERROR: Front camera can't be used as input")
            input = nil
        }
    
        // Add the input from the camera to the capture session
        if (session?.canAddInput(input) == true) {
            session?.addInput(input)
        }
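        // Sketch, not part of the original post: iOS 10 requires an
        // NSCameraUsageDescription entry in Info.plist, and the app is killed
        // the moment it touches the camera without one. A minimal check:
        if AVCaptureDevice.authorizationStatus(forMediaType: AVMediaTypeVideo) != .authorized {
            AVCaptureDevice.requestAccess(forMediaType: AVMediaTypeVideo) { granted in
                print("Camera access granted: \(granted)")
            }
        }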
    
  • Create the output. Create a serial dispatch queue on which frames are delivered to the AVCaptureVideoDataOutputSampleBufferDelegate (the class itself in this case). Add the output to the session.

        output = AVCaptureVideoDataOutput()
    
        output?.alwaysDiscardsLateVideoFrames = true    
        outputQueue = DispatchQueue(label: "outputQueue")
        output?.setSampleBufferDelegate(self, queue: outputQueue)
    
        // add front camera output to the session for use and modification
        // add front camera output to the session for use and modification
        if (session?.canAddOutput(output) == true) {
            session?.addOutput(output)
        } else {
            // The output can't be added to the session: handle the error
            print("ERROR: Output not viable")
        }
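        // Optional, and an assumption rather than part of the original code:
        // explicitly request BGRA frames, a format CIImage consumes directly
        output?.videoSettings = [kCVPixelBufferPixelFormatTypeKey as AnyHashable: Int(kCVPixelFormatType_32BGRA)]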
    
  • Set up the camera preview layer and run the session

        // Setup camera preview with the session input
        previewLayer = AVCaptureVideoPreviewLayer(session: session)
        previewLayer?.videoGravity = AVLayerVideoGravityResizeAspectFill
        previewLayer?.connection.videoOrientation = AVCaptureVideoOrientation.portrait
        previewLayer?.frame = self.bounds
        self.layer.addSublayer(previewLayer!)
    
        // Process the camera and run it onto the preview
        session?.startRunning()
    }
    
  • In the captureOutput function run by the delegate, convert the received sample buffer to a CIImage in order to detect faces. Give feedback if a face is found (see the coordinate-conversion sketch after this code for mapping the detected bounds onto the view).

    func captureOutput(_ captureOutput: AVCaptureOutput!, didDrop sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {

        let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
        let cameraImage = CIImage(cvPixelBuffer: pixelBuffer!)

        let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
        let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
        let faces = faceDetector?.features(in: cameraImage)

        for face in faces as! [CIFaceFeature] {

            print("Found bounds are \(face.bounds)")

            let faceBox = UIView(frame: face.bounds)

            faceBox.layer.borderWidth = 3
            faceBox.layer.borderColor = UIColor.red.cgColor
            faceBox.backgroundColor = UIColor.clear
            self.addSubview(faceBox)

            if face.hasLeftEyePosition {
                print("Left eye bounds are \(face.leftEyePosition)")
            }

            if face.hasRightEyePosition {
                print("Right eye bounds are \(face.rightEyePosition)")
            }
        }
    }
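One note on the drawing above: CIDetector reports face.bounds in Core Image coordinates, which have their origin at the bottom-left of the pixel buffer and use the buffer's dimensions, so using them directly as a UIView frame will not line up with the preview. A minimal conversion sketch; the helper name is hypothetical, and it assumes the preview fills the view at the same aspect ratio, ignoring rotation and front-camera mirroring:

    // Hypothetical helper: map a rect from Core Image coordinates (origin
    // bottom-left, sized to the pixel buffer) into this view's coordinates
    // (origin top-left). Assumes the preview fills the view without cropping,
    // rotation, or mirroring.
    func convertToViewRect(_ faceBounds: CGRect, imageSize: CGSize) -> CGRect {
        let scaleX = bounds.width / imageSize.width
        let scaleY = bounds.height / imageSize.height
        return CGRect(x: faceBounds.origin.x * scaleX,
                      y: (imageSize.height - faceBounds.maxY) * scaleY,
                      width: faceBounds.width * scaleX,
                      height: faceBounds.height * scaleY)
    }

With that, faceBox would be created as UIView(frame: convertToViewRect(face.bounds, imageSize: cameraImage.extent.size)).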
    
My problem: I can get the camera running, but despite trying numerous pieces of code from all over the internet, I have never been able to get captureOutput to detect a face. Either the app never enters the function, or it crashes because of an invalid variable, most often the sample buffer being null. What am I doing wrong?
1 Answer

You need to change your captureOutput function's parameters to the following: func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!). Your captureOutput function is called when a buffer is dropped, not when one is received from the camera.
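That is, the delegate method that receives live frames is:

    func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
        // Called for every frame the camera delivers; run face detection here
    }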

I actually found this out with the help of an iOS developer during my internship and forgot to update the question. This was all that was missing; thanks for looking, and hopefully this helps others. - KazToozs
Were you able to get detection running smoothly? Even when I try CIDetectorAccuracyLow, the view still feels a bit laggy with real-time face detection turned on. - nr5
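On the performance point, one common mitigation (a sketch, not from this thread) is to create the CIDetector once rather than on every frame, and to enable tracking:

    // Build the detector once (creating it per frame is expensive) and let
    // Core Image track faces between frames
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil,
                                  options: [CIDetectorAccuracy: CIDetectorAccuracyLow,
                                            CIDetectorTracking: true])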
