iOS Swift - 自定义相机覆盖层

15

你好,我想在我的应用程序中像这样打开摄像头

(示例截图:相机画面被限制在屏幕中间的矩形区域内)

我只想在该部分的中间位置打开相机,以便用户只能在矩形区域拍摄快照

我正在使用的代码是这个

import UIKit
import AVFoundation

class TakeProductPhotoController: UIViewController {

    /// Session that drives the live camera preview.
    let captureSession = AVCaptureSession()
    /// Layer that renders the camera feed; created in beginSession().
    var previewLayer : AVCaptureVideoPreviewLayer?

    // If we find a device we'll store it here for later use
    var captureDevice : AVCaptureDevice?

    /// Screen width in points (kept for parity with the original code;
    /// currently unused by the active touch handlers).
    let screenWidth = UIScreen.mainScreen().bounds.size.width

    override func viewDidLoad() {
        super.viewDidLoad()

        captureSession.sessionPreset = AVCaptureSessionPresetHigh

        // Find the back-facing video camera among all capture devices.
        for device in AVCaptureDevice.devices() {
            // Make sure this particular device supports video and is the back camera.
            if device.hasMediaType(AVMediaTypeVideo) &&
               device.position == AVCaptureDevicePosition.Back {
                if let backCamera = device as? AVCaptureDevice {
                    captureDevice = backCamera
                    print("Capture device found")
                    beginSession()
                    // FIX: stop scanning once the back camera is found; the
                    // original kept looping and could call beginSession() again.
                    break
                }
            }
        }
    }

    /// Locks the capture device and applies a fixed lens position plus a
    /// custom ISO derived from a normalized value.
    /// - Parameters:
    ///   - focusValue: lens position, expected in 0...1.
    ///   - isoValue: normalized 0...1; mapped linearly onto the active
    ///     format's [minISO, maxISO] range.
    func updateDeviceSettings(focusValue : Float, isoValue : Float) {
        guard let device = captureDevice else { return }

        do {
            try device.lockForConfiguration()
        } catch let error as NSError {
            // FIX: the original wrote the error through a nil NSErrorPointer
            // (error.memory = ...), which crashes, and then kept configuring
            // a device it never locked. Log and bail out instead.
            print("lockForConfiguration failed: \(error.localizedDescription)")
            return
        }

        device.setFocusModeLockedWithLensPosition(focusValue, completionHandler: { (time) -> Void in
            //
        })

        // Clamp the normalized iso onto [minISO, maxISO] of the active format.
        let minISO = device.activeFormat.minISO
        let maxISO = device.activeFormat.maxISO
        let clampedISO = isoValue * (maxISO - minISO) + minISO

        device.setExposureModeCustomWithDuration(AVCaptureExposureDurationCurrent, ISO: clampedISO, completionHandler: { (time) -> Void in
            //
        })

        device.unlockForConfiguration()
    }

    /// Converts a touch location into a point whose x/y components are the
    /// fraction (0...1) of the screen width/height at which the touch landed.
    func touchPercent(touch : UITouch) -> CGPoint {
        // Get the dimensions of the screen in points
        let screenSize = UIScreen.mainScreen().bounds.size

        // Normalize the tapped position by the screen dimensions.
        var touchPer = CGPointZero
        touchPer.x = touch.locationInView(self.view).x / screenSize.width
        touchPer.y = touch.locationInView(self.view).y / screenSize.height

        return touchPer
    }

    /// Locks the device and sets a fixed lens position (0...1).
    func focusTo(value : Float) {
        guard let device = captureDevice else { return }

        do {
            try device.lockForConfiguration()
        } catch let error as NSError {
            // FIX: same nil-NSErrorPointer crash as updateDeviceSettings;
            // abort instead of touching an unlocked device.
            print("lockForConfiguration failed: \(error.localizedDescription)")
            return
        }

        device.setFocusModeLockedWithLensPosition(value, completionHandler: { (time) -> Void in
            //
        })
        device.unlockForConfiguration()
    }

    override func touchesBegan(touches: Set<UITouch>, withEvent event: UIEvent?) {
        // FIX: bind touches.first instead of force-unwrapping it.
        if let touch = touches.first {
            let touchPer = touchPercent(touch)
            updateDeviceSettings(Float(touchPer.x), isoValue: Float(touchPer.y))
        }

        super.touchesBegan(touches, withEvent: event)
    }

    override func touchesMoved(touches: Set<UITouch>, withEvent event: UIEvent?) {
        // FIX: bind touches.first instead of force-unwrapping it.
        if let touch = touches.first {
            let touchPer = touchPercent(touch)
            updateDeviceSettings(Float(touchPer.x), isoValue: Float(touchPer.y))
        }
    }

    /// Puts the device into locked-focus mode before the session starts.
    func configureDevice() {
        guard let device = captureDevice else { return }

        do {
            try device.lockForConfiguration()
        } catch let error as NSError {
            // FIX: same nil-NSErrorPointer crash pattern; abort on failure.
            print("lockForConfiguration failed: \(error.localizedDescription)")
            return
        }

        device.focusMode = .Locked
        device.unlockForConfiguration()
    }

    /// Wires the camera input into the session, installs the preview layer
    /// over the whole view, and starts the session.
    func beginSession() {
        configureDevice()

        guard let device = captureDevice else { return }

        let deviceInput: AVCaptureDeviceInput
        do {
            deviceInput = try AVCaptureDeviceInput(device: device)
        } catch let error as NSError {
            // FIX: the original set deviceInput to nil on failure and then
            // passed it to addInput(_:), which raises at runtime.
            print("error: \(error.localizedDescription)")
            return
        }

        // FIX: addInput(_:) raises if the session cannot accept the input.
        guard captureSession.canAddInput(deviceInput) else {
            print("error: cannot add camera input to the capture session")
            return
        }
        captureSession.addInput(deviceInput)

        let layer = AVCaptureVideoPreviewLayer(session: captureSession)
        previewLayer = layer

        self.view.layer.addSublayer(layer)
        // Full-screen preview. To confine the camera to a rectangular
        // overlay, set this frame to the overlay view's frame instead
        // (which is what the question is asking about).
        layer.frame = self.view.layer.frame
        captureSession.startRunning()
    }
}

在这段代码中,相机占据整个屏幕。


你能否发布你的故事板设计?我遇到了同样的问题。 - Noman Akhtar
@NomanAkhtar 你还卡在那里吗? - hellosheikh
@hellosheikh 那么,previewLayer 的最终更新帧是什么?还可以告诉我如何在触摸后获取图像对象吗? - Shrikant K
1个回答

10
如果您想在自定义的UIView中启动相机,则需要更改AVCaptureVideoPreviewLayer。您可以更改其边界、位置,还可以向其添加掩码。

回答您的问题,捕获层占据整个屏幕是因为您有:

 previewLayer?.frame = self.view.layer.frame

将这行更改为覆盖框架

  previewLayer?.frame = self.overLayView.layer.frame 

或者,如果您想手动使用原始值定位相机图层:

  previewLayer?.frame = CGRectMake(x,y,width,height)

还要注意,如果您想在重叠视图中启动相机,则需要将子视图添加到该重叠视图中。

因此,这一行:

     self.view.layer.addSublayer(previewLayer!)

将会是这样:

    self.overLayView.layer.addSublayer(previewLayer!)

拉伸图层/适配预览图层:

  previewLayer = AVCaptureVideoPreviewLayer(session: captureSession)

        var bounds:CGRect
         bounds=cameraView.layer.frame;
        previewLayer!.videoGravity = AVLayerVideoGravityResizeAspectFill;
        previewLayer!.bounds=bounds;
        previewLayer!.position=CGPointMake(CGRectGetMidX(bounds), CGRectGetMidY(bounds));

        self.view.layer.addSublayer(previewLayer!)

非常感谢。是的,我尝试了一下,它可以工作。只剩下一个小问题。我从界面构建器中拖动了UI视图,然后设置了一个变量并编写了代码。问题是它只在中间显示相机,而不是占用整个矩形自定义UI视图区域。让我展示一张截图,这样你就能更好地理解了。 - hellosheikh
好的,请试一下,previewLayer?.frame = CGRectMake(overlayView.frame.origin.x,overlayView.frame.origin.y,self.view.frame.size.width,overLayView.frame.size.height) - Teja Nandamuri
只需将宽度设置为self.view的宽度,剩下的x、y、高度必须与覆盖视图相等。 - Teja Nandamuri
1
overLayView在哪里?它是什么类型的?回答不完整。 - Paresh. P
1
@Paresh.P 它可以是你选择的任何视图作为叠加层,并且当然是 UIView 类型的。 - Teja Nandamuri
显示剩余7条评论

网页内容由 Stack Overflow 提供,点击上方的原文链接可以查看英文原文。