我正在开发一款应用程序,其中一些主要功能是将CIFilters应用于图像。
let context = CIContext()
let context = CIContext(eaglContext: EAGLContext(api: .openGLES3)!)
let context = CIContext(mtlDevice: MTLCreateSystemDefaultDevice()!)
所有这些都会在我的CameraViewController上给我大约相同的CPU使用率(70%),其中我对帧应用滤镜并更新imageview。所有这些似乎以完全相同的方式工作,这使我认为我缺少一些关键信息。
例如,使用AVFoundation,我从相机获取每个帧,将滤镜应用于该帧,并使用新图像更新imageview。
let context = CIContext()
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
connection.videoOrientation = orientation
connection.isVideoMirrored = !cameraModeIsBack
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: DispatchQueue.main)
let sharpenFilter = CIFilter(name: "CISharpenLuminance")
let saturationFilter = CIFilter(name: "CIColorControls")
let contrastFilter = CIFilter(name: "CIColorControls")
let pixellateFilter = CIFilter(name: "CIPixellate")
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer)
var cameraImage = CIImage(cvImageBuffer: pixelBuffer!)
saturationFilter?.setValue(cameraImage, forKey: kCIInputImageKey)
saturationFilter?.setValue(saturationValue, forKey: "inputSaturation")
var cgImage = context.createCGImage((saturationFilter?.outputImage!)!, from: cameraImage.extent)!
cameraImage = CIImage(cgImage: cgImage)
sharpenFilter?.setValue(cameraImage, forKey: kCIInputImageKey)
sharpenFilter?.setValue(sharpnessValue, forKey: kCIInputSharpnessKey)
cgImage = context.createCGImage((sharpenFilter?.outputImage!)!, from: (cameraImage.extent))!
cameraImage = CIImage(cgImage: cgImage)
contrastFilter?.setValue(cameraImage, forKey: "inputImage")
contrastFilter?.setValue(contrastValue, forKey: "inputContrast")
cgImage = context.createCGImage((contrastFilter?.outputImage!)!, from: (cameraImage.extent))!
cameraImage = CIImage(cgImage: cgImage)
pixellateFilter?.setValue(cameraImage, forKey: kCIInputImageKey)
pixellateFilter?.setValue(pixelateValue, forKey: kCIInputScaleKey)
cgImage = context.createCGImage((pixellateFilter?.outputImage!)!, from: (cameraImage.extent))!
applyChanges(image: cgImage)
}
另一个例子是我如何仅对普通图像进行更改(我使用滑块完成所有这些操作)。
func imagePixelate(sliderValue: CGFloat){
let cgImg = image?.cgImage
let ciImg = CIImage(cgImage: cgImg!)
let pixellateFilter = CIFilter(name: "CIPixellate")
pixellateFilter?.setValue(ciImg, forKey: kCIInputImageKey)
pixellateFilter?.setValue(sliderValue, forKey: kCIInputScaleKey)
let outputCIImg = pixellateFilter?.outputImage!
let outputCGImg = context.createCGImage(outputCIImg!, from: (outputCIImg?.extent)!)
let outputUIImg = UIImage(cgImage:outputCGImg!, scale:(originalImage?.scale)!, orientation: originalOrientation!)
imageSource[0] = ImageSource(image: outputUIImg)
slideshow.setImageInputs(imageSource)
currentFilteredImage = outputUIImg
}
基本上:
- 从UiImg创建CgImg
- 从CgImg创建CiImg
- 使用上下文将滤镜应用于图像并将其转换回UiImg
- 使用新的UiImg更新任何视图
这个过程在我的iPhone X上运行良好,也在iPhone 6上出乎意料地运行良好。因为我的应用程序几乎完成了,所以我想尽可能地对它进行优化。我查看了很多有关使用OpenGL和Metal进行处理的文档,但似乎无法弄清如何开始。
我一直以为我是在CPU上运行这些进程,但使用OpenGL和Metal创建上下文并没有提供任何改进。我需要使用MetalKit视图或GLKit视图吗(eaglContext似乎完全被淘汰了)?我该如何转换?苹果的文档似乎不够好。