Hi, I'm trying to access the raw data of the iPhone camera using AVCaptureSession. I followed the guide provided by Apple (link here).
The raw data from the samplebuffer is in YUV format (am I correct here about the raw video frame format?). How can I directly obtain the data for the Y component out of the raw data stored in the samplebuffer?
When setting up the AVCaptureVideoDataOutput that returns the raw camera frames, you can set the format of those frames using code like the following:
[videoOutput setVideoSettings:[NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_32BGRA] forKey:(id)kCVPixelBufferPixelFormatTypeKey]];
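Here kCVPixelFormatType_32BGRA is just one possible choice. If you would rather receive the frames in their native YUV layout, so that the Y plane can be read out directly, a minimal sketch (assuming the same videoOutput variable as above) would be:

[videoOutput setVideoSettings:[NSDictionary dictionaryWithObject:[NSNumber numberWithInt:kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange] forKey:(id)kCVPixelBufferPixelFormatTypeKey]];

The captured frames are then delivered through the captureOutput:didOutputSampleBuffer:fromConnection: delegate method: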
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    // get the pixel buffer that backs this sample buffer
    CVImageBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    unsigned char *rawPixelBase = (unsigned char *)CVPixelBufferGetBaseAddress(pixelBuffer);
    // Do something with the raw pixels here
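    // Hypothetical sketch (not part of the original answer): with BGRA output
    // there is no Y plane, so one option is to approximate the luma of each
    // pixel from its RGB samples using the Rec. 601 weights.
    size_t width = CVPixelBufferGetWidth(pixelBuffer);
    size_t height = CVPixelBufferGetHeight(pixelBuffer);
    size_t bytesPerRow = CVPixelBufferGetBytesPerRow(pixelBuffer);
    for (size_t y = 0; y < height; y++) {
        unsigned char *row = rawPixelBase + y * bytesPerRow;
        for (size_t x = 0; x < width; x++) {
            unsigned char b = row[x * 4 + 0];
            unsigned char g = row[x * 4 + 1];
            unsigned char r = row[x * 4 + 2];
            unsigned char luma = (unsigned char)(0.299 * r + 0.587 * g + 0.114 * b);
            // ... use 'luma' as the Y value of this pixel
        }
    }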
    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
}
kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange is a bi-planar YUV 4:2:0 color space. – Brad Larson

In addition to Brad's answer and your own code, you also need to consider the following:
Since your image has two distinct planes, the function CVPixelBufferGetBaseAddress will not return the base address of a plane but the base address of an additional data structure. It's due to the current implementation that you get an address close enough to the first plane to see the image, but that is also the reason why it's shifted and has garbage in the top left. The correct way to receive the first plane is:
unsigned char *rowBase = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
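To convince yourself of this layout, you can inspect the planes directly (a hypothetical check of my own, not part of the answer):

// A bi-planar 4:2:0 buffer reports two planes: plane 0 is the full-resolution
// luma (Y) plane, plane 1 is the half-resolution interleaved CbCr plane.
size_t planeCount = CVPixelBufferGetPlaneCount(pixelBuffer);
size_t lumaWidth  = CVPixelBufferGetWidthOfPlane(pixelBuffer, 0);
size_t lumaHeight = CVPixelBufferGetHeightOfPlane(pixelBuffer, 0);
NSLog(@"planes: %zu, Y plane: %zu x %zu", planeCount, lumaWidth, lumaHeight);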
A row in the image may be longer than the width of the image (rows can be padded to an alignment boundary). That's why there are separate functions for getting the width and the number of bytes per row. You don't have this problem at the moment, but that might change with the next version of iOS. So your code should be:
size_t bufferHeight = CVPixelBufferGetHeight(pixelBuffer);
size_t bufferWidth = CVPixelBufferGetWidth(pixelBuffer);
size_t bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
size_t size = bufferHeight * bytesPerRow;
unsigned char *pixel = (unsigned char *)malloc(size);
unsigned char *rowBase = (unsigned char *)CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);
memcpy(pixel, rowBase, size);
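Note that bytesPerRow can be larger than bufferWidth. If you ever need the Y plane tightly packed, with the row padding stripped, a row-by-row sketch (my own addition, reusing the variables above) would be:

// Copy only the visible bytes of each row, dropping the alignment padding
// at the end of every row (bytesPerRow may exceed bufferWidth).
unsigned char *packed = (unsigned char *)malloc(bufferWidth * bufferHeight);
for (size_t row = 0; row < bufferHeight; row++) {
    memcpy(packed + row * bufferWidth, rowBase + row * bytesPerRow, bufferWidth);
}
// ... use 'packed', then free(packed);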
Please note that your code will seriously fail on an iPhone 3G: the 3G delivers frames in the interleaved '2vuy' format rather than the bi-planar one. The following delegate method handles both cases:
#pragma mark -
#pragma mark AVCaptureVideoDataOutputSampleBufferDelegate Methods
#if !(TARGET_IPHONE_SIMULATOR)
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection
{
    // get image buffer reference
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);

    // extract needed informations from image buffer
    CVPixelBufferLockBaseAddress(imageBuffer, 0);
    size_t bufferSize = CVPixelBufferGetDataSize(imageBuffer);
    void *baseAddress = CVPixelBufferGetBaseAddress(imageBuffer);
    CGSize resolution = CGSizeMake(CVPixelBufferGetWidth(imageBuffer), CVPixelBufferGetHeight(imageBuffer));

    // variables for grayscaleBuffer
    void *grayscaleBuffer = 0;
    size_t grayscaleBufferSize = 0;

    // the pixelFormat differs between iPhone 3G and later models
    OSType pixelFormat = CVPixelBufferGetPixelFormatType(imageBuffer);

    if (pixelFormat == '2vuy') { // iPhone 3G
        // kCVPixelFormatType_422YpCbCr8 = '2vuy',
        /* Component Y'CbCr 8-bit 4:2:2, ordered Cb Y'0 Cr Y'1 */

        // copy every second byte (the luminance bytes that form the Y-channel)
        // to a new buffer
        grayscaleBufferSize = bufferSize / 2;
        grayscaleBuffer = malloc(grayscaleBufferSize);
        if (grayscaleBuffer == NULL) {
            NSLog(@"ERROR in %@:%@:%d: couldn't allocate memory for grayscaleBuffer!", NSStringFromClass([self class]), NSStringFromSelector(_cmd), __LINE__);
            CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
            return;
        }
        memset(grayscaleBuffer, 0, grayscaleBufferSize);
        unsigned char *sourceMemPos = (unsigned char *)baseAddress + 1;
        unsigned char *destinationMemPos = grayscaleBuffer;
        unsigned char *destinationEnd = destinationMemPos + grayscaleBufferSize;
        // note: '<', not '<=', or the last iteration writes past the buffer
        while (destinationMemPos < destinationEnd) {
            *destinationMemPos = *sourceMemPos;
            destinationMemPos += 1;
            sourceMemPos += 2;
        }
    }

    if (pixelFormat == '420v' || pixelFormat == '420f') {
        // kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange = '420v',
        // kCVPixelFormatType_420YpCbCr8BiPlanarFullRange  = '420f',
        // Bi-Planar Component Y'CbCr 8-bit 4:2:0, video-range (luma=[16,235] chroma=[16,240]).
        // Bi-Planar Component Y'CbCr 8-bit 4:2:0, full-range (luma=[0,255] chroma=[1,255]).
        // baseAddress points to a big-endian CVPlanarPixelBufferInfo_YCbCrBiPlanar struct,
        // so fetch the Y-channel through CVPixelBufferGetBaseAddressOfPlane instead
        size_t bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(imageBuffer, 0);
        baseAddress = CVPixelBufferGetBaseAddressOfPlane(imageBuffer, 0);
        grayscaleBufferSize = resolution.height * bytesPerRow;
        grayscaleBuffer = malloc(grayscaleBufferSize);
        if (grayscaleBuffer == NULL) {
            NSLog(@"ERROR in %@:%@:%d: couldn't allocate memory for grayscaleBuffer!", NSStringFromClass([self class]), NSStringFromSelector(_cmd), __LINE__);
            CVPixelBufferUnlockBaseAddress(imageBuffer, 0);
            return;
        }
        memset(grayscaleBuffer, 0, grayscaleBufferSize);
        memcpy(grayscaleBuffer, baseAddress, grayscaleBufferSize);
    }

    // the grayscale data has been copied out, so the pixel buffer can be unlocked again
    CVPixelBufferUnlockBaseAddress(imageBuffer, 0);

    // do whatever you want with the grayscale buffer
    ...

    // clean-up
    free(grayscaleBuffer);
}
#endif
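As a quick usage example (my own sketch, to go where the "do whatever you want with the grayscale buffer" comment sits above), you could compute the mean luminance of the frame:

// Hypothetical usage: mean luma of the frame (0-255). For the bi-planar
// formats the buffer still contains per-row alignment padding, so this is
// only an approximation.
unsigned long long sum = 0;
unsigned char *luma = (unsigned char *)grayscaleBuffer;
for (size_t i = 0; i < grayscaleBufferSize; i++) {
    sum += luma[i];
}
NSLog(@"mean luma: %.1f", (double)sum / (double)grayscaleBufferSize);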
This is simply the culmination of everyone else's hard work, here and in other posts, converted to Swift 3 for anyone who finds it useful.
func captureOutput(_ captureOutput: AVCaptureOutput!, didOutputSampleBuffer sampleBuffer: CMSampleBuffer!, from connection: AVCaptureConnection!) {
    if let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) {
        CVPixelBufferLockBaseAddress(pixelBuffer, CVPixelBufferLockFlags.readOnly)

        let pixelFormatType = CVPixelBufferGetPixelFormatType(pixelBuffer)
        if pixelFormatType == kCVPixelFormatType_420YpCbCr8BiPlanarFullRange
            || pixelFormatType == kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange {

            let bufferHeight = CVPixelBufferGetHeight(pixelBuffer)
            let bufferWidth = CVPixelBufferGetWidth(pixelBuffer)
            let lumaBytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0)
            let size = bufferHeight * lumaBytesPerRow
            let lumaBaseAddress = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0)
            let lumaByteBuffer = unsafeBitCast(lumaBaseAddress, to: UnsafeMutablePointer<UInt8>.self)

            let releaseDataCallback: CGDataProviderReleaseDataCallback = { (info: UnsafeMutableRawPointer?, data: UnsafeRawPointer, size: Int) -> () in
                // https://developer.apple.com/reference/coregraphics/cgdataproviderreleasedatacallback
                // N.B. 'CGDataProviderRelease' is unavailable: Core Foundation objects are automatically memory managed
                return
            }

            if let dataProvider = CGDataProvider(dataInfo: nil, data: lumaByteBuffer, size: size, releaseData: releaseDataCallback) {
                let colorSpace = CGColorSpaceCreateDeviceGray()
                // an 8-bit-per-pixel grayscale image has no room for alpha,
                // so the alpha info must be .none
                let bitmapInfo = CGBitmapInfo(rawValue: CGImageAlphaInfo.none.rawValue)
                if let cgImage = CGImage(width: bufferWidth, height: bufferHeight, bitsPerComponent: 8, bitsPerPixel: 8, bytesPerRow: lumaBytesPerRow, space: colorSpace, bitmapInfo: bitmapInfo, provider: dataProvider, decode: nil, shouldInterpolate: false, intent: CGColorRenderingIntent.defaultIntent) {
                    let greyscaleImage = UIImage(cgImage: cgImage)
                    // do what you want with the greyscale image.
                }
            }
        }

        CVPixelBufferUnlockBaseAddress(pixelBuffer, CVPixelBufferLockFlags.readOnly)
    }
}