Converting to grayscale - too slow


I wrote a class that converts an image to grayscale, but it runs much too slowly. Is there any way to make it faster?

Here is my class:

@implementation PixelProcessing

SYNTHESIZE_SINGLETON_FOR_CLASS(PixelProcessing);

#define bytesPerPixel 4
#define bitsPerComponent 8


-(UIImage*)scaleAndRotateImage: (UIImage*)img withMaxResolution: (int)kMaxResolution
{
    CGImageRef imgRef = img.CGImage;

    CGFloat width = CGImageGetWidth(imgRef);
    CGFloat height = CGImageGetHeight(imgRef);


    CGAffineTransform transform = CGAffineTransformIdentity;
    CGRect bounds = CGRectMake(0, 0, width, height);

    if ( (kMaxResolution != 0) && (width > kMaxResolution || height > kMaxResolution) ) {
        CGFloat ratio = width/height;
        if (ratio > 1) {
            bounds.size.width = kMaxResolution;
            bounds.size.height = bounds.size.width / ratio;
        }
        else {
            bounds.size.height = kMaxResolution;
            bounds.size.width = bounds.size.height * ratio;
        }
    }

    CGFloat scaleRatio;
    if (kMaxResolution != 0){
        scaleRatio = bounds.size.width / width;
    } else
    {
        scaleRatio = 1.0f;
    }

    CGSize imageSize = CGSizeMake(CGImageGetWidth(imgRef), CGImageGetHeight(imgRef));
    CGFloat boundHeight;
    UIImageOrientation orient = img.imageOrientation;
    switch(orient) {

        case UIImageOrientationUp: //EXIF = 1
            transform = CGAffineTransformIdentity;
            break;

        case UIImageOrientationUpMirrored: //EXIF = 2
            transform = CGAffineTransformMakeTranslation(imageSize.width, 0.0);
            transform = CGAffineTransformScale(transform, -1.0, 1.0);
            break;

        case UIImageOrientationDown: //EXIF = 3
            transform = CGAffineTransformMakeTranslation(imageSize.width, imageSize.height);
            transform = CGAffineTransformRotate(transform, M_PI);
            break;

        case UIImageOrientationDownMirrored: //EXIF = 4
            transform = CGAffineTransformMakeTranslation(0.0, imageSize.height);
            transform = CGAffineTransformScale(transform, 1.0, -1.0);
            break;

        case UIImageOrientationLeftMirrored: //EXIF = 5
            boundHeight = bounds.size.height;
            bounds.size.height = bounds.size.width;
            bounds.size.width = boundHeight;
            transform = CGAffineTransformMakeTranslation(imageSize.height, imageSize.width);
            transform = CGAffineTransformScale(transform, -1.0, 1.0);
            transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
            break;

        case UIImageOrientationLeft: //EXIF = 6
            boundHeight = bounds.size.height;
            bounds.size.height = bounds.size.width;
            bounds.size.width = boundHeight;
            transform = CGAffineTransformMakeTranslation(0.0, imageSize.width);
            transform = CGAffineTransformRotate(transform, 3.0 * M_PI / 2.0);
            break;

        case UIImageOrientationRightMirrored: //EXIF = 7
            boundHeight = bounds.size.height;
            bounds.size.height = bounds.size.width;
            bounds.size.width = boundHeight;
            transform = CGAffineTransformMakeScale(-1.0, 1.0);
            transform = CGAffineTransformRotate(transform, M_PI / 2.0);
            break;

        case UIImageOrientationRight: //EXIF = 8
            boundHeight = bounds.size.height;
            bounds.size.height = bounds.size.width;
            bounds.size.width = boundHeight;
            transform = CGAffineTransformMakeTranslation(imageSize.height, 0.0);
            transform = CGAffineTransformRotate(transform, M_PI / 2.0);
            break;

        default:
            [NSException raise:NSInternalInconsistencyException format: @"Invalid image orientation"];

    }

    UIGraphicsBeginImageContext(bounds.size);

    CGContextRef context = UIGraphicsGetCurrentContext();

    if (orient == UIImageOrientationRight || orient == UIImageOrientationLeft) {
        CGContextScaleCTM(context, -scaleRatio, scaleRatio);
        CGContextTranslateCTM(context, -height, 0);
    }
    else {
        CGContextScaleCTM(context, scaleRatio, -scaleRatio);
        CGContextTranslateCTM(context, 0, -height);
    }

    CGContextConcatCTM(context, transform);

    CGContextDrawImage(UIGraphicsGetCurrentContext(), CGRectMake(0, 0, width, height), imgRef);
    UIImage *tempImage = UIGraphicsGetImageFromCurrentImageContext();
    UIGraphicsEndImageContext();

    return tempImage;
}


#pragma mark Getting And Writing Pixels
-(float*) getColorForPixel: (NSUInteger)xCoordinate andForY: (NSUInteger)yCoordinate
{
    int byteIndex = (bytesPerRow * yCoordinate) + xCoordinate * bytesPerPixel;

    float *colorToReturn = malloc(3 * sizeof(float));
    colorToReturn[0] = bitmap[byteIndex] / 255.f;   //Red
    colorToReturn[1] = bitmap[byteIndex + 1] / 255.f;   //Green
    colorToReturn[2] = bitmap[byteIndex + 2] / 255.f;   //Blue

    return colorToReturn;
}

-(void) writeColor: (float*)colorToWrite forPixelAtX: (NSUInteger)xCoordinate andY: (NSUInteger)yCoordinate
{
    int byteIndex = (bytesPerRow * yCoordinate) + xCoordinate * bytesPerPixel;

    bitmap[byteIndex] = (unsigned char) ( colorToWrite[0] * 255);
    bitmap[byteIndex + 1] = (unsigned char) ( colorToWrite[1] * 255);
    bitmap[byteIndex + 2] = (unsigned char) ( colorToWrite[2] * 255);
}

#pragma mark Bitmap

-(float) getAverageBrightnessForImage: (UIImage*)img
{
    UIImage *tempImage = [self scaleAndRotateImage: img withMaxResolution: 100];

    unsigned char *rawData = [self getBytesForImage: tempImage];

    double aBrightness = 0;

    for(int y = 0; y < tempImage.size.height; y++) {
        for(int x = 0; x < tempImage.size.width; x++) {
            int byteIndex = ( (tempImage.size.width * y) + x) * bytesPerPixel;

            aBrightness += (rawData[byteIndex] + rawData[byteIndex + 1] + rawData[byteIndex + 2]);


        }
    }

    free(rawData);

    aBrightness /= 3.0f;
    aBrightness /= 255.0f;
    aBrightness /= tempImage.size.width * tempImage.size.height;

    return aBrightness;
}

-(unsigned char*) getBytesForImage: (UIImage*)pImage
{
    CGImageRef image = [pImage CGImage];
    NSUInteger width = CGImageGetWidth(image);
    NSUInteger height = CGImageGetHeight(image);

    bytesPerRow = bytesPerPixel * width;

    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    unsigned char *rawData = malloc(height * width * bytesPerPixel);
    CGContextRef context = CGBitmapContextCreate(rawData, width, height, bitsPerComponent, bytesPerRow, colorSpace, kCGBitmapByteOrder32Big | kCGImageAlphaPremultipliedLast);
    CGColorSpaceRelease(colorSpace);

    CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);
    CGContextRelease(context);

    return rawData;
}

-(void) loadWithImage: (UIImage*)img
{   
    averageBrightness = [self getAverageBrightnessForImage: img];
    currentImage = [self scaleAndRotateImage: img withMaxResolution: 0];

    imgWidth = currentImage.size.width;
    imgHeight = currentImage.size.height;

    bitmap = [self getBytesForImage: currentImage];

    bytesPerRow = bytesPerPixel * imgWidth;
}

-(void) processImage
{   
    // now convert to grayscale

    for(int y = 0; y < imgHeight; y++) {
        for(int x = 0; x < imgWidth; x++) {
            float *currentColor = [self getColorForPixel: x andForY: y];

            //Grayscale
            float averageColor = (currentColor[0] + currentColor[1] + currentColor[2]) / 3.0f;

            averageColor += 0.5f - averageBrightness;

            if (averageColor > 1.0f) averageColor = 1.0f;

            currentColor[0] = averageColor;
            currentColor[1] = averageColor;
            currentColor[2] = averageColor;

            [self writeColor: currentColor forPixelAtX: x andY: y];

            free(currentColor);
        }
    }
}

-(UIImage*) getProcessedImage
{
    // create a UIImage
    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
    CGContextRef context = CGBitmapContextCreate(bitmap, imgWidth, imgHeight, bitsPerComponent, bytesPerRow, colorSpace, kCGBitmapByteOrder32Big | kCGImageAlphaPremultipliedLast);
    CGImageRef image = CGBitmapContextCreateImage(context);
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    UIImage *resultUIImage = [UIImage imageWithCGImage: image];
    CGImageRelease(image);

    return resultUIImage;
}

-(void) releaseCurrentImage
{
    free(bitmap);
}


@end

I convert an image to grayscale like this:

    [ [PixelProcessing sharedPixelProcessing] loadWithImage: imageToDisplay.image];
    [ [PixelProcessing sharedPixelProcessing] processImage];
    imageToDisplay.image = [ [PixelProcessing sharedPixelProcessing] getProcessedImage];
    [ [PixelProcessing sharedPixelProcessing] releaseCurrentImage];

Why does it run so slowly? Is there a way to get the RGB color components of a pixel as floating-point values? How can I optimize this?

Thanks.


If you care about the alpha value, save it and pass it through as well. Also, a better way to achieve grayscale than averaging is to use the formula: luminance = 0.3 * red + 0.59 * green + 0.11 * blue. - mahboudz
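
As a hedged sketch, the comment's formula can be done entirely in integer math using fixed-point weights scaled to sum to 256 (r, g, and b here are hypothetical unsigned char channel values):

// Fixed-point luminance = 0.3*R + 0.59*G + 0.11*B; 77 + 151 + 28 == 256.
unsigned char gray = (unsigned char)((77 * r + 151 * g + 28 * b) >> 8);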
4 Answers


You can get Quartz to do the grayscale conversion for you:

CGImageRef grayscaleCGImageFromCGImage(CGImageRef inputImage) {
    size_t width = CGImageGetWidth(inputImage);
    size_t height = CGImageGetHeight(inputImage);

    // Create a gray scale context and render the input image into that
    CGColorSpaceRef colorspace = CGColorSpaceCreateDeviceGray();
    CGContextRef context = CGBitmapContextCreate(NULL, width, height, 8, 
                             4*width, colorspace, kCGBitmapByteOrderDefault);
    CGContextDrawImage(context, CGRectMake(0,0, width,height), inputImage);

    // Get an image representation of the grayscale context which the input
    //    was rendered into.
    CGImageRef outputImage = CGBitmapContextCreateImage(context);

    // Cleanup
    CGContextRelease(context);
    CGColorSpaceRelease(colorspace);
    return (CGImageRef)[(id)outputImage autorelease];
}
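
A minimal usage sketch: the function returns an autoreleased CGImageRef, so the caller only needs to wrap it back into a UIImage (imageToDisplay here is the image view from the question):

UIImage *color = imageToDisplay.image;
CGImageRef grayCGImage = grayscaleCGImageFromCGImage(color.CGImage);
UIImage *grayImage = [UIImage imageWithCGImage: grayCGImage];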


I ran into the same problem recently; here is the code I came up with (it preserves alpha, too):

@implementation UIImage (grayscale)

typedef enum {
    ALPHA = 0,
    BLUE = 1,
    GREEN = 2,
    RED = 3
} PIXELS;

- (UIImage *)convertToGrayscale {
    CGSize size = [self size];
    int width = size.width;
    int height = size.height;

    // the pixels will be painted to this array
    uint32_t *pixels = (uint32_t *) malloc(width * height * sizeof(uint32_t));

    // clear the pixels so any transparency is preserved
    memset(pixels, 0, width * height * sizeof(uint32_t));

    CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();

    // create a context with RGBA pixels
    CGContextRef context = CGBitmapContextCreate(pixels, width, height, 8, width * sizeof(uint32_t), colorSpace, 
                                                 kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedLast);

    // paint the bitmap to our context which will fill in the pixels array
    CGContextDrawImage(context, CGRectMake(0, 0, width, height), [self CGImage]);

    for(int y = 0; y < height; y++) {
        for(int x = 0; x < width; x++) {
            uint8_t *rgbaPixel = (uint8_t *) &pixels[y * width + x];

            // convert to grayscale using recommended method: http://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale
            uint32_t gray = 0.3 * rgbaPixel[RED] + 0.59 * rgbaPixel[GREEN] + 0.11 * rgbaPixel[BLUE];

            // set the pixels to gray
            rgbaPixel[RED] = gray;
            rgbaPixel[GREEN] = gray;
            rgbaPixel[BLUE] = gray;
        }
    }

    // create a new CGImageRef from our context with the modified pixels
    CGImageRef image = CGBitmapContextCreateImage(context);

    // we're done with the context, color space, and pixels
    CGContextRelease(context);
    CGColorSpaceRelease(colorSpace);
    free(pixels);

    // make a new UIImage to return
    UIImage *resultUIImage = [UIImage imageWithCGImage:image];

    // we're done with image now too
    CGImageRelease(image);

    return resultUIImage;
}

@end
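
With the category in place, the conversion becomes a one-liner on any UIImage; for example, with the question's imageToDisplay:

imageToDisplay.image = [imageToDisplay.image convertToGrayscale];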

To make this work with Retina images, multiply the width and height by self.scale at the start: int width = size.width * self.scale; int height = size.height * self.scale; - vakio

The way to find speed problems is to profile with Shark (in Xcode: Run -> Run with Performance Tool -> Shark). In this case, though, I'm fairly sure the main problems are the per-pixel malloc/free, the floating-point arithmetic, and the two method calls in the inner processing loop.
To avoid the malloc/free, you should do something like this:
- (void) getColorForPixelX:(NSUInteger)x y:(NSUInteger)y pixel:(float[3])pixel
{
    int byteIndex = (bytesPerRow * y) + x * bytesPerPixel;
    pixel[0] = bitmap[byteIndex] / 255.f;       // Red
    pixel[1] = bitmap[byteIndex + 1] / 255.f;   // Green
    pixel[2] = bitmap[byteIndex + 2] / 255.f;   // Blue
}

// To call:
float pixel[3];
for (int y = 0; y < imgHeight; y++) {
    for (int x = 0; x < imgWidth; x++) {
        [self getColorForPixelX:x y:y pixel:pixel];
        // Do stuff with pixel[0..2]
    }
}

The second likely cause of the slowdown is the use of floating point, or more precisely the cost of converting to and from floats. The filter you are writing is simple to do in integer math: add the integer pixel values and divide by 255 * 3. (Incidentally, that is a fairly poor way to convert color to grayscale; see http://en.wikipedia.org/wiki/Grayscale#Converting_color_to_grayscale.)
Method calls are fast for what they are, but still quite slow compared to the basic arithmetic of a filter. (See this article for some numbers.) The easy way to eliminate the method calls is to replace them with inline functions, as in the sketch below.
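
Putting the three suggestions together, an inner loop with no per-pixel allocation, integer-only math, and an inline function in place of the method calls might look like this. This is only a sketch: the processImageFast name is hypothetical, it reuses the bitmap, bytesPerRow, imgWidth, and imgHeight ivars from the question's class, and it drops the brightness correction for brevity.

// Inline replacement for the per-pixel method calls.
static inline void grayPixel(unsigned char *p)
{
    // Plain integer average of R, G, B, as in the original filter.
    unsigned char gray = (unsigned char)((p[0] + p[1] + p[2]) / 3);
    p[0] = gray;
    p[1] = gray;
    p[2] = gray;
}

-(void) processImageFast
{
    for (int y = 0; y < imgHeight; y++) {
        unsigned char *row = bitmap + y * bytesPerRow;
        for (int x = 0; x < imgWidth; x++) {
            grayPixel(row + x * bytesPerPixel);
        }
    }
}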


Have you tried using the luminosity blend mode? Blending a white image with the original image using that blend mode appears to produce a grayscale effect. These two images, with the foreground image on the right and the background image on the left:

(image: http://developer.apple.com/iphone/library/documentation/GraphicsImaging/Conceptual/drawingwithquartz2d/Art/both_images.jpg)

The result of blending with kCGBlendModeLuminosity looks like this:

(image: http://developer.apple.com/iphone/library/documentation/GraphicsImaging/Conceptual/drawingwithquartz2d/Art/luminosity_image.jpg)

For details, see Using Blend Modes with Images in the Quartz 2D Programming Guide.
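
A minimal sketch of that approach, assuming an existing UIImage named original: fill a white background so it contributes no hue or saturation, then draw the image over it with the luminosity blend mode.

UIImage *original = imageToDisplay.image;   // any existing UIImage
CGRect rect = CGRectMake(0, 0, original.size.width, original.size.height);

UIGraphicsBeginImageContext(original.size);
// White backdrop: supplies the (empty) hue and saturation of the result.
[[UIColor whiteColor] setFill];
UIRectFill(rect);
// The image supplies only its luminosity.
[original drawInRect: rect blendMode: kCGBlendModeLuminosity alpha: 1.0];
UIImage *gray = UIGraphicsGetImageFromCurrentImageContext();
UIGraphicsEndImageContext();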

