ios - GPUImage custom OpenGL ES shader results in a black image

Tags: ios objective-c opengl-es gpuimage

Another OpenGL ES image filter based on this:

uniform sampler2D texture;
uniform float amount;
uniform vec2 texSize;
varying vec2 texCoord;
void main() {
    vec4 color = texture2D(texture, texCoord);
    vec4 orig = color;

    /* High pass filter */
    vec4 highpass = color * 5.0;

    float dx = 1.0 / texSize.x;
    float dy = 1.0 / texSize.y;
    highpass += texture2D(texture, texCoord + vec2(-dx, -dy)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(dx, -dy)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(dx, dy)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(-dx, dy)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(-dx * 2.0, -dy * 2.0)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(dx * 2.0, -dy * 2.0)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(dx * 2.0, dy * 2.0)) * -0.625;
    highpass += texture2D(texture, texCoord + vec2(-dx * 2.0, dy * 2.0)) * -0.625;
    highpass.a = 1.0;

    /* Overlay blend */
    vec3 overlay = vec3(1.0);
    if (highpass.r <= 0.5) {
        overlay.r = 2.0 * color.r * highpass.r;
    } else {
        overlay.r = 1.0 - 2.0 * (1.0 - color.r) * (1.0 - highpass.r);
    }
    if (highpass.g <= 0.5) {
        overlay.g = 2.0 * color.g * highpass.g;
    } else {
        overlay.g = 1.0 - 2.0 * (1.0 - color.g) * (1.0 - highpass.g);
    }
    if (highpass.b <= 0.5) {
        overlay.b = 2.0 * color.b * highpass.b;
    } else {
        overlay.b = 1.0 - 2.0 * (1.0 - color.b) * (1.0 - highpass.b);
    }
    color.rgb = (overlay * 0.8) + (orig.rgb * 0.2);

    /* Desaturated hard light */
    vec3 desaturated = vec3(orig.r + orig.g + orig.b / 3.0);
    if (desaturated.r <= 0.5) {
        color.rgb = 2.0 * color.rgb * desaturated;
    } else {
        color.rgb = vec3(1.0) - vec3(2.0) * (vec3(1.0) - color.rgb) * (vec3(1.0) - desaturated);
    }
    color = (orig * 0.6) + (color * 0.4);

    /* Add back some color */
    float average = (color.r + color.g + color.b) / 3.0;
    color.rgb += (average - color.rgb) * (1.0 - 1.0 / (1.001 - 0.45));

    gl_FragColor = (color * amount) + (orig * (1.0 - amount));
}

From my question yesterday, I know to assign a precision to every float and vec. This time the shader compiles fine, but when I apply the filter in GPUImage (for example, setting the value of clarity to 0.8), the image turns black. My gut tells me it has something to do with the texture size, but since I don't know how GPUImage handles that, I'm a bit stuck.

Here is my implementation in Objective-C:

.h

#import <GPUImage/GPUImage.h>

@interface GPUImageClarityFilter : GPUImageFilter
{
    GLint clarityUniform;
}

// Gives the image a gritty, surreal contrasty effect
// Value 0 to 1
@property (readwrite, nonatomic) GLfloat clarity;

@end

.m

#import "GPUImageClarityFilter.h"

#if TARGET_IPHONE_SIMULATOR || TARGET_OS_IPHONE
NSString *const kGPUImageClarityFragmentShaderString = SHADER_STRING
(
 uniform sampler2D inputImageTexture;
 uniform lowp float clarity;
 uniform highp vec2 textureSize;
 varying highp vec2 textureCoordinate;
 void main() {
     highp vec4 color = texture2D(inputImageTexture, textureCoordinate);
     highp vec4 orig = color;

     /* High pass filter */
     highp vec4 highpass = color * 5.0;

     highp float dx = 1.0 / textureSize.x;
     highp float dy = 1.0 / textureSize.y;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, -dy)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, -dy)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, dy)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, dy)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, -dy * 2.0)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, -dy * 2.0)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, dy * 2.0)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, dy * 2.0)) * -0.625;
     highpass.a = 1.0;

     /* Overlay blend */
     highp vec3 overlay = vec3(1.0);
     if (highpass.r <= 0.5) {
         overlay.r = 2.0 * color.r * highpass.r;
     } else {
         overlay.r = 1.0 - 2.0 * (1.0 - color.r) * (1.0 - highpass.r);
     }
     if (highpass.g <= 0.5) {
         overlay.g = 2.0 * color.g * highpass.g;
     } else {
         overlay.g = 1.0 - 2.0 * (1.0 - color.g) * (1.0 - highpass.g);
     }
     if (highpass.b <= 0.5) {
         overlay.b = 2.0 * color.b * highpass.b;
     } else {
         overlay.b = 1.0 - 2.0 * (1.0 - color.b) * (1.0 - highpass.b);
     }
     color.rgb = (overlay * 0.8) + (orig.rgb * 0.2);

     /* Desaturated hard light */
     highp vec3 desaturated = vec3(orig.r + orig.g + orig.b / 3.0);
     if (desaturated.r <= 0.5) {
         color.rgb = 2.0 * color.rgb * desaturated;
     } else {
         color.rgb = vec3(1.0) - vec3(2.0) * (vec3(1.0) - color.rgb) * (vec3(1.0) - desaturated);
     }
     color = (orig * 0.6) + (color * 0.4);

     /* Add back some color */
     highp float average = (color.r + color.g + color.b) / 3.0;
     color.rgb += (average - color.rgb) * (1.0 - 1.0 / (1.001 - 0.45));

     gl_FragColor = (color * clarity) + (orig * (1.0 - clarity));
 }
);
#else
NSString *const kGPUImageClarityFragmentShaderString = SHADER_STRING
(
 uniform sampler2D inputImageTexture;
 uniform float clarity;
 uniform vec2 textureSize;
 varying vec2 textureCoordinate;
 void main() {
     vec4 color = texture2D(inputImageTexture, textureCoordinate);
     vec4 orig = color;

     /* High pass filter */
     vec4 highpass = color * 5.0;

     float dx = 1.0 / textureSize.x;
     float dy = 1.0 / textureSize.y;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, -dy)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, -dy)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx, dy)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx, dy)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, -dy * 2.0)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, -dy * 2.0)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(dx * 2.0, dy * 2.0)) * -0.625;
     highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-dx * 2.0, dy * 2.0)) * -0.625;
     highpass.a = 1.0;

     /* Overlay blend */
     vec3 overlay = vec3(1.0);
     if (highpass.r <= 0.5) {
         overlay.r = 2.0 * color.r * highpass.r;
     } else {
         overlay.r = 1.0 - 2.0 * (1.0 - color.r) * (1.0 - highpass.r);
     }
     if (highpass.g <= 0.5) {
         overlay.g = 2.0 * color.g * highpass.g;
     } else {
         overlay.g = 1.0 - 2.0 * (1.0 - color.g) * (1.0 - highpass.g);
     }
     if (highpass.b <= 0.5) {
         overlay.b = 2.0 * color.b * highpass.b;
     } else {
         overlay.b = 1.0 - 2.0 * (1.0 - color.b) * (1.0 - highpass.b);
     }
     color.rgb = (overlay * 0.8) + (orig.rgb * 0.2);

     /* Desaturated hard light */
     vec3 desaturated = vec3(orig.r + orig.g + orig.b / 3.0);
     if (desaturated.r <= 0.5) {
         color.rgb = 2.0 * color.rgb * desaturated;
     } else {
         color.rgb = vec3(1.0) - vec3(2.0) * (vec3(1.0) - color.rgb) * (vec3(1.0) - desaturated);
     }
     color = (orig * 0.6) + (color * 0.4);

     /* Add back some color */
     float average = (color.r + color.g + color.b) / 3.0;
     color.rgb += (average - color.rgb) * (1.0 - 1.0 / (1.001 - 0.45));

     gl_FragColor = (color * clarity) + (orig * (1.0 - clarity));
 }
);
#endif

@implementation GPUImageClarityFilter

@synthesize clarity = _clarity;

#pragma mark -
#pragma mark Initialization and teardown

- (id)init;
{
    if (!(self = [super initWithFragmentShaderFromString:kGPUImageClarityFragmentShaderString]))
    {
        return nil;
    }

    clarityUniform = [filterProgram uniformIndex:@"clarity"];
    self.clarity = 0.0;

    return self;
}

#pragma mark -
#pragma mark Accessors

- (void)setClarity:(GLfloat)clarity;
{
    _clarity = clarity;

    [self setFloat:_clarity forUniform:clarityUniform program:filterProgram];
}

@end

The other thing I thought of doing was applying GPUImage's built-in low-pass and high-pass filters, but I have a feeling that would end up being a fairly clunky solution.

Best Answer

This is probably due to textureSize not being one of the standard uniforms that GPUImageFilter provides for you. inputImageTexture and textureCoordinate are standard uniforms and attributes supplied by one of these filters, and it looks like you are providing the clarity uniform yourself.

Since textureSize is never set, it defaults to 0.0. Your 1.0 / textureSize.x calculation then divides by zero, which tends to produce black frames in iOS fragment shaders.
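
One way out is to calculate and upload textureSize yourself once the input size is known. Below is a minimal sketch of that route, assuming GPUImageFilter's -setupFilterForSize: hook and its -setSize:forUniform:program: helper behave as described here, and adding a hypothetical textureSizeUniform ivar next to clarityUniform; verify both method names against your copy of GPUImage.

// In the @interface's ivar block, alongside clarityUniform:
//     GLint textureSizeUniform;
//
// In -init, after looking up clarityUniform:
//     textureSizeUniform = [filterProgram uniformIndex:@"textureSize"];

// Called by GPUImageFilter when the size of the incoming frame is known;
// forwards that size to the shader's textureSize uniform.
- (void)setupFilterForSize:(CGSize)filterFrameSize;
{
    [self setSize:filterFrameSize forUniform:textureSizeUniform program:filterProgram];
}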

You could either calculate and supply that uniform yourself (as in the sketch above), or consider basing your custom filter on GPUImage3x3TextureSamplingFilter instead. That filter base class passes in the result of 1.0 / textureSize.x as the texelWidth uniform (along with a matching texelHeight for the vertical component), so you don't have to calculate it. In fact, it also calculates the texture coordinates of the 8 surrounding pixels, so you can remove four of the calculations above and convert them into non-dependent texture reads. You would only need to calculate four texture reads based on 2 * texelWidth and 2 * texelHeight to cover the remaining four reads.
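
To illustrate that second option, here is a rough excerpt of what the high-pass section could look like in a subclass of GPUImage3x3TextureSamplingFilter. The texelWidth/texelHeight uniforms and the corner varyings reflect my reading of that base class's 3x3 sampling vertex shader, so treat the names as assumptions to verify against your GPUImage version; the Objective-C side would stay essentially the same as above, except the class would inherit from GPUImage3x3TextureSamplingFilter so the fragment shader gets paired with the 3x3 sampling vertex shader.

// Hypothetical fragment shader excerpt for a GPUImage3x3TextureSamplingFilter subclass.
uniform sampler2D inputImageTexture;
uniform lowp float clarity;
uniform highp float texelWidth;   // 1.0 / texture width, supplied by the base class
uniform highp float texelHeight;  // 1.0 / texture height, supplied by the base class

varying highp vec2 textureCoordinate;
varying highp vec2 topLeftTextureCoordinate;
varying highp vec2 topRightTextureCoordinate;
varying highp vec2 bottomLeftTextureCoordinate;
varying highp vec2 bottomRightTextureCoordinate;

void main() {
    highp vec4 color = texture2D(inputImageTexture, textureCoordinate);
    highp vec4 orig = color;

    /* High pass: the four inner corner taps use precomputed varyings,
       so they become non-dependent texture reads */
    highp vec4 highpass = color * 5.0;
    highpass += texture2D(inputImageTexture, topLeftTextureCoordinate) * -0.625;
    highpass += texture2D(inputImageTexture, topRightTextureCoordinate) * -0.625;
    highpass += texture2D(inputImageTexture, bottomLeftTextureCoordinate) * -0.625;
    highpass += texture2D(inputImageTexture, bottomRightTextureCoordinate) * -0.625;

    /* The four outer corner taps still need explicit two-texel offsets */
    highp vec2 twoTexels = vec2(2.0 * texelWidth, 2.0 * texelHeight);
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-twoTexels.x, -twoTexels.y)) * -0.625;
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2( twoTexels.x, -twoTexels.y)) * -0.625;
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2( twoTexels.x,  twoTexels.y)) * -0.625;
    highpass += texture2D(inputImageTexture, textureCoordinate + vec2(-twoTexels.x,  twoTexels.y)) * -0.625;
    highpass.a = 1.0;

    /* ... the overlay blend, desaturated hard light, and color restoration
       stages would continue unchanged from the original shader ... */
    gl_FragColor = (color * clarity) + (orig * (1.0 - clarity));
}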

You could actually break this operation into multiple passes to save on calculations: do a small box blur first, followed by an overlay blend, followed by the final stage of this filter. That could speed things up further.
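
For completeness, here is a minimal sketch of what such a multi-pass chain might look like, using the stock GPUImageBoxBlurFilter and GPUImageOverlayBlendFilter. The kClarityFinishShaderString filter and the inputImage variable are hypothetical placeholders, the input ordering of the two-input blend and the capture calls (useNextFrameForImageCapture / imageFromCurrentFramebuffer) assume a recent GPUImage, and none of this is the exact pipeline the answer describes; check it against your version of the framework.

// inputImage is a UIImage you already have.
GPUImagePicture *source = [[GPUImagePicture alloc] initWithImage:inputImage];

// Pass 1: small blur that stands in for the low-frequency image of the high pass.
GPUImageBoxBlurFilter *boxBlur = [[GPUImageBoxBlurFilter alloc] init];

// Pass 2: overlay blend of the sharp original with the blurred copy.
GPUImageOverlayBlendFilter *overlayBlend = [[GPUImageOverlayBlendFilter alloc] init];

// Pass 3: hypothetical single-input filter holding the desaturated hard light,
// color restoration, and clarity mix from the shader above.
GPUImageFilter *clarityFinish = [[GPUImageFilter alloc] initWithFragmentShaderFromString:kClarityFinishShaderString];

[source addTarget:overlayBlend];      // first input of the blend: the original image
[source addTarget:boxBlur];
[boxBlur addTarget:overlayBlend];     // second input of the blend: the blurred image
[overlayBlend addTarget:clarityFinish];

[clarityFinish useNextFrameForImageCapture];
[source processImage];
UIImage *filteredImage = [clarityFinish imageFromCurrentFramebuffer];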

Regarding "ios - GPUImage custom OpenGL ES shader results in a black image", we found a similar question on Stack Overflow: https://stackoverflow.com/questions/32016646/
