c++ - Custom vertex processor not working - matrix multiplication error or something else?

Tags: c++, matrix, renderer

I'm writing a simple renderer in C++. It follows conventions similar to OpenGL, but it does not use OpenGL or DirectX; float3, float4 and float4x4 are my own custom structs.

The problem is that when I set the eye anywhere other than (0, 0, 0), I get strange-looking triangles that I don't expect to see. My guess is that the cause is a wrong matrix multiplication formula, a wrong multiplication order, missing normalization, or a wrong lookAt/setPerspective formula, but I'm stuck and can't find the mistake.

I'll upload some illustrations/screenshots later; I don't have access to them right now.

I use column-major notation for my matrices (matrix[column][row]), just like OpenGL.
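
As a small illustration of that layout (not code from the renderer): element (row r, column c) is stored at columns[c][r], so a translation matrix keeps its translation in the fourth column, exactly as multByTranslation further below builds it; tx/ty/tz are placeholder names:

float4x4 t(
    float4(1.0f, 0.0f, 0.0f, 0.0f), //column 0
    float4(0.0f, 1.0f, 0.0f, 0.0f), //column 1
    float4(0.0f, 0.0f, 1.0f, 0.0f), //column 2
    float4(tx, ty, tz, 1.0f)        //column 3 carries the translation
    );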

Here is the matrix multiplication code:

class float4x4 { //[column][row]
    float4 columns[4];

public:

    float4x4 multiplyBy(float4x4 &b){
        float4x4 c = float4x4();

        c.columns[0] = float4(
            columns[0].x * b.columns[0].x + columns[1].x * b.columns[0].y + columns[2].x * b.columns[0].z + columns[3].x * b.columns[0].w,
            columns[0].y * b.columns[0].x + columns[1].y * b.columns[0].y + columns[2].y * b.columns[0].z + columns[3].y * b.columns[0].w,
            columns[0].z * b.columns[0].x + columns[1].z * b.columns[0].y + columns[2].z * b.columns[0].z + columns[3].z * b.columns[0].w,
            columns[0].w * b.columns[0].x + columns[1].w * b.columns[0].y + columns[2].w * b.columns[0].z + columns[3].w * b.columns[0].w
            );

        c.columns[1] = float4(
            columns[0].x * b.columns[1].x + columns[1].x * b.columns[1].y + columns[2].x * b.columns[1].z + columns[3].x * b.columns[1].w,
            columns[0].y * b.columns[1].x + columns[1].y * b.columns[1].y + columns[2].y * b.columns[1].z + columns[3].y * b.columns[1].w,
            columns[0].z * b.columns[1].x + columns[1].z * b.columns[1].y + columns[2].z * b.columns[1].z + columns[3].z * b.columns[1].w,
            columns[0].w * b.columns[1].x + columns[1].w * b.columns[1].y + columns[2].w * b.columns[1].z + columns[3].w * b.columns[1].w
            );

        c.columns[2] = float4(
            columns[0].x * b.columns[2].x + columns[1].x * b.columns[2].y + columns[2].x * b.columns[2].z + columns[3].x * b.columns[2].w,
            columns[0].y * b.columns[2].x + columns[1].y * b.columns[2].y + columns[2].y * b.columns[2].z + columns[3].y * b.columns[2].w,
            columns[0].z * b.columns[2].x + columns[1].z * b.columns[2].y + columns[2].z * b.columns[2].z + columns[3].z * b.columns[2].w,
            columns[0].w * b.columns[2].x + columns[1].w * b.columns[2].y + columns[2].w * b.columns[2].z + columns[3].w * b.columns[2].w
            );

        c.columns[3] = float4(
            columns[0].x * b.columns[3].x + columns[1].x * b.columns[3].y + columns[2].x * b.columns[3].z + columns[3].x * b.columns[3].w,
            columns[0].y * b.columns[3].x + columns[1].y * b.columns[3].y + columns[2].y * b.columns[3].z + columns[3].y * b.columns[3].w,
            columns[0].z * b.columns[3].x + columns[1].z * b.columns[3].y + columns[2].z * b.columns[3].z + columns[3].z * b.columns[3].w,
            columns[0].w * b.columns[3].x + columns[1].w * b.columns[3].y + columns[2].w * b.columns[3].z + columns[3].w * b.columns[3].w
            );
        return c;
    }

    float4 multiplyBy(const float4 &b){
        //based on http://stackoverflow.com/questions/25805126/vector-matrix-product-efficiency-issue
        float4x4 a = *this; //getTransposed(); ???
        float4 result(
            dotProduct(a[0], b),
            dotProduct(a[1], b),
            dotProduct(a[2], b),
            dotProduct(a[3], b)
            );
        return result;
    }

    inline float4x4 getTransposed() {
        float4x4 transposed;
        for (unsigned i = 0; i < 4; i++) {
            for (unsigned j = 0; j < 4; j++) {
                transposed.columns[i][j] = columns[j][i];
            }
        }
        return transposed;
    }
};

where #define dotProduct(a, b) a.getDotProduct(b) and:

inline float getDotProduct(const float4 &anotherVector) const {
    return x * anotherVector.x + y * anotherVector.y + z * anotherVector.z + w * anotherVector.w;
}
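
A note on the vector overload above: with this column-major storage, dotProduct(a[i], b) takes column i dotted with b, which is component i of the transposed product. Written out component-wise, M * b in a column-major layout looks like the following sketch (illustrative only; it assumes float4x4::operator[] returns a column, the way setPerspective below uses it, plus the float4 constructor already shown; mulColumnMajor is a made-up name):

inline float4 mulColumnMajor(float4x4 &m, const float4 &v) {
    //row r of the result is the sum over columns c of m[c][r] * v[c]
    return float4(
        m[0].x * v.x + m[1].x * v.y + m[2].x * v.z + m[3].x * v.w,
        m[0].y * v.x + m[1].y * v.y + m[2].y * v.z + m[3].y * v.w,
        m[0].z * v.x + m[1].z * v.y + m[2].z * v.z + m[3].z * v.w,
        m[0].w * v.x + m[1].w * v.y + m[2].w * v.z + m[3].w * v.w
        );
}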

My VertexProcessor:

class VertexProcessor {
    float4x4 obj2world;
    float4x4 world2view;
    float4x4 view2proj;
    float4x4 obj2proj;

public:
    inline float3 tr(const float3 & v) { //in object space
        float4 r = obj2proj.multiplyBy(float4(v.x, v.y, v.z, 1.0f/*v.w*/));
        return float3(r.x / r.w, r.y / r.w, r.z / r.w); //we get vector in unified cube from -1,-1,-1 to 1,1,1
    }

    inline void transform() {
        obj2proj = obj2world.multiplyBy(world2view);
        obj2proj = obj2proj.multiplyBy(view2proj);
    }

    inline void setIdentity() {
        obj2world = float4x4(
            float4(1.0f, 0.0f, 0.0f, 0.0f),
            float4(0.0f, 1.0f, 0.0f, 0.0f),
            float4(0.0f, 0.0f, 1.0f, 0.0f),
            float4(0.0f, 0.0f, 0.0f, 1.0f)
            );
    }

    inline void setPerspective(float fovy, float aspect, float nearP, float farP) {
        fovy *= PI / 360.0f;
        float fValue = cos(fovy) / sin(fovy);

        view2proj[0] = float4(fValue/aspect,    0.0f,       0.f,                                    0.0f);
        view2proj[1] = float4(0.0f,             fValue,     0.0f,                                   0.0f);
        view2proj[2] = float4(0.0f,             0.0f,       (farP + nearP) / (nearP - farP),        -1.0f);
        view2proj[3] = float4(0.0f,             0.0f,       2.0f * farP * nearP / (nearP - farP),   0.0f);
    }

    inline void setLookat(float3 eye, float3 center, float3 up) {
        float3 f = center - eye;
        f.normalizeIt();
        up.normalizeIt();
        float3 s = f.getCrossProduct(up);
        float3 u = s.getCrossProduct(f);
        world2view[0] = float4(s.x, u.x, -f.x, 0.0f);
        world2view[1] = float4(s.y, u.y, -f.y, 0.0f);
        world2view[2] = float4(s.z, u.z, -f.z, 0.0f);
        world2view[3] = float4(eye/*.getNormalized() ???*/ * -1.0f, 1.0f);
    }

    inline void multByTranslation(float3 v) {
        float4x4 m(
            float4(1.0f, 0.0f, 0.0f, 0.0f),
            float4(0.0f, 1.0f, 0.0f, 0.0f),
            float4(0.0f, 0.0f, 1.0f, 0.0f),
            float4(v.x, v.y, v.z, 1.0f)
            );
        world2view = m.multiplyBy(world2view);
    }

    inline void multByScale(float3 v) {
        float4x4 m(
            float4(v.x, 0.0f, 0.0f, 0.0f),
            float4(0.0f, v.y, 0.0f, 0.0f),
            float4(0.0f, 0.0f, v.z, 0.0f),
            float4(0.0f, 0.0f, 0.0f, 1.0f)
            );
        world2view = m.multiplyBy(world2view);
    }

    inline void multByRotation(float a, float3 v) {
        float s = sin(a*PI / 180.0f), c = cos(a*PI / 180.0f);
        v.normalizeIt();
        float4x4 m(
            float4(v.x*v.x*(1-c)+c,         v.y*v.x*(1 - c) + v.z*s,    v.x*v.z*(1-c)-v.y*s,    0.0f),
            float4(v.x*v.y*(1-c)-v.z*s,     v.y*v.y*(1-c)+c,            v.y*v.z*(1-c)+v.x*s,    0.0f),
            float4(v.x*v.z*(1-c)+v.y*s,     v.y*v.z*(1-c)-v.x*s,        v.z*v.z*(1-c)+c,        0.0f),
            float4(0.0f,                    0.0f,                       0.0f,                   1.0f)
            );
        world2view = m.multiplyBy(world2view);
    }
};

The Rasterizer:

class Rasterizer final {
    Buffer * buffer = nullptr;

    inline float toScreenSpaceX(float x) { return (x + 1) * buffer->getWidth() * 0.5f; }
    inline float toScreenSpaceY(float y) { return (y + 1) * buffer->getHeight() * 0.5f; }

    inline int orient2d(float ax, float ay, float bx, float by, const float2& c) {
        return (bx - ax)*(c.y - ay) - (by - ay)*(c.x - ax);
    }

public:
    Rasterizer(Buffer * buffer) : buffer(buffer) {}
    //v - position in screen space ([0, width], [0, height], [-1, 1])
    void triangle(
        float3 v0, float3 v1, float3 v2,
        float3 n0, float3 n1, float3 n2,
        float2 uv0, float2 uv1, float2 uv2, 
        Light * light0, Light * light1,
        float3 camera, Texture * texture
        ) {

        v0.x = toScreenSpaceX(v0.x);
        v0.y = toScreenSpaceY(v0.y);
        v1.x = toScreenSpaceX(v1.x);
        v1.y = toScreenSpaceY(v1.y);
        v2.x = toScreenSpaceX(v2.x);
        v2.y = toScreenSpaceY(v2.y);

        //based on: https://fgiesen.wordpress.com/2013/02/08/triangle-rasterization-in-practice/

        //compute triangle bounding box
        int minX = MIN3(v0.x, v1.x, v2.x);
        int minY = MIN3(v0.y, v1.y, v2.y);
        int maxX = MAX3(v0.x, v1.x, v2.x);
        int maxY = MAX3(v0.y, v1.y, v2.y);

        //clip against screen bounds
        minX = MAX(minX, 0);
        minY = MAX(minY, 0);
        maxX = MIN(maxX, buffer->getWidth() - 1);
        maxY = MIN(maxY, buffer->getHeight() - 1);

        //rasterize
        float2 p(0.0f, 0.0f);
        for (p.y = minY; p.y <= maxY; p.y++) {
            for (p.x = minX; p.x <= maxX; p.x++) {
                // Determine barycentric coordinates
                //int w0 = orient2d(v1.x, v1.y, v2.x, v2.y, p);
                //int w1 = orient2d(v2.x, v2.y, v0.x, v0.y, p);
                //int w2 = orient2d(v0.x, v0.y, v1.x, v1.y, p);

                float w0 = (v1.y - v2.y)*(p.x - v2.x) + (v2.x - v1.x)*(p.y - v2.y);
                w0 /= (v1.y - v2.y)*(v0.x - v2.x) + (v2.x - v1.x)*(v0.y - v2.y);
                float w1 = (v2.y - v0.y)*(p.x - v2.x) + (v0.x - v2.x)*(p.y - v2.y);
                w1 /= (v2.y - v0.y)*(v1.x - v2.x) + (v0.x - v2.x)*(v1.y - v2.y);
                float w2 = 1 - w0 - w1;

                // If p is on or inside all edges, render pixel.
                if (w0 >= 0 && w1 >= 0 && w2 >= 0) {
                    float depth = w0 * v0.z + w1 * v1.z + w2 * v2.z;
                    if (depth < buffer->getDepthForPixel(p.x, p.y)) {
                        //...
                        buffer->setPixel(p.x, p.y, diffuse.r, diffuse.g, diffuse.b, ALPHA_VISIBLE, depth);
                    }
                }
            }
        }

    }
};

I'm fairly sure the Rasterizer itself works fine, because when I test it with this code (instead of the main loop):

float3 v0{ 0, 0, 0.1f };
float3 v1{ 0.5, 0, 0.1f };
float3 v2{ 1, 1, 0.1f }; 
//Rasterizer test (without VertexProcessor)
rasterizer->triangle(v0, v1, v2, n0, n1, n2, uv0, uv1, uv2, light0, light1, eye, defaultTexture);

I get the correct image: a triangle with one corner in the middle of the screen ([0, 0] in unified space), one in the bottom-right corner ([1, 1]), and one at [0.5, 0].
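
That matches what toScreenSpaceX/toScreenSpaceY should produce; for example, with a hypothetical 640x480 buffer:

//toScreenSpaceX( 0.0f) == ( 0 + 1) * 640 * 0.5f == 320 -> horizontal middle
//toScreenSpaceY( 0.0f) == ( 0 + 1) * 480 * 0.5f == 240 -> vertical middle
//toScreenSpaceX( 1.0f) == ( 1 + 1) * 640 * 0.5f == 640 -> right edge
//toScreenSpaceY( 1.0f) == ( 1 + 1) * 480 * 0.5f == 480 -> bottom edge
//toScreenSpaceX(-1.0f) == (-1 + 1) * 640 * 0.5f ==   0 -> left edge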

The float3 struct:

class float3 {
public:
    union {
        struct { float x, y, z; };
        struct { float r, g, b; };
        float p[3];
    };

    float3() = delete;
    float3(const float3 &other) : x(other.x), y(other.y), z(other.z) {}
    float3(float x, float y, float z) : x(x), y(y), z(z) {}

    float &operator[](unsigned index){
        ERROR_HANDLE(index < 3, L"The float3 index out of bounds (0-2 range, " + C::toWString(index) + L" given).");
        return p[index];
    }

    float getLength() const { return std::abs(sqrt(x*x + y*y + z*z)); }
    void normalizeIt();
    inline float3 getNormalized() const {
        float3 result(*this);
        result.normalizeIt();
        return result;
    }   
    inline float3 getCrossProduct(const float3 &anotherVector) const {
        //based on: http://www.sciencehq.com/physics/vector-product-multiplying-vectors.html
        return float3(
            y * anotherVector.z - anotherVector.y * z,
            z * anotherVector.x - anotherVector.z * x,
            x * anotherVector.y - anotherVector.x * y
            );
    }
    inline float getDotProduct(const float3 &anotherVector) const {
        //based on: https://www.ltcconline.net/greenl/courses/107/Vectors/DOTCROS.HTM
        return x * anotherVector.x + y * anotherVector.y + z * anotherVector.z;
    } 
    ...
};

The main loop:

VertexProcessor vp;

DirectionalLight * light0 = new DirectionalLight({ 0.3f, 0.3f, 0.3f }, { 0.0f, -1.0f, 0.0f });
DirectionalLight * light1 = new DirectionalLight({ 0.4f, 0.4f, 0.4f }, { 0.0f, -1.0f, 0.5f });

while(!my_window.is_closed()) {

    tgaBuffer.clearDepth(10.0f); //it could be 1.0f but 10.0f won't hurt, we draw pixel if it's depth < actual depth in buffer
    tgaBuffer.clearColor(0, 0, 255, ALPHA_VISIBLE);

    vp.setPerspective(75.0f, tgaBuffer.getWidth() / tgaBuffer.getHeight(), 10.0f, 2000.0f);

    float3 eye = { 10.0f, 10.0f - frameTotal / 10.0f, 10.0f }; //animate eye

    vp.setLookat(eye, float3{ 0.0f, 0.0f, 0.0f }.getNormalized(), { 0.0f, 1.0f, 0.0f });

    vp.setIdentity();
    //we could call e.g. vp.multByRotation(...) here, but we won't, to keep it simple
    vp.transform();

    //bottom
    drawTriangle(0, 1, 2);
    drawTriangle(2, 3, 0);

    drawTriangle(3, 2, 7);
    drawTriangle(7, 2, 6);

    drawTriangle(5, 1, 0);
    drawTriangle(0, 5, 4);

    drawTriangle(4, 5, 6);
    drawTriangle(6, 7, 4);

    frameTotal++;
}

drawTriangle(...) stands for:

#define drawTriangle(i0, i1, i2) rasterizer->triangle(vp.tr(v[i0]), vp.tr(v[i1]), vp.tr(v[i2]), v[i0], v[i1], v[i2], n0, n1, n2, uv0, uv1, uv2, light0, light1, eye, defaultTexture);

And here is the initialization of the triangle data:

float3 offset{ 0.0f, 0.0f, 0.0f };
v.push_back(offset + float3{ -10, -10, -10 });
v.push_back(offset + float3{ +10, -10, -10 });
v.push_back(offset + float3{ +10, -10, +10 });
v.push_back(offset + float3{ -10, -10, +10 });

v.push_back(offset + float3{ -10, +10, -10 });
v.push_back(offset + float3{ +10, +10, -10 });
v.push_back(offset + float3{ +10, +10, +10 });
v.push_back(offset + float3{ -10, +10, +10 });

Best Answer

A long time ago I created a small C library for OpenGL. It was mostly used for learning purposes while I was studying computer graphics. I looked into my sources, and my implementations of the perspective projection and the orientation (lookAt) differ significantly from yours.

pbm_Mat4 pbm_mat4_projection_perspective(PBfloat fov, PBfloat ratio, PBfloat near, PBfloat far) {
    PBfloat t = near * tanf(fov / 2.0f);
    PBfloat b = -t;
    PBfloat r = ratio * t, l = ratio * b;
    return pbm_mat4_create(pbm_vec4_create(2.0f * near / (r - l), 0, 0, 0),
                           pbm_vec4_create(0, 2.0f * near / (t - b), 0, 0),
                           pbm_vec4_create((r + l) / (r - l), (t + b) / (t - b), - (far + near) / (far - near), -1.0f),
                           pbm_vec4_create(0, 0, -2.0f * far * near / (far - near), 0));
}

pbm_Mat4 pbm_mat4_orientation_lookAt(pbm_Vec3 pos, pbm_Vec3 target, pbm_Vec3 up) {
    pbm_Vec3 forward = pbm_vec3_normalize(pbm_vec3_sub(target, pos));
    pbm_Vec3 right = pbm_vec3_normalize(pbm_vec3_cross(forward, up));
    up = pbm_vec3_normalize(pbm_vec3_cross(right, forward));
    forward = pbm_vec3_scalar(forward, -1);
    pos = pbm_vec3_scalar(pos, -1);
    return pbm_mat4_create(pbm_vec4_create_vec3(right),
                           pbm_vec4_create_vec3(up),
                           pbm_vec4_create_vec3(forward),
                           pbm_vec4_create_vec3_w(pbm_vec3_create(pbm_vec3_dot(right, pos),
                                                  pbm_vec3_dot(up, pos),     
                                                  pbm_vec3_dot(forward, pos)), 1));
}

These methods are tested, so you may want to check your code against them. The full sources are available here if you want them. You can also revisit frustums and projection matrices online. Unfortunately, I cannot share my university's materials with you :(
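
For comparison in the question's own conventions: a gluLookAt-style view matrix in a column-major layout normalizes the right vector and carries the translation as dot products of the basis vectors with the eye position, rather than the raw negated eye. Here is a minimal sketch using only the float3/float4/float4x4 operations already shown in the question (lookAtReference is a made-up name, not part of either code base):

inline float4x4 lookAtReference(float3 eye, float3 center, float3 up) {
    float3 f = (center - eye).getNormalized();        //forward
    float3 s = f.getCrossProduct(up).getNormalized(); //right
    float3 u = s.getCrossProduct(f);                  //recomputed up
    return float4x4(
        float4(s.x, u.x, -f.x, 0.0f),
        float4(s.y, u.y, -f.y, 0.0f),
        float4(s.z, u.z, -f.z, 0.0f),
        float4(-s.getDotProduct(eye), -u.getDotProduct(eye), f.getDotProduct(eye), 1.0f)
        );
}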

Regarding "c++ - Custom vertex processor not working - matrix multiplication error or something else?", a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/34695849/
