c - Intel intrinsics: multiply interleaved 8-bit values

Tags: c intel sse simd intrinsics

I'm working with an RGBA32 buffer (8 bits per component), and I need to multiply each component by a constant, then add the results of the multiplications together:

Result = r*x + g*y + b*z + a*w (the dot product of the two vectors rgba and xyzw)

I'm trying to use Intel SSE intrinsics to speed this up, but I can't see how to do something like this without shuffling the input.

Is there any way? Something like building a register containing {x,y,z,w,x,y,z,w,x,y,z,w,x,y,z,w} and performing an 8-bit saturating multiply?

The end goal is to multiply the RGBA vector by the corresponding color conversion matrix:

[ 66 129  25 0]   [R]
[-38 -74 112 0] * [G]
[112 -94 -18 0]   [B]
[  0   0   0 0]   [A]
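In scalar terms, that is three dot products per pixel; a minimal reference sketch (the fourth matrix column is zero, so alpha drops out; rounding and offset steps omitted):

/* Scalar reference for one pixel; r, g, b are the 8-bit components. */
int Y =  66*r + 129*g +  25*b;
int U = -38*r -  74*g + 112*b;
int V = 112*r -  94*g -  18*b;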

Thanks

Edit 1: here is the final function. It uses floating-point math for better color precision and converts an RGBA image to a YUV444 image with SSE. On an Intel i5 3570K, it takes 1.9 to 3.5 ms to convert a full-HD image using a single thread (threading this function is really easy and would improve performance significantly):

void SSE_rgba2YUV444_FP(char* a, char* y, char* u, char* v)
{
    __m128i mask = _mm_setr_epi8(0x00,0x04,0x08,0x0c, 0x01,0x05,0x09,0x0d, 0x02,0x06,0x0a,0x0e, 0x03,0x07,0x0b,0x0f); // Shuffle mask: each uint8 gives the position (as a byte offset) of the corresponding uint8 in the __m128i being shuffled
    float m[9] = {0.299, 0.587, 0.114, -0.1687, -0.3313, 0.5, 0.5, -0.4187, -0.0813}; // RGB -> YUV coefficients (full-range), 3x3 row-major

    __m128i row[4];
    for(int i=0; i<4; i++) {
        row[i] = _mm_loadu_si128((__m128i*)&a[16*i]);
        row[i] = _mm_shuffle_epi8(row[i],mask);
    }
    // row[i] = {rrrrggggbbbbaaaa}, all as uint8_t
    __m128i t0 = _mm_unpacklo_epi32(row[0], row[1]); // t0 = {rrrrrrrrgggggggg}
    __m128i t1 = _mm_unpacklo_epi32(row[2], row[3]); // t1 = {rrrrrrrrgggggggg}
    __m128i t2 = _mm_unpackhi_epi32(row[0], row[1]); // t2 = {bbbbbbbbaaaaaaaa}
    __m128i t3 = _mm_unpackhi_epi32(row[2], row[3]); // t3 = {bbbbbbbbaaaaaaaa}
    row[0] = _mm_unpacklo_epi64(t0, t1); // row[0] = {rrrrrrrrrrrrrrrr}
    row[1] = _mm_unpackhi_epi64(t0, t1); // etc.
    row[2] = _mm_unpacklo_epi64(t2, t3);

    __m128i v_lo[3], v_hi[3];
    for(int i=0; i<3; i++) {
        v_lo[i] = _mm_unpacklo_epi8(row[i],_mm_setzero_si128()); // Interleave each row with zeros, widening the values
        v_hi[i] = _mm_unpackhi_epi8(row[i],_mm_setzero_si128()); // from 8 bits to 16 bits so we can work on them
    }

    __m128 v32_lo1[3], v32_hi1[3], v32_lo2[3], v32_hi2[3];
    for(int i=0; i<3; i++) {
        v32_lo1[i] = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_lo[i],_mm_setzero_si128()));
        v32_lo2[i] = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_lo[i],_mm_setzero_si128()));
        v32_hi1[i] = _mm_cvtepi32_ps(_mm_unpacklo_epi16(v_hi[i],_mm_setzero_si128()));
        v32_hi2[i] = _mm_cvtepi32_ps(_mm_unpackhi_epi16(v_hi[i],_mm_setzero_si128()));
    } // r, g and b are now available as 32-bit floats

    __m128i yuv[3]; // {Y, U, V} 
    __m128 ylo1 = _mm_add_ps(_mm_mul_ps(v32_lo1[0], _mm_set1_ps(m[0])), _mm_add_ps(_mm_mul_ps(v32_lo1[1], _mm_set1_ps(m[1])), _mm_mul_ps(v32_lo1[2], _mm_set1_ps(m[2]))));
    __m128 ylo2 = _mm_add_ps(_mm_mul_ps(v32_lo2[0], _mm_set1_ps(m[0])), _mm_add_ps(_mm_mul_ps(v32_lo2[1], _mm_set1_ps(m[1])), _mm_mul_ps(v32_lo2[2], _mm_set1_ps(m[2]))));
    __m128 yhi1 = _mm_add_ps(_mm_mul_ps(v32_hi1[0], _mm_set1_ps(m[0])), _mm_add_ps(_mm_mul_ps(v32_hi1[1], _mm_set1_ps(m[1])), _mm_mul_ps(v32_hi1[2], _mm_set1_ps(m[2]))));
    __m128 yhi2 = _mm_add_ps(_mm_mul_ps(v32_hi2[0], _mm_set1_ps(m[0])), _mm_add_ps(_mm_mul_ps(v32_hi2[1], _mm_set1_ps(m[1])), _mm_mul_ps(v32_hi2[2], _mm_set1_ps(m[2]))));

    __m128i ylo1i = _mm_cvtps_epi32(ylo1);
    __m128i ylo2i = _mm_cvtps_epi32(ylo2);
    __m128i yhi1i = _mm_cvtps_epi32(yhi1);
    __m128i yhi2i = _mm_cvtps_epi32(yhi2);

    __m128i ylo = _mm_packus_epi32(ylo1i, ylo2i);
    __m128i yhi = _mm_packus_epi32(yhi1i, yhi2i);

    yuv[0] = _mm_packus_epi16(ylo, yhi);

    ylo1 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(v32_lo1[0], _mm_set1_ps(m[3])), _mm_add_ps(_mm_mul_ps(v32_lo1[1], _mm_set1_ps(m[4])), _mm_mul_ps(v32_lo1[2], _mm_set1_ps(m[5])))), _mm_set1_ps(128.0f));
    ylo2 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(v32_lo2[0], _mm_set1_ps(m[3])), _mm_add_ps(_mm_mul_ps(v32_lo2[1], _mm_set1_ps(m[4])), _mm_mul_ps(v32_lo2[2], _mm_set1_ps(m[5])))), _mm_set1_ps(128.0f));
    yhi1 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(v32_hi1[0], _mm_set1_ps(m[3])), _mm_add_ps(_mm_mul_ps(v32_hi1[1], _mm_set1_ps(m[4])), _mm_mul_ps(v32_hi1[2], _mm_set1_ps(m[5])))), _mm_set1_ps(128.0f));
    yhi2 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(v32_hi2[0], _mm_set1_ps(m[3])), _mm_add_ps(_mm_mul_ps(v32_hi2[1], _mm_set1_ps(m[4])), _mm_mul_ps(v32_hi2[2], _mm_set1_ps(m[5])))), _mm_set1_ps(128.0f));

    ylo1i = _mm_cvtps_epi32(ylo1);
    ylo2i = _mm_cvtps_epi32(ylo2);
    yhi1i = _mm_cvtps_epi32(yhi1);
    yhi2i = _mm_cvtps_epi32(yhi2);

    ylo = _mm_packus_epi32(ylo1i, ylo2i);
    yhi = _mm_packus_epi32(yhi1i, yhi2i);

    yuv[1] = _mm_packus_epi16(ylo, yhi);

    ylo1 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(v32_lo1[0], _mm_set1_ps(m[6])), _mm_add_ps(_mm_mul_ps(v32_lo1[1], _mm_set1_ps(m[7])), _mm_mul_ps(v32_lo1[2], _mm_set1_ps(m[8])))), _mm_set1_ps(128.0f));
    ylo2 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(v32_lo2[0], _mm_set1_ps(m[6])), _mm_add_ps(_mm_mul_ps(v32_lo2[1], _mm_set1_ps(m[7])), _mm_mul_ps(v32_lo2[2], _mm_set1_ps(m[8])))), _mm_set1_ps(128.0f));
    yhi1 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(v32_hi1[0], _mm_set1_ps(m[6])), _mm_add_ps(_mm_mul_ps(v32_hi1[1], _mm_set1_ps(m[7])), _mm_mul_ps(v32_hi1[2], _mm_set1_ps(m[8])))), _mm_set1_ps(128.0f));
    yhi2 = _mm_add_ps(_mm_add_ps(_mm_mul_ps(v32_hi2[0], _mm_set1_ps(m[6])), _mm_add_ps(_mm_mul_ps(v32_hi2[1], _mm_set1_ps(m[7])), _mm_mul_ps(v32_hi2[2], _mm_set1_ps(m[8])))), _mm_set1_ps(128.0f));

    ylo1i = _mm_cvtps_epi32(ylo1);
    ylo2i = _mm_cvtps_epi32(ylo2);
    yhi1i = _mm_cvtps_epi32(yhi1);
    yhi2i = _mm_cvtps_epi32(yhi2);

    ylo = _mm_packus_epi32(ylo1i, ylo2i);
    yhi = _mm_packus_epi32(yhi1i, yhi2i);

    yuv[2] = _mm_packus_epi16(ylo, yhi);

    _mm_storeu_si128((__m128i*)y,yuv[0]);
    _mm_storeu_si128((__m128i*)u,yuv[1]);
    _mm_storeu_si128((__m128i*)v,yuv[2]);
}
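For reference, a minimal driver loop over a whole image could look like the following sketch (the name rgba2YUV444_image and the npixels parameter are hypothetical; it assumes the pixel count is a multiple of 16 and tightly packed planes):

// Hypothetical driver: SSE_rgba2YUV444_FP consumes 16 RGBA pixels (64 bytes)
// per call and writes 16 bytes to each output plane.
void rgba2YUV444_image(char* rgba, char* y, char* u, char* v, int npixels)
{
    // Each 16-pixel block is independent, which is why threading this
    // (e.g. splitting the range across threads) is straightforward.
    for (int i = 0; i < npixels; i += 16)
        SSE_rgba2YUV444_FP(rgba + 4*i, y + i, u + i, v + i);
}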

Best Answer

Here is a solution that finds Y, U, and V at the same time, using only vertical operations.

To do this, I first transpose four pixels like this:

rgbargbargbargba -> rrrrggggbbbbaaaa

using the _mm_shuffle_epi8 intrinsic with a mask. I do this for 16 pixels and then transpose them again,

going from

row[0] : rrrrggggbbbbaaaa
row[1] : rrrrggggbbbbaaaa
row[2] : rrrrggggbbbbaaaa
row[3] : rrrrggggbbbbaaaa

to

row[0] : rrrrrrrrrrrrrrrr
row[1] : gggggggggggggggg
row[2] : bbbbbbbbbbbbbbbb
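The mask for that first byte-level transpose is the same one used in the function above; for reference:

// Byte i of the result takes byte mask[i] of the source:
// rgbargbargbargba -> rrrrggggbbbbaaaa
__m128i mask = _mm_setr_epi8(0x00,0x04,0x08,0x0c, 0x01,0x05,0x09,0x0d, 0x02,0x06,0x0a,0x0e, 0x03,0x07,0x0b,0x0f);
row[i] = _mm_shuffle_epi8(row[i], mask);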

The second transpose works the same way as transposing a 4x4 matrix of integers:

__m128i t0 = _mm_unpacklo_epi32(row[0], row[1]);
__m128i t1 = _mm_unpacklo_epi32(row[2], row[3]);
__m128i t2 = _mm_unpackhi_epi32(row[0], row[1]);
__m128i t3 = _mm_unpackhi_epi32(row[2], row[3]);
row[0] = _mm_unpacklo_epi64(t0, t1);
row[1] = _mm_unpackhi_epi64(t0, t1);
row[2] = _mm_unpacklo_epi64(t2, t3);

Now I split each row into a low and a high half and widen to 16 bits, like this:

__m128i v_lo[3], v_hi[3];
for(int i=0; i<3; i++) {
    v_lo[i] = _mm_unpacklo_epi8(row[i],_mm_setzero_si128());
    v_hi[i] = _mm_unpackhi_epi8(row[i],_mm_setzero_si128());
}

Finally, I compute Y, U, and V like this:

short m[9] = {66, 129, 25, -38, -74, 112, 112, -94, -18};
short o[3] = {16, 128, 128}; // per-plane offset: +16 for Y, +128 for U and V
__m128i yuv[3];
for(int i=0; i<3; i++) {
    __m128i yuv_lo, yuv_hi;
    yuv_lo = _mm_add_epi16(_mm_add_epi16(
                   _mm_mullo_epi16(v_lo[0], _mm_set1_epi16(m[3*i+0])),
                   _mm_mullo_epi16(v_lo[1], _mm_set1_epi16(m[3*i+1]))),
                   _mm_mullo_epi16(v_lo[2], _mm_set1_epi16(m[3*i+2])));
    yuv_lo = _mm_add_epi16(yuv_lo, _mm_set1_epi16(128)); // rounding bias before the shift
    yuv_lo = _mm_srai_epi16(yuv_lo, 8);                  // arithmetic shift: the U and V sums can be negative
    yuv_lo = _mm_add_epi16(yuv_lo, _mm_set1_epi16(o[i]));

    yuv_hi = _mm_add_epi16(_mm_add_epi16(
                   _mm_mullo_epi16(v_hi[0], _mm_set1_epi16(m[3*i+0])),
                   _mm_mullo_epi16(v_hi[1], _mm_set1_epi16(m[3*i+1]))),
                   _mm_mullo_epi16(v_hi[2], _mm_set1_epi16(m[3*i+2])));
    yuv_hi = _mm_add_epi16(yuv_hi, _mm_set1_epi16(128));
    yuv_hi = _mm_srai_epi16(yuv_hi, 8);
    yuv_hi = _mm_add_epi16(yuv_hi, _mm_set1_epi16(o[i]));

    yuv[i] = _mm_packus_epi16(yuv_lo,yuv_hi);
}
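Per pixel, this is the standard fixed-point BT.601 formula (coefficients pre-scaled by 256, with +128 as a rounding bias); in scalar form:

/* Scalar equivalent for one pixel (studio-swing BT.601): */
Y = (( 66*R + 129*G +  25*B + 128) >> 8) +  16;
U = ((-38*R -  74*G + 112*B + 128) >> 8) + 128;
V = ((112*R -  94*G -  18*B + 128) >> 8) + 128;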

For a working example of this code, see my first answer and the function rgba2yuv_SSE.

Regarding c - Intel intrinsics: multiply interleaved 8-bit values, a similar question can be found on Stack Overflow: https://stackoverflow.com/questions/26096265/
