From b2176bf72a1ba6c3192a8bc5d5ac7d70e7b0ffde Mon Sep 17 00:00:00 2001
From: Pauli Oikkonen
Date: Mon, 7 Jan 2019 19:28:10 +0200
Subject: [PATCH] Optimize SSE4.1 version of SAD

Make it use the same vblend trick as AVX2. Interestingly, on my test
setup this seems to be faster than the same code using 256-bit AVX
vectors.
---
 src/strategies/sse41/picture-sse41.c | 69 ++++++++++------------------
 1 file changed, 24 insertions(+), 45 deletions(-)

diff --git a/src/strategies/sse41/picture-sse41.c b/src/strategies/sse41/picture-sse41.c
index d75b4d58..cba7c29a 100644
--- a/src/strategies/sse41/picture-sse41.c
+++ b/src/strategies/sse41/picture-sse41.c
@@ -32,59 +32,38 @@ unsigned kvz_reg_sad_sse41(const kvz_pixel * const data1, const kvz_pixel * cons
                            const int width, const int height, const unsigned stride1,
                            const unsigned stride2)
 {
   int y, x;
-  unsigned sad = 0;
-  __m128i sse_inc = _mm_setzero_si128 ();
-  long long int sse_inc_array[2];
+  __m128i sse_inc = _mm_setzero_si128();
+  // Bytes in block in 128-bit blocks per each scanline, and remainder
+  const int largeblock_bytes = width & ~15;
+  const int residual_bytes   = width &  15;
+
+  const __m128i rds    = _mm_set1_epi8 (residual_bytes);
+  const __m128i ns     = _mm_setr_epi8 (0,  1,  2,  3,  4,  5,  6,  7,
+                                        8,  9,  10, 11, 12, 13, 14, 15);
+  const __m128i rdmask = _mm_cmpgt_epi8(rds, ns);
+
   for (y = 0; y < height; ++y) {
-    for (x = 0; x <= width-16; x+=16) {
-      const __m128i a = _mm_loadu_si128((__m128i const*) &data1[y * stride1 + x]);
-      const __m128i b = _mm_loadu_si128((__m128i const*) &data2[y * stride2 + x]);
-      sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a,b));
+    for (x = 0; x < largeblock_bytes; x += 16) {
+      __m128i a = _mm_loadu_si128((__m128i const*) &data1[y * stride1 + x]);
+      __m128i b = _mm_loadu_si128((__m128i const*) &data2[y * stride2 + x]);
+      __m128i curr_sads = _mm_sad_epu8(a, b);
+      sse_inc = _mm_add_epi32(sse_inc, curr_sads);
     }
 
-    {
-      const __m128i a = _mm_loadu_si128((__m128i const*) &data1[y * stride1 + x]);
-      const __m128i b = _mm_loadu_si128((__m128i const*) &data2[y * stride2 + x]);
-      switch (((width - (width%2)) - x)/2) {
-        case 0:
-          break;
-        case 1:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x01)));
-          break;
-        case 2:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x03)));
-          break;
-        case 3:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x07)));
-          break;
-        case 4:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x0f)));
-          break;
-        case 5:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x1f)));
-          break;
-        case 6:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x3f)));
-          break;
-        case 7:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x7f)));
-          break;
-        default:
-          //Should not happen
-          assert(0);
-      }
-      x = (width - (width%2));
-    }
+    if (residual_bytes) {
+      __m128i a = _mm_loadu_si128((__m128i const*) &data1[y * stride1 + x]);
+      __m128i b = _mm_loadu_si128((__m128i const*) &data2[y * stride2 + x]);
 
-    for (; x < width; ++x) {
-      sad += abs(data1[y * stride1 + x] - data2[y * stride2 + x]);
+      __m128i b_masked  = _mm_blendv_epi8(a, b, rdmask);
+      __m128i curr_sads = _mm_sad_epu8(a, b_masked);
+      sse_inc = _mm_add_epi32(sse_inc, curr_sads);
     }
   }
-  _mm_storeu_si128((__m128i*) sse_inc_array, sse_inc);
-  sad += sse_inc_array[0] + sse_inc_array[1];
+  __m128i sse_inc_2 = _mm_shuffle_epi32(sse_inc, _MM_SHUFFLE(1, 0, 3, 2));
+  __m128i sad       = _mm_add_epi64    (sse_inc, sse_inc_2);
 
-  return sad;
+  return _mm_cvtsi128_si32(sad);
 }
 
 #endif //COMPILE_INTEL_SSE41
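
A note on the trick for readers of the patch: instead of dispatching on the residual width with a switch over _mm_blend_epi16 immediates, the new code builds a byte mask once per block (rdmask) and uses _mm_blendv_epi8 so that lanes beyond the residual width compare a against itself and add nothing to _mm_sad_epu8; the final _mm_shuffle_epi32/_mm_add_epi64 pair folds the two 64-bit partial sums into a scalar. The sketch below shows the same idea in isolation. It is illustrative only: sad_row_tail, main and the sample data are not part of kvazaar, and like the patched code it assumes the full 16-byte loads stay within readable memory (true inside a picture stride).

/* Standalone illustration only: sad_row_tail and the sample data are
 * hypothetical, not kvazaar code; the real function is kvz_reg_sad_sse41
 * in the patch above. Build with e.g. gcc -msse4.1. */
#include <smmintrin.h>  /* SSE4.1: _mm_blendv_epi8 */
#include <stdint.h>
#include <stdio.h>

/* SAD of the first `width` (< 16) bytes of two rows. Lanes >= width are
 * blended so that a is compared against itself and contributes zero,
 * which replaces the old per-width switch over blend immediates.
 * Note: this reads a full 16 bytes from both rows, so the bytes past
 * `width` must be readable, as they are inside a picture stride. */
static unsigned sad_row_tail(const uint8_t *row1, const uint8_t *row2, int width)
{
  const __m128i rds    = _mm_set1_epi8((char)width);
  const __m128i ns     = _mm_setr_epi8(0, 1, 2,  3,  4,  5,  6,  7,
                                       8, 9, 10, 11, 12, 13, 14, 15);
  const __m128i rdmask = _mm_cmpgt_epi8(rds, ns);      /* 0xff where lane < width */

  __m128i a        = _mm_loadu_si128((const __m128i *)row1);
  __m128i b        = _mm_loadu_si128((const __m128i *)row2);
  __m128i b_masked = _mm_blendv_epi8(a, b, rdmask);    /* keep b only in valid lanes */
  __m128i sads     = _mm_sad_epu8(a, b_masked);        /* two 64-bit partial sums */

  /* Fold the high 64-bit half onto the low one, as the patched epilogue does. */
  __m128i hi  = _mm_shuffle_epi32(sads, _MM_SHUFFLE(1, 0, 3, 2));
  __m128i sum = _mm_add_epi64(sads, hi);
  return (unsigned)_mm_cvtsi128_si32(sum);
}

int main(void)
{
  uint8_t r1[16] = { 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 1, 2, 3, 4 };
  uint8_t r2[16] = { 12, 18, 33, 40, 55, 60, 70, 77, 90, 101, 110, 125, 9, 9, 9, 9 };

  /* Only the first 12 bytes count; the last 4 lanes are masked out.
   * Prints 21 (= 2+2+3+0+5+0+0+3+0+1+0+5). */
  printf("%u\n", sad_row_tail(r1, r2, 12));
  return 0;
}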