Mirror of https://github.com/ultravideo/uvg266.git
Optimize SSE4.1 version of SAD
Make it use the same vblend trick as AVX2. Interestingly, on my test setup this seems to be faster than the same code using 256-bit AVX vectors.
parent 887d7700a8
commit b2176bf72a
@@ -32,59 +32,38 @@ unsigned kvz_reg_sad_sse41(const kvz_pixel * const data1, const kvz_pixel * cons
                            const int width, const int height, const unsigned stride1, const unsigned stride2)
 {
   int y, x;
-  unsigned sad = 0;
-  __m128i sse_inc = _mm_setzero_si128 ();
-  long long int sse_inc_array[2];
+  __m128i sse_inc = _mm_setzero_si128();
+
+  // Bytes in block in 128-bit blocks per each scanline, and remainder
+  const int largeblock_bytes = width & ~15;
+  const int residual_bytes = width & 15;
+
+  const __m128i rds    = _mm_set1_epi8 (residual_bytes);
+  const __m128i ns     = _mm_setr_epi8 (0, 1, 2,  3,  4,  5,  6,  7,
+                                        8, 9, 10, 11, 12, 13, 14, 15);
+  const __m128i rdmask = _mm_cmpgt_epi8(rds, ns);
 
   for (y = 0; y < height; ++y) {
-    for (x = 0; x <= width-16; x+=16) {
-      const __m128i a = _mm_loadu_si128((__m128i const*) &data1[y * stride1 + x]);
-      const __m128i b = _mm_loadu_si128((__m128i const*) &data2[y * stride2 + x]);
-      sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a,b));
+    for (x = 0; x < largeblock_bytes; x += 16) {
+      __m128i a = _mm_loadu_si128((__m128i const*) &data1[y * stride1 + x]);
+      __m128i b = _mm_loadu_si128((__m128i const*) &data2[y * stride2 + x]);
+      __m128i curr_sads = _mm_sad_epu8(a, b);
+      sse_inc = _mm_add_epi32(sse_inc, curr_sads);
     }
 
-    {
-      const __m128i a = _mm_loadu_si128((__m128i const*) &data1[y * stride1 + x]);
-      const __m128i b = _mm_loadu_si128((__m128i const*) &data2[y * stride2 + x]);
-      switch (((width - (width%2)) - x)/2) {
-        case 0:
-          break;
-        case 1:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x01)));
-          break;
-        case 2:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x03)));
-          break;
-        case 3:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x07)));
-          break;
-        case 4:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x0f)));
-          break;
-        case 5:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x1f)));
-          break;
-        case 6:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x3f)));
-          break;
-        case 7:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x7f)));
-          break;
-        default:
-          //Should not happen
-          assert(0);
-      }
-      x = (width - (width%2));
-    }
-
-    for (; x < width; ++x) {
-      sad += abs(data1[y * stride1 + x] - data2[y * stride2 + x]);
+    if (residual_bytes) {
+      __m128i a = _mm_loadu_si128((__m128i const*) &data1[y * stride1 + x]);
+      __m128i b = _mm_loadu_si128((__m128i const*) &data2[y * stride2 + x]);
+
+      __m128i b_masked  = _mm_blendv_epi8(a, b, rdmask);
+      __m128i curr_sads = _mm_sad_epu8(a, b_masked);
+      sse_inc = _mm_add_epi32(sse_inc, curr_sads);
     }
   }
-  _mm_storeu_si128((__m128i*) sse_inc_array, sse_inc);
-  sad += sse_inc_array[0] + sse_inc_array[1];
+  __m128i sse_inc_2 = _mm_shuffle_epi32(sse_inc, _MM_SHUFFLE(1, 0, 3, 2));
+  __m128i sad       = _mm_add_epi64    (sse_inc, sse_inc_2);
 
-  return sad;
+  return _mm_cvtsi128_si32(sad);
 }
 
 #endif //COMPILE_INTEL_SSE41
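For context, the residual-masking trick used above works as follows: a byte mask that is 0xFF for lanes below residual_bytes and 0x00 elsewhere is built once with _mm_cmpgt_epi8, and _mm_blendv_epi8 then copies a's own bytes into b for the out-of-range lanes, so those lanes contribute |a - a| = 0 to the SAD. This replaces both the per-row switch over _mm_blend_epi16 immediates and the scalar tail loop. Below is a minimal standalone sketch of the idea, not the committed code; the helper name sad_up_to_16 and the 8-byte example are made up for illustration.

/* Sketch of the blendv masking trick (hypothetical helper, not the committed
 * code). Bytes at positions >= residual_bytes are taken from `a` instead of
 * `b`, so they contribute |a - a| = 0 to the SAD. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <smmintrin.h>  /* SSE4.1: _mm_blendv_epi8 */

static uint32_t sad_up_to_16(const uint8_t *a8, const uint8_t *b8, int residual_bytes)
{
  assert(residual_bytes >= 0 && residual_bytes <= 16);

  /* Mask is 0xFF for byte lanes 0..residual_bytes-1, 0x00 for the rest. */
  const __m128i rds    = _mm_set1_epi8((char)residual_bytes);
  const __m128i ns     = _mm_setr_epi8(0, 1, 2,  3,  4,  5,  6,  7,
                                       8, 9, 10, 11, 12, 13, 14, 15);
  const __m128i rdmask = _mm_cmpgt_epi8(rds, ns);

  __m128i a = _mm_loadu_si128((const __m128i *)a8);
  __m128i b = _mm_loadu_si128((const __m128i *)b8);

  /* Where the mask is clear, replace b's byte with a's byte. */
  __m128i b_masked = _mm_blendv_epi8(a, b, rdmask);

  /* _mm_sad_epu8 yields two 64-bit partial sums; fold them into one. */
  __m128i sads = _mm_sad_epu8(a, b_masked);
  __m128i hi   = _mm_shuffle_epi32(sads, _MM_SHUFFLE(1, 0, 3, 2));
  return (uint32_t)_mm_cvtsi128_si32(_mm_add_epi64(sads, hi));
}

int main(void)
{
  uint8_t a[16] = { 10, 20, 30, 40, 50, 60, 70, 80, 1, 1, 1, 1, 1, 1, 1, 1 };
  uint8_t b[16] = { 11, 18, 33, 40, 55, 60, 77, 80, 9, 9, 9, 9, 9, 9, 9, 9 };

  /* Only the first 8 bytes count; expected SAD is 1+2+3+0+5+0+7+0 = 18. */
  printf("%u\n", sad_up_to_16(a, b, 8));
  return 0;
}

Like the committed code, the sketch always loads a full 16 bytes per row, so both buffers must remain readable up to the next 16-byte boundary past the block width.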