Mirror of https://github.com/ultravideo/uvg266.git, synced 2024-11-24 02:24:07 +00:00
Remove 16-pixel wide AVX2 SAD implementation
At least on Skylake, it is noticeably slower than the very simple version using SSE4.1.
parent 4cb371184b
commit 6504145cce
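For context, the reg_sad_w16 that the width == 16 case now falls through to is the plain SSE4.1 routine from strategies/sse41/reg_sad_pow2_widths-sse41.h; its body is not part of this diff. Below is a minimal sketch of what such a routine looks like; the name, comments, and exact layout are assumptions for illustration, not the file's verbatim contents:

    /* Sketch only: the real reg_sad_w16 lives in
     * strategies/sse41/reg_sad_pow2_widths-sse41.h and may differ in detail.
     * Assumes <immintrin.h> and kvazaar's kvz_pixel (uint8_t in 8-bit builds)
     * are in scope. */
    static uint32_t reg_sad_w16_sketch(const kvz_pixel * const data1, const kvz_pixel * const data2,
                                       const int32_t height, const uint32_t stride1,
                                       const uint32_t stride2)
    {
      __m128i sse_inc = _mm_setzero_si128();
      for (int32_t y = 0; y < height; y++) {
        /* One unaligned 16-byte load per row of each block. */
        __m128i a = _mm_loadu_si128((const __m128i *)(data1 + y * stride1));
        __m128i b = _mm_loadu_si128((const __m128i *)(data2 + y * stride2));
        /* PSADBW yields two 64-bit partial sums of absolute byte differences. */
        sse_inc = _mm_add_epi64(sse_inc, _mm_sad_epu8(a, b));
      }
      /* Fold the high 64-bit partial sum onto the low one. */
      __m128i hi  = _mm_shuffle_epi32(sse_inc, _MM_SHUFFLE(1, 0, 3, 2));
      __m128i sad = _mm_add_epi64(sse_inc, hi);
      return _mm_cvtsi128_si32(sad);
    }

The AVX2 version removed below does the same work two rows per iteration in a 256-bit register, but it pays for a 128-bit insert per row pair plus a cross-lane reduction at the end, which the commit message reports as a net loss on Skylake.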
@@ -61,7 +61,7 @@ uint32_t kvz_reg_sad_avx2(const kvz_pixel * const data1, const kvz_pixel * const
   if (width == 12)
     return reg_sad_w12(data1, data2, height, stride1, stride2);
   if (width == 16)
-    return reg_sad_w16_avx2(data1, data2, height, stride1, stride2);
+    return reg_sad_w16(data1, data2, height, stride1, stride2);
   if (width == 24)
     return reg_sad_w24(data1, data2, height, stride1, stride2);
   if (width == 32)
@@ -4,44 +4,6 @@
 #include "strategies/sse41/reg_sad_pow2_widths-sse41.h"
 #include "kvazaar.h"
 
-static uint32_t reg_sad_w16_avx2(const kvz_pixel * const data1, const kvz_pixel * const data2,
-                                 const int32_t height, const uint32_t stride1,
-                                 const uint32_t stride2)
-{
-  __m256i avx_inc = _mm256_setzero_si256();
-  int32_t y;
-
-  const int32_t height_ymm_bytes = height & ~1;
-  const int32_t height_parity = height & 1;
-
-  for (y = 0; y < height_ymm_bytes; y += 2) {
-    __m128i a_up = _mm_loadu_si128((const __m128i *)(data1 + (y + 0) * stride1));
-    __m128i b_up = _mm_loadu_si128((const __m128i *)(data2 + (y + 0) * stride2));
-    __m128i a_dn = _mm_loadu_si128((const __m128i *)(data1 + (y + 1) * stride1));
-    __m128i b_dn = _mm_loadu_si128((const __m128i *)(data2 + (y + 1) * stride2));
-
-    __m256i a = _mm256_inserti128_si256(_mm256_castsi128_si256(a_up), a_dn, 1);
-    __m256i b = _mm256_inserti128_si256(_mm256_castsi128_si256(b_up), b_dn, 1);
-
-    __m256i curr_sads = _mm256_sad_epu8(a, b);
-    avx_inc = _mm256_add_epi64(avx_inc, curr_sads);
-  }
-  __m128i inchi = _mm256_extracti128_si256(avx_inc, 1);
-  __m128i inclo = _mm256_castsi256_si128 (avx_inc);
-
-  if (height_parity) {
-    __m128i a = _mm_loadu_si128 ((__m128i *)(data1 + y * stride1));
-    __m128i b = _mm_loadu_si128 ((__m128i *)(data2 + y * stride2));
-    __m128i sads = _mm_sad_epu8 (a, b);
-    inclo = _mm_add_epi64(inclo, sads);
-  }
-  __m128i sum_1 = _mm_add_epi64 (inclo, inchi);
-  __m128i sum_2 = _mm_shuffle_epi32(sum_1, _MM_SHUFFLE(1, 0, 3, 2));
-  __m128i sad   = _mm_add_epi64 (sum_1, sum_2);
-
-  return _mm_cvtsi128_si32(sad);
-}
-
 static uint32_t reg_sad_w32(const kvz_pixel * const data1, const kvz_pixel * const data2,
                             const int32_t height, const uint32_t stride1,
                             const uint32_t stride2)