AVX2-ize SAD calculation

Performance is no better than the SSE version, though.
Pauli Oikkonen 2019-01-04 15:18:33 +02:00
parent ab3dc58df6
commit 7585f79a71


@@ -34,61 +34,73 @@
 #include "strategyselector.h"
 #include "strategies/generic/picture-generic.h"
 
-unsigned kvz_reg_sad_avx2(const kvz_pixel * const data1, const kvz_pixel * const data2,
+/**
+ * \brief Calculate Sum of Absolute Differences (SAD)
+ *
+ * Calculate Sum of Absolute Differences (SAD) between two rectangular regions
+ * located in arbitrary points in the picture.
+ *
+ * \param data1   Starting point of the first picture.
+ * \param data2   Starting point of the second picture.
+ * \param width   Width of the region for which SAD is calculated.
+ * \param height  Height of the region for which SAD is calculated.
+ * \param stride  Width of the pixel array.
+ *
+ * \returns Sum of Absolute Differences
+ */
+uint32_t kvz_reg_sad_avx2(const kvz_pixel * const data1, const kvz_pixel * const data2,
                           const int width, const int height, const unsigned stride1, const unsigned stride2)
 {
-  int y, x;
-  unsigned sad = 0;
-  __m128i sse_inc = _mm_setzero_si128 ();
-  long long int sse_inc_array[2];
+  int32_t y, x;
+  uint32_t sad = 0;
+  __m256i avx_inc = _mm256_setzero_si256();
+
+  // 256-bit blocks, bytes after them, 32-bit blocks after the large blocks
+  const int largeblock_bytes = width & ~31;
+  const int any_residuals    = width &  31;
+  const int residual_128bs   = any_residuals >> 4;
+  const int residual_dwords  = any_residuals >> 2;
+
+  const __m256i ns     = _mm256_setr_epi32 (0, 1, 2, 3, 4, 5, 6, 7);
+  const __m256i rds    = _mm256_set1_epi32 (residual_dwords);
+  const __m256i rdmask = _mm256_cmpgt_epi32(rds, ns);
 
   for (y = 0; y < height; ++y) {
-    for (x = 0; x <= width-16; x+=16) {
-      const __m128i a = _mm_loadu_si128((__m128i const*) &data1[y * stride1 + x]);
-      const __m128i b = _mm_loadu_si128((__m128i const*) &data2[y * stride2 + x]);
-      sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a,b));
-    }
-    {
-      const __m128i a = _mm_loadu_si128((__m128i const*) &data1[y * stride1 + x]);
-      const __m128i b = _mm_loadu_si128((__m128i const*) &data2[y * stride2 + x]);
-      switch (((width - (width%2)) - x)/2) {
-        case 0:
-          break;
-        case 1:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x01)));
-          break;
-        case 2:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x03)));
-          break;
-        case 3:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x07)));
-          break;
-        case 4:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x0f)));
-          break;
-        case 5:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x1f)));
-          break;
-        case 6:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x3f)));
-          break;
-        case 7:
-          sse_inc = _mm_add_epi32(sse_inc, _mm_sad_epu8(a, _mm_blend_epi16(a, b, 0x7f)));
-          break;
-        default:
-          //Should not happen
-          assert(0);
-      }
-      x = (width - (width%2));
-    }
-    for (; x < width; ++x) {
-      sad += abs(data1[y * stride1 + x] - data2[y * stride2 + x]);
-    }
+    for (x = 0; x < largeblock_bytes; x += 32) {
+      __m256i a = _mm256_loadu_si256((const __m256i *)(data1 + (y * stride1 + x)));
+      __m256i b = _mm256_loadu_si256((const __m256i *)(data2 + (y * stride2 + x)));
+      __m256i curr_sads = _mm256_sad_epu8(a, b);
+      avx_inc = _mm256_add_epi64(avx_inc, curr_sads);
+    }
+    /*
+     * If there are no residual values, it does not matter what bogus values
+     * we use here since it will be masked away anyway
+     */
+    if (any_residuals) {
+      __m256i a = _mm256_loadu_si256((const __m256i *)(data1 + (y * stride1 + x)));
+      __m256i b = _mm256_loadu_si256((const __m256i *)(data2 + (y * stride2 + x)));
+
+      __m256i b_masked  = _mm256_blendv_epi8(a, b, rdmask);
+      __m256i curr_sads = _mm256_sad_epu8 (a, b_masked);
+      avx_inc = _mm256_add_epi64(avx_inc, curr_sads);
+
+      x = width & ~(uint32_t)3;
+      for (; x < width; x++)
+        sad += abs(data1[y * stride1 + x] - data2[y * stride2 + x]);
+    }
   }
-  _mm_storeu_si128((__m128i*) sse_inc_array, sse_inc);
-  sad += sse_inc_array[0] + sse_inc_array[1];
+  __m256i avx_inc_2 = _mm256_permute4x64_epi64(avx_inc, _MM_SHUFFLE(1, 0, 3, 2));
+  __m256i avx_inc_3 = _mm256_add_epi64        (avx_inc, avx_inc_2);
+  __m256i avx_inc_4 = _mm256_shuffle_epi32    (avx_inc_3, _MM_SHUFFLE(1, 0, 3, 2));
+  __m256i avx_inc_5 = _mm256_add_epi64        (avx_inc_3, avx_inc_4);
+
+  // 32 bits should always be enough for even the largest blocks with a SAD of
+  // 255 in each pixel, even though the SAD results themselves are 64 bits
+  __m128i avx_inc_128 = _mm256_castsi256_si128(avx_inc_5);
+  sad += _mm_cvtsi128_si32(avx_inc_128);
 
   return sad;
 }
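
The residual-column handling above builds a per-dword mask from residual_dwords and lets the byte blend neutralize everything past the block edge: bytes blended from a contribute |a - a| = 0 to the SAD. The standalone sketch below shows the same mask construction, blend and SAD in isolation; it is not part of the commit, and the buffer names, example width of 21 and main() harness are made up for illustration. Build with AVX2 enabled (e.g. -mavx2).

#include <immintrin.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
  /* Example: width 21 -> 21 & 31 = 21 residual bytes,
   * 21 >> 2 = 5 whole dwords (20 bytes) covered by the masked SAD,
   * the last byte is left to the scalar tail loop. */
  const int residual_dwords = 5;

  const __m256i ns     = _mm256_setr_epi32 (0, 1, 2, 3, 4, 5, 6, 7);
  const __m256i rds    = _mm256_set1_epi32 (residual_dwords);
  const __m256i rdmask = _mm256_cmpgt_epi32(rds, ns);  /* all-ones in lanes 0..4 */

  uint8_t a_buf[32], b_buf[32];
  for (int i = 0; i < 32; i++) {
    a_buf[i] = (uint8_t)i;
    b_buf[i] = (uint8_t)(i + 3);   /* |a - b| = 3 for every byte */
  }

  __m256i a        = _mm256_loadu_si256((const __m256i *)a_buf);
  __m256i b        = _mm256_loadu_si256((const __m256i *)b_buf);
  __m256i b_masked = _mm256_blendv_epi8(a, b, rdmask);  /* b in bytes 0..19, a elsewhere */
  __m256i sads     = _mm256_sad_epu8(a, b_masked);

  /* Sum the four 64-bit SAD lanes; expect 20 bytes * 3 = 60. */
  uint64_t lanes[4];
  _mm256_storeu_si256((__m256i *)lanes, sads);
  printf("masked SAD = %llu\n",
         (unsigned long long)(lanes[0] + lanes[1] + lanes[2] + lanes[3]));
  return 0;
}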
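
For reference, the quantity accumulated by kvz_reg_sad_avx2 reduces to the plain scalar loop below. This is a minimal sketch, not Kvazaar's actual generic implementation: the typedef of kvz_pixel as an 8-bit sample is an assumption matching the default 8-bit build, and the name reg_sad_scalar is made up here.

#include <stdint.h>
#include <stdlib.h>

/* Assumption: kvz_pixel is an 8-bit sample type, as in the 8-bit build. */
typedef uint8_t kvz_pixel;

/* Scalar SAD over a width x height region: sum of |a - b| over all pixels. */
static uint32_t reg_sad_scalar(const kvz_pixel *data1, const kvz_pixel *data2,
                               int width, int height,
                               unsigned stride1, unsigned stride2)
{
  uint32_t sad = 0;
  for (int y = 0; y < height; y++) {
    for (int x = 0; x < width; x++) {
      sad += abs((int)data1[y * stride1 + x] - (int)data2[y * stride2 + x]);
    }
  }
  return sad;
}

Comparing this against kvz_reg_sad_avx2 for odd widths such as 21 or 37 exercises all three paths of the vectorized routine: the 32-byte main loop, the masked residual dwords, and the final scalar tail.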