mirror of https://github.com/ultravideo/uvg266.git
Improve intra SAD AVX2 intrinsics.
- Moved the implementations for different block sizes into inline functions that are defined in terms of each other, reducing the amount of redundant code.
- Performance of sad_8bit_32x32_avx2 improved by about 10% due to unrolling of the loop.
parent 9f5bcf45eb
commit 669e99dd7f
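For reference, the operation these kernels vectorize is a plain sum of absolute differences (SAD) over a block stored continuously in memory. A minimal scalar sketch, not part of the commit (the name sad_8bit_scalar and the explicit size parameters are hypothetical; the 8-bit pixel type is assumed, matching the 8-bit code path):

#include <stdint.h>
#include <stdlib.h>

typedef uint8_t pixel; // assumption: 8-bit pixels, as in the 8-bit code path

// Hypothetical scalar reference: SAD of two width x height blocks that
// are stored continuously (row after row, no stride).
static unsigned sad_8bit_scalar(const pixel *buf1, const pixel *buf2,
                                unsigned width, unsigned height)
{
  unsigned sum = 0;
  for (unsigned i = 0; i < width * height; ++i) {
    sum += (unsigned)abs((int)buf1[i] - (int)buf2[i]);
  }
  return sum;
}

The AVX2 versions below compute the same value, but 32 bytes at a time with _mm256_sad_epu8, and after this commit they share one hierarchy of inline helpers instead of repeating the load/add pattern for every block size.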
@@ -28,20 +28,66 @@
 #include <immintrin.h>
 
-static unsigned sad_8bit_8x8_avx2(const pixel *buf1, const pixel *buf2)
-{
-  __m256i sum;
-  {
-    // Get SADs for 8x8 pixels and add the results hierarchically into sum0.
-    const __m256i *const a = (const __m256i *)buf1;
-    const __m256i *const b = (const __m256i *)buf2;
-
-    __m256i sum0, sum1;
-    sum0 = _mm256_sad_epu8(_mm256_load_si256(a + 0), _mm256_load_si256(b + 0));
-    sum1 = _mm256_sad_epu8(_mm256_load_si256(a + 1), _mm256_load_si256(b + 1));
-
-    sum = _mm256_add_epi32(sum0, sum1);
-  }
-
+/**
+ * \brief Calculate SAD for 8x8 bytes in continuous memory.
+ */
+static INLINE __m256i inline_8bit_sad_8x8_avx2(const __m256i *const a, const __m256i *const b)
+{
+  __m256i sum0, sum1;
+  sum0 = _mm256_sad_epu8(_mm256_load_si256(a + 0), _mm256_load_si256(b + 0));
+  sum1 = _mm256_sad_epu8(_mm256_load_si256(a + 1), _mm256_load_si256(b + 1));
+
+  return _mm256_add_epi32(sum0, sum1);
+}
+
+/**
+ * \brief Calculate SAD for 16x16 bytes in continuous memory.
+ */
+static INLINE __m256i inline_8bit_sad_16x16_avx2(const __m256i *const a, const __m256i *const b)
+{
+  const unsigned size_of_8x8 = 8 * 8 / sizeof(__m256i);
+
+  // Calculate in 4 chunks of 16x4.
+  __m256i sum0, sum1, sum2, sum3;
+  sum0 = inline_8bit_sad_8x8_avx2(a + 0 * size_of_8x8, b + 0 * size_of_8x8);
+  sum1 = inline_8bit_sad_8x8_avx2(a + 1 * size_of_8x8, b + 1 * size_of_8x8);
+  sum2 = inline_8bit_sad_8x8_avx2(a + 2 * size_of_8x8, b + 2 * size_of_8x8);
+  sum3 = inline_8bit_sad_8x8_avx2(a + 3 * size_of_8x8, b + 3 * size_of_8x8);
+
+  sum0 = _mm256_add_epi32(sum0, sum1);
+  sum2 = _mm256_add_epi32(sum2, sum3);
+
+  return _mm256_add_epi32(sum0, sum2);
+}
+
+/**
+ * \brief Calculate SAD for 32x32 bytes in continuous memory.
+ */
+static INLINE __m256i inline_8bit_sad_32x32_avx2(const __m256i *const a, const __m256i *const b)
+{
+  const unsigned size_of_16x16 = 16 * 16 / sizeof(__m256i);
+
+  // Calculate in 4 chunks of 32x8.
+  __m256i sum0, sum1, sum2, sum3;
+  sum0 = inline_8bit_sad_16x16_avx2(a + 0 * size_of_16x16, b + 0 * size_of_16x16);
+  sum1 = inline_8bit_sad_16x16_avx2(a + 1 * size_of_16x16, b + 1 * size_of_16x16);
+  sum2 = inline_8bit_sad_16x16_avx2(a + 2 * size_of_16x16, b + 2 * size_of_16x16);
+  sum3 = inline_8bit_sad_16x16_avx2(a + 3 * size_of_16x16, b + 3 * size_of_16x16);
+
+  sum0 = _mm256_add_epi32(sum0, sum1);
+  sum2 = _mm256_add_epi32(sum2, sum3);
+
+  return _mm256_add_epi32(sum0, sum2);
+}
+
+/**
+ * \brief Get sum of the low 32 bits of four 64 bit numbers from __m256i as uint32_t.
+ */
+static INLINE uint32_t m256i_horizontal_sum(const __m256i sum)
+{
   // Add the high 128 bits to low 128 bits.
   __m128i mm128_result = _mm_add_epi32(_mm256_castsi256_si128(sum), _mm256_extractf128_si256(sum, 1));
   // Add the high 64 bits to low 64 bits.
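The body of m256i_horizontal_sum continues past this hunk; its unchanged tail (store to a uint32_t[4] and return result[0] + result[2]) is the same code visible in the next hunk's removed lines. Elements 0 and 2 carry the data because _mm256_sad_epu8 produces four 64-bit counters, one per 8-byte group, so after folding the high 128 bits onto the low 128 bits two counters remain, sitting at 32-bit element positions 0 and 2. Note also the stride arithmetic in the helpers above: an 8x8 block of bytes is 64 bytes, i.e. 8 * 8 / sizeof(__m256i) = 2 vectors, which is exactly what size_of_8x8 computes. An equivalent fold that stays in registers, as a sketch only (horizontal_sum_alt is not part of the commit):

static INLINE uint32_t horizontal_sum_alt(const __m256i sum)
{
  // Fold the high 128 bits onto the low 128 bits.
  __m128i s = _mm_add_epi32(_mm256_castsi256_si128(sum),
                            _mm256_extractf128_si256(sum, 1));
  // Fold the high 64-bit counter onto the low one; the totals are small
  // enough that the high 32 bits of each counter are always zero.
  s = _mm_add_epi32(s, _mm_srli_si128(s, 8));
  // The full sum now sits in the low 32 bits.
  return (uint32_t)_mm_cvtsi128_si32(s);
}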
@@ -51,87 +97,42 @@ static unsigned sad_8bit_8x8_avx2(const pixel *buf1, const pixel *buf2)
 }
 
 
+static unsigned sad_8bit_8x8_avx2(const pixel *buf1, const pixel *buf2)
+{
+  const __m256i *const a = (const __m256i *)buf1;
+  const __m256i *const b = (const __m256i *)buf2;
+  __m256i sum = inline_8bit_sad_8x8_avx2(a, b);
+
+  return m256i_horizontal_sum(sum);
+}
+
+
 static unsigned sad_8bit_16x16_avx2(const pixel *buf1, const pixel *buf2)
 {
-  __m256i sum;
-  {
-    // Get SADs for 16x16 pixels and add the results hierarchically into sum.
-    const __m256i *const a = (const __m256i *)buf1;
-    const __m256i *const b = (const __m256i *)buf2;
-
-    __m256i sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7;
-    sum0 = _mm256_sad_epu8(_mm256_load_si256(a + 0), _mm256_load_si256(b + 0));
-    sum1 = _mm256_sad_epu8(_mm256_load_si256(a + 1), _mm256_load_si256(b + 1));
-    sum2 = _mm256_sad_epu8(_mm256_load_si256(a + 2), _mm256_load_si256(b + 2));
-    sum3 = _mm256_sad_epu8(_mm256_load_si256(a + 3), _mm256_load_si256(b + 3));
-    sum4 = _mm256_sad_epu8(_mm256_load_si256(a + 4), _mm256_load_si256(b + 4));
-    sum5 = _mm256_sad_epu8(_mm256_load_si256(a + 5), _mm256_load_si256(b + 5));
-    sum6 = _mm256_sad_epu8(_mm256_load_si256(a + 6), _mm256_load_si256(b + 6));
-    sum7 = _mm256_sad_epu8(_mm256_load_si256(a + 7), _mm256_load_si256(b + 7));
-
-    sum0 = _mm256_add_epi32(sum0, sum1);
-    sum2 = _mm256_add_epi32(sum2, sum3);
-    sum4 = _mm256_add_epi32(sum4, sum5);
-    sum6 = _mm256_add_epi32(sum6, sum7);
-
-    sum0 = _mm256_add_epi32(sum0, sum2);
-    sum4 = _mm256_add_epi32(sum4, sum6);
-
-    sum = _mm256_add_epi32(sum0, sum4);
-  }
-
-  // Add the high 128 bits to low 128 bits.
-  __m128i mm128_result = _mm_add_epi32(_mm256_castsi256_si128(sum), _mm256_extractf128_si256(sum, 1));
-  // Add the high 64 bits to low 64 bits.
-  uint32_t result[4];
-  _mm_storeu_si128((__m128i*)result, mm128_result);
-  return result[0] + result[2];
+  const __m256i *const a = (const __m256i *)buf1;
+  const __m256i *const b = (const __m256i *)buf2;
+  __m256i sum = inline_8bit_sad_16x16_avx2(a, b);
+
+  return m256i_horizontal_sum(sum);
 }
 
 
 static unsigned sad_8bit_32x32_avx2(const pixel *buf1, const pixel *buf2)
 {
-  // Do 32x32 in 4 blocks.
-  __m256i sum = _mm256_setzero_si256();
-  for (int i = 0; i < 32; i += 8) {
-    // Get SADs for 32x8 pixels and add the results hierarchically into sum.
-    const __m256i *const a = (const __m256i *)buf1 + i;
-    const __m256i *const b = (const __m256i *)buf2 + i;
-
-    __m256i sum0, sum1, sum2, sum3, sum4, sum5, sum6, sum7;
-    sum0 = _mm256_sad_epu8(_mm256_load_si256(a + 0), _mm256_load_si256(b + 0));
-    sum1 = _mm256_sad_epu8(_mm256_load_si256(a + 1), _mm256_load_si256(b + 1));
-    sum2 = _mm256_sad_epu8(_mm256_load_si256(a + 2), _mm256_load_si256(b + 2));
-    sum3 = _mm256_sad_epu8(_mm256_load_si256(a + 3), _mm256_load_si256(b + 3));
-    sum4 = _mm256_sad_epu8(_mm256_load_si256(a + 4), _mm256_load_si256(b + 4));
-    sum5 = _mm256_sad_epu8(_mm256_load_si256(a + 5), _mm256_load_si256(b + 5));
-    sum6 = _mm256_sad_epu8(_mm256_load_si256(a + 6), _mm256_load_si256(b + 6));
-    sum7 = _mm256_sad_epu8(_mm256_load_si256(a + 7), _mm256_load_si256(b + 7));
-
-    sum0 = _mm256_add_epi32(sum0, sum1);
-    sum2 = _mm256_add_epi32(sum2, sum3);
-    sum4 = _mm256_add_epi32(sum4, sum5);
-    sum6 = _mm256_add_epi32(sum6, sum7);
-
-    sum0 = _mm256_add_epi32(sum0, sum2);
-    sum4 = _mm256_add_epi32(sum4, sum6);
-
-    sum = _mm256_add_epi32(sum, sum0);
-    sum = _mm256_add_epi32(sum, sum4);
-  }
-
-  // Add the high 128 bits to low 128 bits.
-  __m128i mm128_result = _mm_add_epi32(_mm256_castsi256_si128(sum), _mm256_extractf128_si256(sum, 1));
-  // Add the high 64 bits to low 64 bits.
-  uint32_t result[4];
-  _mm_storeu_si128((__m128i*)result, mm128_result);
-  return result[0] + result[2];
+  const __m256i *const a = (const __m256i *)buf1;
+  const __m256i *const b = (const __m256i *)buf2;
+  __m256i sum = inline_8bit_sad_32x32_avx2(a, b);
+
+  return m256i_horizontal_sum(sum);
 }
 
 
 #endif //COMPILE_INTEL_AVX2
 
 
-int strategy_register_picture_avx2(void* opaque) {
+int strategy_register_picture_avx2(void* opaque)
+{
   bool success = true;
 #if COMPILE_INTEL_AVX2
   success &= strategyselector_register(opaque, "sad_8bit_8x8", "avx2", 40, &sad_8bit_8x8_avx2);
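Since the kernels use the aligned _mm256_load_si256 load, callers must pass 32-byte-aligned buffers. A hypothetical driver for the 32x32 kernel, not part of the commit (assumes the same translation unit and the pixel type from above; uses C11 alignas):

#include <stdalign.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
  alignas(32) pixel a[32 * 32];
  alignas(32) pixel b[32 * 32];
  memset(a, 0, sizeof(a));
  memset(b, 1, sizeof(b));
  // Every pixel differs by 1, so the SAD should be 32 * 32 = 1024.
  printf("SAD: %u\n", sad_8bit_32x32_avx2(a, b));
  return 0;
}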