/*****************************************************************************
 * This file is part of Kvazaar HEVC encoder.
 *
 * Copyright (C) 2013-2015 Tampere University of Technology and others (see
 * COPYING file).
 *
 * Kvazaar is free software: you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the
 * Free Software Foundation; either version 2.1 of the License, or (at your
 * option) any later version.
 *
 * Kvazaar is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with Kvazaar. If not, see <http://www.gnu.org/licenses/>.
 ****************************************************************************/
|
|
|
|
|
2016-03-31 10:34:32 +00:00
|
|
|
#include "strategies/sse41/picture-sse41.h"
|
2014-07-11 14:16:32 +00:00
|
|
|
|
|
|
|
#if COMPILE_INTEL_SSE41
|
2016-04-01 14:14:23 +00:00
|
|
|
#include <immintrin.h>
|
|
|
|
#include <stdlib.h>
|
|
|
|
|
|
|
|
#include "kvazaar.h"
|
|
|
|
#include "strategyselector.h"
|
2014-07-11 14:16:32 +00:00
|
|
|
|
2019-01-15 17:32:25 +00:00
|
|
|
/**
 * \brief Sum of absolute differences for an 8-pixel-wide block.
 *
 * Processes two rows per iteration by packing two 8-byte rows into one XMM
 * register, then handles a possible leftover odd row.
 *
 * \param data1   first block
 * \param data2   second block
 * \param height  number of rows
 * \param stride1 row stride of data1 in pixels
 * \param stride2 row stride of data2 in pixels
 * \return        SAD over the whole 8*height block
 */
static uint32_t reg_sad_w8(const kvz_pixel * const data1, const kvz_pixel * const data2,
                           const int32_t height, const uint32_t stride1,
                           const uint32_t stride2)
{
  __m128i sse_inc = _mm_setzero_si128();
  int32_t y;

  const int32_t height_xmm_bytes = height & ~1; // rows handled two at a time
  const int32_t height_parity    = height &  1; // leftover odd row, if any

  for (y = 0; y < height_xmm_bytes; y += 2) {
    // Load 8 bytes from each of two consecutive rows and combine them into
    // a single 16-byte register per operand.
    __m128i a_lo = _mm_loadl_epi64((const __m128i *)(data1 + (y + 0) * stride1));
    __m128i b_lo = _mm_loadl_epi64((const __m128i *)(data2 + (y + 0) * stride2));
    __m128i a_hi = _mm_loadl_epi64((const __m128i *)(data1 + (y + 1) * stride1));
    __m128i b_hi = _mm_loadl_epi64((const __m128i *)(data2 + (y + 1) * stride2));

    __m128i a = _mm_unpacklo_epi64(a_lo, a_hi);
    __m128i b = _mm_unpacklo_epi64(b_lo, b_hi);

    // PSADBW yields two 64-bit partial sums, one per 8-byte half (= one per
    // row here).
    __m128i curr_sads = _mm_sad_epu8(a, b);
    sse_inc = _mm_add_epi64(sse_inc, curr_sads);
  }
  if (height_parity) {
    // Fix: the original handled the odd row with MMX (__m64/_mm_sad_pu8)
    // but never executed _mm_empty(), leaving the CPU in MMX state and
    // risking corruption of later x87 floating-point code; it also
    // dereferenced a cast __m64 pointer (aliasing/alignment hazard).
    // Use the same SSE path instead; the upper 8 bytes are zero in both
    // operands, so they contribute nothing to the SAD.
    __m128i a = _mm_loadl_epi64((const __m128i *)(data1 + y * stride1));
    __m128i b = _mm_loadl_epi64((const __m128i *)(data2 + y * stride2));
    __m128i curr_sads = _mm_sad_epu8(a, b);
    sse_inc = _mm_add_epi64(sse_inc, curr_sads);
  }
  // Horizontal sum: fold the high 64-bit lane onto the low one.
  __m128i sse_inc_2 = _mm_shuffle_epi32(sse_inc, _MM_SHUFFLE(1, 0, 3, 2));
  __m128i sad       = _mm_add_epi64(sse_inc, sse_inc_2);

  return _mm_cvtsi128_si32(sad);
}
|
|
|
|
|
|
|
|
/**
 * \brief Sum of absolute differences for a 16-pixel-wide block.
 *
 * One full XMM load covers an entire row, so each row costs a single
 * PSADBW.
 *
 * \param data1   first block
 * \param data2   second block
 * \param height  number of rows
 * \param stride1 row stride of data1 in pixels
 * \param stride2 row stride of data2 in pixels
 * \return        SAD over the whole 16*height block
 */
static uint32_t reg_sad_w16(const kvz_pixel * const data1, const kvz_pixel * const data2,
                            const int32_t height, const uint32_t stride1,
                            const uint32_t stride2)
{
  __m128i sad_acc = _mm_setzero_si128();

  for (int32_t row = 0; row < height; row++) {
    const __m128i lhs = _mm_loadu_si128((__m128i const*) &data1[row * stride1]);
    const __m128i rhs = _mm_loadu_si128((__m128i const*) &data2[row * stride2]);
    sad_acc = _mm_add_epi64(sad_acc, _mm_sad_epu8(lhs, rhs));
  }

  // PSADBW leaves two 64-bit partial sums; swap the lanes and add to get
  // the total in the low lane.
  const __m128i swapped = _mm_shuffle_epi32(sad_acc, _MM_SHUFFLE(1, 0, 3, 2));
  return _mm_cvtsi128_si32(_mm_add_epi64(sad_acc, swapped));
}
|
|
|
|
|
|
|
|
/**
 * \brief Sum of absolute differences for a block of arbitrary width/height.
 *
 * Each scanline is processed in full 16-byte chunks; a trailing partial
 * chunk (width % 16 bytes) is handled with a byte mask so invalid lanes
 * contribute zero to the SAD.
 *
 * \param data1   first block
 * \param data2   second block
 * \param width   block width in pixels
 * \param height  block height in rows
 * \param stride1 row stride of data1 in pixels
 * \param stride2 row stride of data2 in pixels
 * \return        SAD over the whole width*height block
 */
static uint32_t reg_sad_arbitrary(const kvz_pixel * const data1, const kvz_pixel * const data2,
                                  const int32_t width, const int32_t height, const uint32_t stride1,
                                  const uint32_t stride2)
{
  int32_t y, x;
  __m128i sse_inc = _mm_setzero_si128();

  // Bytes in block in 128-bit blocks per each scanline, and remainder
  const int32_t largeblock_bytes = width & ~15;
  const int32_t residual_bytes = width & 15;

  // Per-byte lane mask: lane i is all-ones iff i < residual_bytes, i.e. it
  // selects exactly the valid bytes of the trailing partial chunk.
  const __m128i rds = _mm_set1_epi8 (residual_bytes);
  const __m128i ns = _mm_setr_epi8 (0, 1, 2, 3, 4, 5, 6, 7,
                                    8, 9, 10, 11, 12, 13, 14, 15);
  const __m128i rdmask = _mm_cmpgt_epi8(rds, ns);

  for (y = 0; y < height; ++y) {
    // Full 16-byte chunks of the scanline.
    for (x = 0; x < largeblock_bytes; x += 16) {
      __m128i a = _mm_loadu_si128((__m128i const*) &data1[y * stride1 + x]);
      __m128i b = _mm_loadu_si128((__m128i const*) &data2[y * stride2 + x]);
      __m128i curr_sads = _mm_sad_epu8(a, b);
      sse_inc = _mm_add_epi32(sse_inc, curr_sads);
    }

    if (residual_bytes) {
      // NOTE(review): these loads read a full 16 bytes even though only
      // residual_bytes of them are within the block — presumably the pixel
      // buffers are padded enough that the overread stays in valid memory;
      // verify at the call sites.
      __m128i a = _mm_loadu_si128((__m128i const*) &data1[y * stride1 + x]);
      __m128i b = _mm_loadu_si128((__m128i const*) &data2[y * stride2 + x]);

      // Replace the out-of-range lanes of b with the matching lanes of a,
      // so those lanes contribute |a - a| = 0 to the SAD.
      __m128i b_masked = _mm_blendv_epi8(a, b, rdmask);
      __m128i curr_sads = _mm_sad_epu8(a, b_masked);
      sse_inc = _mm_add_epi32(sse_inc, curr_sads);
    }
  }
  // Horizontal sum: fold the high 64-bit lane onto the low one.
  __m128i sse_inc_2 = _mm_shuffle_epi32(sse_inc, _MM_SHUFFLE(1, 0, 3, 2));
  __m128i sad = _mm_add_epi64 (sse_inc, sse_inc_2);

  return _mm_cvtsi128_si32(sad);
}
|
2014-04-29 08:14:42 +00:00
|
|
|
|
2019-01-15 17:32:25 +00:00
|
|
|
/**
 * \brief SSE4.1 SAD between two pixel blocks.
 *
 * Dispatches on block width: widths 8 and 16 have dedicated kernels,
 * everything else goes through the generic masked implementation.
 *
 * \param data1   first block
 * \param data2   second block
 * \param width   block width in pixels
 * \param height  block height in rows
 * \param stride1 row stride of data1 in pixels
 * \param stride2 row stride of data2 in pixels
 * \return        SAD over the whole width*height block
 */
uint32_t kvz_reg_sad_sse41(const kvz_pixel * const data1, const kvz_pixel * const data2,
                           const int32_t width, const int32_t height, const uint32_t stride1,
                           const uint32_t stride2)
{
  switch (width) {
    case 8:
      return reg_sad_w8(data1, data2, height, stride1, stride2);
    case 16:
      return reg_sad_w16(data1, data2, height, stride1, stride2);
    default:
      return reg_sad_arbitrary(data1, data2, width, height, stride1, stride2);
  }
}
|
|
|
|
|
2014-07-11 14:16:32 +00:00
|
|
|
#endif //COMPILE_INTEL_SSE41
|
|
|
|
|
|
|
|
|
2015-08-26 08:50:27 +00:00
|
|
|
/**
 * \brief Register the SSE4.1 picture strategies with the strategy selector.
 *
 * Only the 8-bit reg_sad kernel is provided by this file; nothing is
 * registered for other bit depths or when SSE4.1 support is compiled out.
 *
 * \param opaque   strategy-selector handle passed through to registration
 * \param bitdepth encoder pixel bit depth
 * \return         1 on success, 0 if any registration failed
 */
int kvz_strategy_register_picture_sse41(void* opaque, uint8_t bitdepth) {
  bool success = true;

#if COMPILE_INTEL_SSE41
  if (bitdepth == 8) {
    // Priority 20: preferred over plain-C and SSE2 variants when available.
    success &= kvz_strategyselector_register(opaque, "reg_sad", "sse41", 20, &kvz_reg_sad_sse41);
  }
#endif

  return success;
}
|