Merge branch 'avx2-quant'

commit 7a3dabf43e
@@ -153,6 +153,14 @@
    <ClCompile Include="..\..\src\search.c" />
    <ClCompile Include="..\..\src\search_inter.c" />
    <ClCompile Include="..\..\src\search_intra.c" />
    <ClCompile Include="..\..\src\strategies\avx2\quant-avx2.c">
      <EnableEnhancedInstructionSet Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">AdvancedVectorExtensions2</EnableEnhancedInstructionSet>
      <EnableEnhancedInstructionSet Condition="'$(Configuration)|$(Platform)'=='Release|x64'">AdvancedVectorExtensions2</EnableEnhancedInstructionSet>
      <EnableEnhancedInstructionSet Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">AdvancedVectorExtensions2</EnableEnhancedInstructionSet>
      <EnableEnhancedInstructionSet Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">AdvancedVectorExtensions2</EnableEnhancedInstructionSet>
    </ClCompile>
    <ClCompile Include="..\..\src\strategies\generic\quant-generic.c" />
    <ClCompile Include="..\..\src\strategies\strategies-quant.c" />
    <ClCompile Include="..\..\src\yuv_io.c" />
    <ClInclude Include="..\..\src\checkpoint.h" />
    <ClInclude Include="..\..\src\cli.h" />

@@ -201,6 +209,10 @@
    <ClInclude Include="..\..\src\kvazaar_internal.h" />
    <ClInclude Include="..\..\src\search_inter.h" />
    <ClInclude Include="..\..\src\search_intra.h" />
    <ClInclude Include="..\..\src\strategies\strategies-common.h" />
    <ClInclude Include="..\..\src\strategies\avx2\quant-avx2.h" />
    <ClInclude Include="..\..\src\strategies\generic\quant-generic.h" />
    <ClInclude Include="..\..\src\strategies\strategies-quant.h" />
    <ClInclude Include="..\..\src\yuv_io.h" />
  </ItemGroup>
  <ItemGroup>
@@ -207,6 +207,15 @@
    <ClCompile Include="..\..\src\input_frame_buffer.c">
      <Filter>Source Files</Filter>
    </ClCompile>
    <ClCompile Include="..\..\src\strategies\strategies-quant.c">
      <Filter>Source Files\strategies</Filter>
    </ClCompile>
    <ClCompile Include="..\..\src\strategies\generic\quant-generic.c">
      <Filter>Source Files\strategies\generic</Filter>
    </ClCompile>
    <ClCompile Include="..\..\src\strategies\avx2\quant-avx2.c">
      <Filter>Source Files\strategies\avx2</Filter>
    </ClCompile>
  </ItemGroup>
  <ItemGroup>
    <ClInclude Include="..\..\src\global.h">

@@ -374,6 +383,18 @@
    <ClInclude Include="..\..\src\input_frame_buffer.h">
      <Filter>Header Files</Filter>
    </ClInclude>
    <ClInclude Include="..\..\src\strategies\strategies-common.h">
      <Filter>Header Files\strategies</Filter>
    </ClInclude>
    <ClInclude Include="..\..\src\strategies\strategies-quant.h">
      <Filter>Header Files\strategies</Filter>
    </ClInclude>
    <ClInclude Include="..\..\src\strategies\generic\quant-generic.h">
      <Filter>Header Files\strategies\generic</Filter>
    </ClInclude>
    <ClInclude Include="..\..\src\strategies\avx2\quant-avx2.h">
      <Filter>Header Files\strategies\avx2</Filter>
    </ClInclude>
  </ItemGroup>
  <ItemGroup>
    <YASM Include="..\..\src\extras\x86inc.asm">
@@ -203,6 +203,7 @@ OBJS = \
  strategies/strategies-nal.o \
  strategies/strategies-dct.o \
  strategies/strategies-ipol.o \
  strategies/strategies-quant.o \
  strategies/generic/nal-generic.o \
  strategies/generic/picture-generic.o \
  strategies/sse2/picture-sse2.o \

@@ -213,7 +214,9 @@ OBJS = \
  strategies/generic/dct-generic.o \
  strategies/avx2/dct-avx2.o \
  strategies/generic/ipol-generic.o \
  strategies/avx2/ipol-avx2.o
  strategies/avx2/ipol-avx2.o \
  strategies/generic/quant-generic.o \
  strategies/avx2/quant-avx2.o \

ifndef KVZ_DISABLE_ASM
# Compile C files in x86_asm folder with KVZ_COMPILE_ASM, which will cause
@@ -31,6 +31,7 @@
#include "context.h"
#include "cabac.h"
#include "transform.h"
#include "strategies/strategies-quant.h"

#define QUANT_SHIFT 14
@@ -26,6 +26,7 @@

#if COMPILE_INTEL_AVX2
# include "image.h"
# include "strategies/strategies-common.h"
# include <immintrin.h>

@@ -136,6 +137,186 @@ static unsigned sad_8bit_64x64_avx2(const kvz_pixel * buf1, const kvz_pixel * bu
  return m256i_horizontal_sum(sum0);
}

static unsigned satd_8bit_4x4_avx2(const kvz_pixel *org, const kvz_pixel *cur)
{
  __m128i original = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)org));
  __m128i current = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)cur));

  __m128i diff_lo = _mm_sub_epi16(current, original);

  original = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)(org + 8)));
  current = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)(cur + 8)));

  __m128i diff_hi = _mm_sub_epi16(current, original);

  //Hor
  __m128i row0 = _mm_hadd_epi16(diff_lo, diff_hi);
  __m128i row1 = _mm_hsub_epi16(diff_lo, diff_hi);

  __m128i row2 = _mm_hadd_epi16(row0, row1);
  __m128i row3 = _mm_hsub_epi16(row0, row1);

  //Ver
  row0 = _mm_hadd_epi16(row2, row3);
  row1 = _mm_hsub_epi16(row2, row3);

  row2 = _mm_hadd_epi16(row0, row1);
  row3 = _mm_hsub_epi16(row0, row1);

  //Abs and sum
  row2 = _mm_abs_epi16(row2);
  row3 = _mm_abs_epi16(row3);

  row3 = _mm_add_epi16(row2, row3);

  row3 = _mm_add_epi16(row3, _mm_shuffle_epi32(row3, KVZ_PERMUTE(2, 3, 0, 1) ));
  row3 = _mm_add_epi16(row3, _mm_shuffle_epi32(row3, KVZ_PERMUTE(1, 0, 1, 0) ));
  row3 = _mm_add_epi16(row3, _mm_shufflelo_epi16(row3, KVZ_PERMUTE(1, 0, 1, 0) ));

  unsigned sum = _mm_extract_epi16(row3, 0);
  unsigned satd = (sum + 1) >> 1;

  return satd;
}
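
As a quick reference for readers checking the vectorized routine above, a plain scalar 4x4 Hadamard SATD of the kind it implements is sketched below. This is my own illustrative version, not part of the commit; it assumes both blocks are stored contiguously as 16 bytes (stride 4), as the AVX2 loads above do, and uses uint8_t in place of kvz_pixel.

#include <stdint.h>
#include <stdlib.h>

/* Scalar sketch of 4x4 SATD: 4-point Hadamard on rows, then on columns,
 * then sum of absolute values halved with rounding, matching the
 * (sum + 1) >> 1 normalization used above. */
static unsigned satd_4x4_scalar_ref(const uint8_t *org, const uint8_t *cur)
{
  int d[16], m[16];
  unsigned sum = 0;

  for (int i = 0; i < 16; ++i) d[i] = (int)cur[i] - (int)org[i];

  /* Horizontal (row) butterflies. */
  for (int r = 0; r < 4; ++r) {
    int a0 = d[4*r+0] + d[4*r+2], a1 = d[4*r+1] + d[4*r+3];
    int a2 = d[4*r+0] - d[4*r+2], a3 = d[4*r+1] - d[4*r+3];
    m[4*r+0] = a0 + a1; m[4*r+1] = a0 - a1;
    m[4*r+2] = a2 + a3; m[4*r+3] = a2 - a3;
  }
  /* Vertical (column) butterflies. */
  for (int c = 0; c < 4; ++c) {
    int a0 = m[c+0] + m[c+8],  a1 = m[c+4] + m[c+12];
    int a2 = m[c+0] - m[c+8],  a3 = m[c+4] - m[c+12];
    d[c+0] = a0 + a1;  d[c+4]  = a0 - a1;
    d[c+8] = a2 + a3;  d[c+12] = a2 - a3;
  }
  for (int i = 0; i < 16; ++i) sum += (unsigned)abs(d[i]);

  return (sum + 1) >> 1;
}
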

static void hor_add_sub_avx2(__m128i *row0, __m128i *row1){

  __m128i a = _mm_hadd_epi16(*row0, *row1);
  __m128i b = _mm_hsub_epi16(*row0, *row1);

  __m128i c = _mm_hadd_epi16(a, b);
  __m128i d = _mm_hsub_epi16(a, b);

  *row0 = _mm_hadd_epi16(c, d);
  *row1 = _mm_hsub_epi16(c, d);
}

static INLINE void ver_add_sub_avx2(__m128i temp_hor[8], __m128i temp_ver[8]){

  // First stage
  for (int i = 0; i < 8; i += 2){
    temp_ver[i+0] = _mm_hadd_epi16(temp_hor[i + 0], temp_hor[i + 1]);
    temp_ver[i+1] = _mm_hsub_epi16(temp_hor[i + 0], temp_hor[i + 1]);
  }

  // Second stage
  for (int i = 0; i < 8; i += 4){
    temp_hor[i + 0] = _mm_add_epi16(temp_ver[i + 0], temp_ver[i + 2]);
    temp_hor[i + 1] = _mm_add_epi16(temp_ver[i + 1], temp_ver[i + 3]);
    temp_hor[i + 2] = _mm_sub_epi16(temp_ver[i + 0], temp_ver[i + 2]);
    temp_hor[i + 3] = _mm_sub_epi16(temp_ver[i + 1], temp_ver[i + 3]);
  }

  // Third stage
  for (int i = 0; i < 4; ++i){
    temp_ver[i + 0] = _mm_add_epi16(temp_hor[0 + i], temp_hor[4 + i]);
    temp_ver[i + 4] = _mm_sub_epi16(temp_hor[0 + i], temp_hor[4 + i]);
  }
}

INLINE static void haddwd_accumulate_avx2(__m128i *accumulate, __m128i *ver_row)
{
  __m128i abs_value = _mm_abs_epi16(*ver_row);
  *accumulate = _mm_add_epi32(*accumulate, _mm_madd_epi16(abs_value, _mm_set1_epi16(1)));
}

INLINE static unsigned sum_block_avx2(__m128i *ver_row)
{
  __m128i sad = _mm_setzero_si128();
  haddwd_accumulate_avx2(&sad, ver_row + 0);
  haddwd_accumulate_avx2(&sad, ver_row + 1);
  haddwd_accumulate_avx2(&sad, ver_row + 2);
  haddwd_accumulate_avx2(&sad, ver_row + 3);
  haddwd_accumulate_avx2(&sad, ver_row + 4);
  haddwd_accumulate_avx2(&sad, ver_row + 5);
  haddwd_accumulate_avx2(&sad, ver_row + 6);
  haddwd_accumulate_avx2(&sad, ver_row + 7);

  sad = _mm_add_epi32(sad, _mm_shuffle_epi32(sad, KVZ_PERMUTE(2, 3, 0, 1)));
  sad = _mm_add_epi32(sad, _mm_shuffle_epi32(sad, KVZ_PERMUTE(1, 0, 1, 0)));

  return _mm_cvtsi128_si32(sad);
}

INLINE static __m128i diff_row_avx2(const kvz_pixel *buf1, const kvz_pixel *buf2)
{
  __m128i buf1_row = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)buf1));
  __m128i buf2_row = _mm_cvtepu8_epi16(_mm_loadl_epi64((__m128i*)buf2));
  return _mm_sub_epi16(buf1_row, buf2_row);
}

INLINE static void diff_blocks_and_hor_transform_avx2(__m128i row_diff[8], const kvz_pixel * buf1, unsigned stride1, const kvz_pixel * buf2, unsigned stride2)
{
  row_diff[0] = diff_row_avx2(buf1 + 0 * stride1, buf2 + 0 * stride2);
  row_diff[1] = diff_row_avx2(buf1 + 1 * stride1, buf2 + 1 * stride2);
  hor_add_sub_avx2(row_diff + 0, row_diff + 1);

  row_diff[2] = diff_row_avx2(buf1 + 2 * stride1, buf2 + 2 * stride2);
  row_diff[3] = diff_row_avx2(buf1 + 3 * stride1, buf2 + 3 * stride2);
  hor_add_sub_avx2(row_diff + 2, row_diff + 3);

  row_diff[4] = diff_row_avx2(buf1 + 4 * stride1, buf2 + 4 * stride2);
  row_diff[5] = diff_row_avx2(buf1 + 5 * stride1, buf2 + 5 * stride2);
  hor_add_sub_avx2(row_diff + 4, row_diff + 5);

  row_diff[6] = diff_row_avx2(buf1 + 6 * stride1, buf2 + 6 * stride2);
  row_diff[7] = diff_row_avx2(buf1 + 7 * stride1, buf2 + 7 * stride2);
  hor_add_sub_avx2(row_diff + 6, row_diff + 7);
}

static unsigned kvz_satd_8bit_8x8_general_avx2(const kvz_pixel * buf1, unsigned stride1, const kvz_pixel * buf2, unsigned stride2)
{
  __m128i temp_hor[8];
  __m128i temp_ver[8];

  diff_blocks_and_hor_transform_avx2(temp_hor, buf1, stride1, buf2, stride2);

  ver_add_sub_avx2(temp_hor, temp_ver);

  unsigned sad = sum_block_avx2(temp_ver);

  unsigned result = (sad + 2) >> 2;
  return result;
}

// Function macro for defining hadamard calculating functions
// for fixed size blocks. They calculate hadamard for integer
// multiples of 8x8 with the 8x8 hadamard function.
#define SATD_NXN_AVX2(n) \
static unsigned satd_8bit_ ## n ## x ## n ## _avx2( \
  const kvz_pixel * const block1, const kvz_pixel * const block2) \
{ \
  unsigned x, y; \
  unsigned sum = 0; \
  for (y = 0; y < (n); y += 8) { \
    unsigned row = y * (n); \
    for (x = 0; x < (n); x += 8) { \
      sum += kvz_satd_8bit_8x8_general_avx2(&block1[row + x], (n), &block2[row + x], (n)); \
    } \
  } \
  return sum>>(KVZ_BIT_DEPTH-8); \
}

static unsigned satd_8bit_8x8_avx2(
  const kvz_pixel * const block1, const kvz_pixel * const block2)
{
  unsigned x, y;
  unsigned sum = 0;
  for (y = 0; y < (8); y += 8) {
    unsigned row = y * (8);
    for (x = 0; x < (8); x += 8) {
      sum += kvz_satd_8bit_8x8_general_avx2(&block1[row + x], (8), &block2[row + x], (8));
    }
  }
  return sum>>(KVZ_BIT_DEPTH-8);
}

//SATD_NXN_AVX2(8) //Use the non-macro version
SATD_NXN_AVX2(16)
SATD_NXN_AVX2(32)
SATD_NXN_AVX2(64)

#endif //COMPILE_INTEL_AVX2
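
For reference, the SATD_NXN_AVX2 macro above simply tiles larger blocks with the 8x8 kernel; instantiating it with n = 16, for example, expands (modulo whitespace) to the following. This is only an illustration of the expansion, not additional code in the commit:

static unsigned satd_8bit_16x16_avx2(
  const kvz_pixel * const block1, const kvz_pixel * const block2)
{
  unsigned x, y;
  unsigned sum = 0;
  for (y = 0; y < (16); y += 8) {
    unsigned row = y * (16);          /* both blocks are stored with stride n */
    for (x = 0; x < (16); x += 8) {
      sum += kvz_satd_8bit_8x8_general_avx2(&block1[row + x], (16), &block2[row + x], (16));
    }
  }
  return sum >> (KVZ_BIT_DEPTH - 8);
}
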

@@ -153,6 +334,12 @@ int kvz_strategy_register_picture_avx2(void* opaque, uint8_t bitdepth)
    success &= kvz_strategyselector_register(opaque, "sad_16x16", "avx2", 40, &sad_8bit_16x16_avx2);
    success &= kvz_strategyselector_register(opaque, "sad_32x32", "avx2", 40, &sad_8bit_32x32_avx2);
    success &= kvz_strategyselector_register(opaque, "sad_64x64", "avx2", 40, &sad_8bit_64x64_avx2);

    success &= kvz_strategyselector_register(opaque, "satd_4x4", "avx2", 40, &satd_8bit_4x4_avx2);
    success &= kvz_strategyselector_register(opaque, "satd_8x8", "avx2", 40, &satd_8bit_8x8_avx2);
    success &= kvz_strategyselector_register(opaque, "satd_16x16", "avx2", 40, &satd_8bit_16x16_avx2);
    success &= kvz_strategyselector_register(opaque, "satd_32x32", "avx2", 40, &satd_8bit_32x32_avx2);
    success &= kvz_strategyselector_register(opaque, "satd_64x64", "avx2", 40, &satd_8bit_64x64_avx2);
  }
#endif
  return success;

src/strategies/avx2/quant-avx2.c (new file, 220 lines)

@@ -0,0 +1,220 @@
/*****************************************************************************
 * This file is part of Kvazaar HEVC encoder.
 *
 * Copyright (C) 2013-2015 Tampere University of Technology and others (see
 * COPYING file).
 *
 * Kvazaar is free software: you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the
 * Free Software Foundation; either version 2.1 of the License, or (at your
 * option) any later version.
 *
 * Kvazaar is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with Kvazaar. If not, see <http://www.gnu.org/licenses/>.
 ****************************************************************************/

/*
 * \file
 */

#include <stdlib.h>

#include "quant-avx2.h"
#include "../generic/quant-generic.h"
#include "../strategies-common.h"
#include "strategyselector.h"
#include "encoder.h"
#include "transform.h"

#if COMPILE_INTEL_AVX2
#include <immintrin.h>

/**
 * \brief quantize transformed coefficents
 *
 */
void kvz_quant_flat_avx2(const encoder_state_t * const state, coeff_t *coef, coeff_t *q_coef, int32_t width,
  int32_t height, int8_t type, int8_t scan_idx, int8_t block_type)
{
  const encoder_control_t * const encoder = state->encoder_control;
  const uint32_t log2_block_size = kvz_g_convert_to_bit[width] + 2;
  const uint32_t * const scan = kvz_g_sig_last_scan[scan_idx][log2_block_size - 1];

  int32_t qp_scaled = kvz_get_scaled_qp(type, state->global->QP, (encoder->bitdepth - 8) * 6);
  const uint32_t log2_tr_size = kvz_g_convert_to_bit[width] + 2;
  const int32_t scalinglist_type = (block_type == CU_INTRA ? 0 : 3) + (int8_t)("\0\3\1\2"[type]);
  const int32_t *quant_coeff = encoder->scaling_list.quant_coeff[log2_tr_size - 2][scalinglist_type][qp_scaled % 6];
  const int32_t transform_shift = MAX_TR_DYNAMIC_RANGE - encoder->bitdepth - log2_tr_size; //!< Represents scaling through forward transform
  const int32_t q_bits = QUANT_SHIFT + qp_scaled / 6 + transform_shift;
  const int32_t add = ((state->global->slicetype == KVZ_SLICE_I) ? 171 : 85) << (q_bits - 9);
  const int32_t q_bits8 = q_bits - 8;

  assert(quant_coeff[0] <= (1 << 15) - 1 && quant_coeff[0] >= -(1 << 15)); //Assuming flat values to fit int16_t

  uint32_t ac_sum = 0;

  __m256i v_ac_sum = _mm256_setzero_si256();
  __m256i v_quant_coeff = _mm256_set1_epi16(quant_coeff[0]);

  for (int32_t n = 0; n < width * height; n += 16) {

    __m256i v_level = _mm256_loadu_si256((__m256i*)&(coef[n]));
    __m256i v_sign = _mm256_cmpgt_epi16(_mm256_setzero_si256(), v_level);
    v_sign = _mm256_or_si256(v_sign, _mm256_set1_epi16(1));

    v_level = _mm256_abs_epi16(v_level);
    __m256i low_a = _mm256_unpacklo_epi16(v_level, _mm256_set1_epi16(0));
    __m256i high_a = _mm256_unpackhi_epi16(v_level, _mm256_set1_epi16(0));

    __m256i low_b = _mm256_unpacklo_epi16(v_quant_coeff, _mm256_set1_epi16(0));
    __m256i high_b = _mm256_unpackhi_epi16(v_quant_coeff, _mm256_set1_epi16(0));

    __m256i v_level32_a = _mm256_madd_epi16(low_a, low_b);
    __m256i v_level32_b = _mm256_madd_epi16(high_a, high_b);

    v_level32_a = _mm256_add_epi32(v_level32_a, _mm256_set1_epi32(add));
    v_level32_b = _mm256_add_epi32(v_level32_b, _mm256_set1_epi32(add));

    v_level32_a = _mm256_srai_epi32(v_level32_a, q_bits);
    v_level32_b = _mm256_srai_epi32(v_level32_b, q_bits);

    v_level = _mm256_packs_epi32(v_level32_a, v_level32_b);
    v_level = _mm256_sign_epi16(v_level, v_sign);

    _mm256_storeu_si256((__m256i*)&(q_coef[n]), v_level);

    v_ac_sum = _mm256_add_epi32(v_ac_sum, v_level32_a);
    v_ac_sum = _mm256_add_epi32(v_ac_sum, v_level32_b);
  }

  __m128i temp = _mm_add_epi32(_mm256_castsi256_si128(v_ac_sum), _mm256_extracti128_si256(v_ac_sum, 1));
  temp = _mm_add_epi32(temp, _mm_shuffle_epi32(temp, KVZ_PERMUTE(2, 3, 0, 1)));
  temp = _mm_add_epi32(temp, _mm_shuffle_epi32(temp, KVZ_PERMUTE(1, 0, 1, 0)));
  ac_sum += _mm_cvtsi128_si32(temp);

  if (!(encoder->sign_hiding && ac_sum >= 2)) return;

  int32_t delta_u[LCU_WIDTH*LCU_WIDTH >> 2];

  for (int32_t n = 0; n < width * height; n++) {
    int32_t level;
    level = coef[n];
    level = ((int64_t)abs(level) * quant_coeff[n] + add) >> q_bits;
    delta_u[n] = (int32_t)(((int64_t)abs(coef[n]) * quant_coeff[n] - (level << q_bits)) >> q_bits8);
  }

  if (ac_sum >= 2) {
#define SCAN_SET_SIZE 16
#define LOG2_SCAN_SET_SIZE 4
    int32_t n, last_cg = -1, abssum = 0, subset, subpos;
    for (subset = (width*height - 1) >> LOG2_SCAN_SET_SIZE; subset >= 0; subset--) {
      int32_t first_nz_pos_in_cg = SCAN_SET_SIZE, last_nz_pos_in_cg = -1;
      subpos = subset << LOG2_SCAN_SET_SIZE;
      abssum = 0;

      // Find last coeff pos
      for (n = SCAN_SET_SIZE - 1; n >= 0; n--) {
        if (q_coef[scan[n + subpos]]) {
          last_nz_pos_in_cg = n;
          break;
        }
      }

      // First coeff pos
      for (n = 0; n <SCAN_SET_SIZE; n++) {
        if (q_coef[scan[n + subpos]]) {
          first_nz_pos_in_cg = n;
          break;
        }
      }

      // Sum all kvz_quant coeffs between first and last
      for (n = first_nz_pos_in_cg; n <= last_nz_pos_in_cg; n++) {
        abssum += q_coef[scan[n + subpos]];
      }

      if (last_nz_pos_in_cg >= 0 && last_cg == -1) {
        last_cg = 1;
      }

      if (last_nz_pos_in_cg - first_nz_pos_in_cg >= 4) {
        int32_t signbit = (q_coef[scan[subpos + first_nz_pos_in_cg]] > 0 ? 0 : 1);
        if (signbit != (abssum & 0x1)) { // compare signbit with sum_parity
          int32_t min_cost_inc = 0x7fffffff, min_pos = -1, cur_cost = 0x7fffffff;
          int16_t final_change = 0, cur_change = 0;
          for (n = (last_cg == 1 ? last_nz_pos_in_cg : SCAN_SET_SIZE - 1); n >= 0; n--) {
            uint32_t blkPos = scan[n + subpos];
            if (q_coef[blkPos] != 0) {
              if (delta_u[blkPos] > 0) {
                cur_cost = -delta_u[blkPos];
                cur_change = 1;
              }
              else if (n == first_nz_pos_in_cg && abs(q_coef[blkPos]) == 1) {
                cur_cost = 0x7fffffff;
              }
              else {
                cur_cost = delta_u[blkPos];
                cur_change = -1;
              }
            }
            else if (n < first_nz_pos_in_cg && ((coef[blkPos] >= 0) ? 0 : 1) != signbit) {
              cur_cost = 0x7fffffff;
            }
            else {
              cur_cost = -delta_u[blkPos];
              cur_change = 1;
            }

            if (cur_cost < min_cost_inc) {
              min_cost_inc = cur_cost;
              final_change = cur_change;
              min_pos = blkPos;
            }
          } // CG loop

          if (q_coef[min_pos] == 32767 || q_coef[min_pos] == -32768) {
            final_change = -1;
          }

          if (coef[min_pos] >= 0) q_coef[min_pos] += final_change;
          else q_coef[min_pos] -= final_change;
        } // Hide
      }
      if (last_cg == 1) last_cg = 0;
    }

#undef SCAN_SET_SIZE
#undef LOG2_SCAN_SET_SIZE
  }
}

void kvz_quant_avx2(const encoder_state_t * const state, coeff_t *coef, coeff_t *q_coef, int32_t width,
  int32_t height, int8_t type, int8_t scan_idx, int8_t block_type)
{
  if (state->encoder_control->scaling_list.enable){
    kvz_quant_generic(state, coef, q_coef, width, height, type, scan_idx, block_type);
  }
  else {
    kvz_quant_flat_avx2(state, coef, q_coef, width, height, type, scan_idx, block_type);
  }
}

#endif //COMPILE_INTEL_AVX2

int kvz_strategy_register_quant_avx2(void* opaque, uint8_t bitdepth)
{
  bool success = true;

#if COMPILE_INTEL_AVX2
  success &= kvz_strategyselector_register(opaque, "quant", "avx2", 40, &kvz_quant_avx2);
#endif //COMPILE_INTEL_AVX2

  return success;
}
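
To make the fixed-point scaling in kvz_quant_flat_avx2 concrete, here is a small worked example of the shift and rounding-offset computation. It is illustrative only and assumes 8-bit input, a luma 8x8 transform block (log2_tr_size = 3), an I-slice, qp_scaled = 22, QUANT_SHIFT = 14 as defined in the files here, and MAX_TR_DYNAMIC_RANGE = 15 (an assumed value, not stated in this diff):

#include <stdio.h>

/* Worked example of the quantizer parameters for one assumed configuration. */
int main(void)
{
  const int max_tr_dynamic_range = 15;   /* assumed */
  const int quant_shift = 14;            /* QUANT_SHIFT */
  const int bitdepth = 8, log2_tr_size = 3, qp_scaled = 22;

  int transform_shift = max_tr_dynamic_range - bitdepth - log2_tr_size; /* 4  */
  int q_bits = quant_shift + qp_scaled / 6 + transform_shift;           /* 21 */
  int add = 171 << (q_bits - 9);         /* I-slice rounding offset: 700416   */

  /* Each coefficient is then quantized as
   * level = (abs(coef) * quant_coeff + add) >> q_bits, with the sign restored. */
  printf("transform_shift=%d q_bits=%d add=%d\n", transform_shift, q_bits, add);
  return 0;
}
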

src/strategies/avx2/quant-avx2.h (new file, 26 lines)

@@ -0,0 +1,26 @@
#ifndef STRATEGIES_QUANT_AVX2_H_
#define STRATEGIES_QUANT_AVX2_H_
/*****************************************************************************
 * This file is part of Kvazaar HEVC encoder.
 *
 * Copyright (C) 2013-2015 Tampere University of Technology and others (see
 * COPYING file).
 *
 * Kvazaar is free software: you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the
 * Free Software Foundation; either version 2.1 of the License, or (at your
 * option) any later version.
 *
 * Kvazaar is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with Kvazaar. If not, see <http://www.gnu.org/licenses/>.
 ****************************************************************************/
#include <stdint.h>

int kvz_strategy_register_quant_avx2(void* opaque, uint8_t bitdepth);

#endif //STRATEGIES_QUANT_AVX2_H_

src/strategies/generic/quant-generic.c (new file, 173 lines)

@@ -0,0 +1,173 @@
/*****************************************************************************
 * This file is part of Kvazaar HEVC encoder.
 *
 * Copyright (C) 2013-2015 Tampere University of Technology and others (see
 * COPYING file).
 *
 * Kvazaar is free software: you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the
 * Free Software Foundation; either version 2.1 of the License, or (at your
 * option) any later version.
 *
 * Kvazaar is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with Kvazaar. If not, see <http://www.gnu.org/licenses/>.
 ****************************************************************************/

/*
 * \file
 */

#include <stdlib.h>

#include "quant-generic.h"
#include "strategyselector.h"
#include "encoder.h"
#include "transform.h"

#define QUANT_SHIFT 14
/**
 * \brief quantize transformed coefficents
 *
 */
void kvz_quant_generic(const encoder_state_t * const state, coeff_t *coef, coeff_t *q_coef, int32_t width,
  int32_t height, int8_t type, int8_t scan_idx, int8_t block_type)
{
  const encoder_control_t * const encoder = state->encoder_control;
  const uint32_t log2_block_size = kvz_g_convert_to_bit[width] + 2;
  const uint32_t * const scan = kvz_g_sig_last_scan[scan_idx][log2_block_size - 1];

  int32_t qp_scaled = kvz_get_scaled_qp(type, state->global->QP, (encoder->bitdepth - 8) * 6);
  const uint32_t log2_tr_size = kvz_g_convert_to_bit[width] + 2;
  const int32_t scalinglist_type = (block_type == CU_INTRA ? 0 : 3) + (int8_t)("\0\3\1\2"[type]);
  const int32_t *quant_coeff = encoder->scaling_list.quant_coeff[log2_tr_size - 2][scalinglist_type][qp_scaled % 6];
  const int32_t transform_shift = MAX_TR_DYNAMIC_RANGE - encoder->bitdepth - log2_tr_size; //!< Represents scaling through forward transform
  const int32_t q_bits = QUANT_SHIFT + qp_scaled / 6 + transform_shift;
  const int32_t add = ((state->global->slicetype == KVZ_SLICE_I) ? 171 : 85) << (q_bits - 9);
  const int32_t q_bits8 = q_bits - 8;

  uint32_t ac_sum = 0;

  for (int32_t n = 0; n < width * height; n++) {
    int32_t level;
    int32_t sign;

    level = coef[n];
    sign = (level < 0 ? -1 : 1);

    level = ((int64_t)abs(level) * quant_coeff[n] + add) >> q_bits;
    ac_sum += level;

    level *= sign;
    q_coef[n] = (coeff_t)(CLIP(-32768, 32767, level));
  }

  if (!(encoder->sign_hiding && ac_sum >= 2)) return;

  int32_t delta_u[LCU_WIDTH*LCU_WIDTH >> 2];

  for (int32_t n = 0; n < width * height; n++) {
    int32_t level;
    level = coef[n];
    level = ((int64_t)abs(level) * quant_coeff[n] + add) >> q_bits;
    delta_u[n] = (int32_t)(((int64_t)abs(coef[n]) * quant_coeff[n] - (level << q_bits)) >> q_bits8);
  }

  if (ac_sum >= 2) {
#define SCAN_SET_SIZE 16
#define LOG2_SCAN_SET_SIZE 4
    int32_t n, last_cg = -1, abssum = 0, subset, subpos;
    for (subset = (width*height - 1) >> LOG2_SCAN_SET_SIZE; subset >= 0; subset--) {
      int32_t first_nz_pos_in_cg = SCAN_SET_SIZE, last_nz_pos_in_cg = -1;
      subpos = subset << LOG2_SCAN_SET_SIZE;
      abssum = 0;

      // Find last coeff pos
      for (n = SCAN_SET_SIZE - 1; n >= 0; n--) {
        if (q_coef[scan[n + subpos]]) {
          last_nz_pos_in_cg = n;
          break;
        }
      }

      // First coeff pos
      for (n = 0; n <SCAN_SET_SIZE; n++) {
        if (q_coef[scan[n + subpos]]) {
          first_nz_pos_in_cg = n;
          break;
        }
      }

      // Sum all kvz_quant coeffs between first and last
      for (n = first_nz_pos_in_cg; n <= last_nz_pos_in_cg; n++) {
        abssum += q_coef[scan[n + subpos]];
      }

      if (last_nz_pos_in_cg >= 0 && last_cg == -1) {
        last_cg = 1;
      }

      if (last_nz_pos_in_cg - first_nz_pos_in_cg >= 4) {
        int32_t signbit = (q_coef[scan[subpos + first_nz_pos_in_cg]] > 0 ? 0 : 1);
        if (signbit != (abssum & 0x1)) { // compare signbit with sum_parity
          int32_t min_cost_inc = 0x7fffffff, min_pos = -1, cur_cost = 0x7fffffff;
          int16_t final_change = 0, cur_change = 0;
          for (n = (last_cg == 1 ? last_nz_pos_in_cg : SCAN_SET_SIZE - 1); n >= 0; n--) {
            uint32_t blkPos = scan[n + subpos];
            if (q_coef[blkPos] != 0) {
              if (delta_u[blkPos] > 0) {
                cur_cost = -delta_u[blkPos];
                cur_change = 1;
              }
              else if (n == first_nz_pos_in_cg && abs(q_coef[blkPos]) == 1) {
                cur_cost = 0x7fffffff;
              }
              else {
                cur_cost = delta_u[blkPos];
                cur_change = -1;
              }
            }
            else if (n < first_nz_pos_in_cg && ((coef[blkPos] >= 0) ? 0 : 1) != signbit) {
              cur_cost = 0x7fffffff;
            }
            else {
              cur_cost = -delta_u[blkPos];
              cur_change = 1;
            }

            if (cur_cost < min_cost_inc) {
              min_cost_inc = cur_cost;
              final_change = cur_change;
              min_pos = blkPos;
            }
          } // CG loop

          if (q_coef[min_pos] == 32767 || q_coef[min_pos] == -32768) {
            final_change = -1;
          }

          if (coef[min_pos] >= 0) q_coef[min_pos] += final_change;
          else q_coef[min_pos] -= final_change;
        } // Hide
      }
      if (last_cg == 1) last_cg = 0;
    }

#undef SCAN_SET_SIZE
#undef LOG2_SCAN_SET_SIZE
  }
}

int kvz_strategy_register_quant_generic(void* opaque, uint8_t bitdepth)
{
  bool success = true;

  success &= kvz_strategyselector_register(opaque, "quant", "generic", 0, &kvz_quant_generic);

  return success;
}
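
The sign-hiding branch above implements standard HEVC sign data hiding: in a 4x4 coefficient group whose first and last nonzero positions are at least 4 apart, the sign of the first nonzero coefficient is not coded, and the parity of the sum of levels must agree with that sign; when it does not, the encoder nudges the coefficient with the cheapest delta_u by one. As an illustration of the rule the encoder is enforcing (my own sketch, not part of this commit), the decoder-side inference looks roughly like this:

#include <stdint.h>
#include <stdlib.h>

/* Illustrative sketch: infer the hidden sign of the first nonzero coefficient
 * of a coefficient group from the parity of the group's level sum.
 * Even parity means positive, odd means negative, matching the encoder's
 * check  signbit != (abssum & 0x1)  above. */
static int infer_hidden_sign(const int16_t *levels, int first_nz, int last_nz)
{
  int sum = 0;
  for (int n = first_nz; n <= last_nz; ++n) sum += abs(levels[n]);
  return (sum & 1) ? -1 : +1;
}
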

src/strategies/generic/quant-generic.h (new file, 31 lines)

@@ -0,0 +1,31 @@
#ifndef STRATEGIES_QUANT_GENERIC_H_
#define STRATEGIES_QUANT_GENERIC_H_
/*****************************************************************************
 * This file is part of Kvazaar HEVC encoder.
 *
 * Copyright (C) 2013-2015 Tampere University of Technology and others (see
 * COPYING file).
 *
 * Kvazaar is free software: you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the
 * Free Software Foundation; either version 2.1 of the License, or (at your
 * option) any later version.
 *
 * Kvazaar is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with Kvazaar. If not, see <http://www.gnu.org/licenses/>.
 ****************************************************************************/
#include <stdint.h>
#include "encoderstate.h"

#define QUANT_SHIFT 14

int kvz_strategy_register_quant_generic(void* opaque, uint8_t bitdepth);
void kvz_quant_generic(const encoder_state_t * const state, coeff_t *coef, coeff_t *q_coef, int32_t width,
  int32_t height, int8_t type, int8_t scan_idx, int8_t block_type);

#endif //STRATEGIES_QUANT_GENERIC_H_

src/strategies/strategies-common.h (new file, 8 lines)

@@ -0,0 +1,8 @@
#ifndef STRATEGIES_COMMON_H_
#define STRATEGIES_COMMON_H_

//Use with shuffle and permutation intrinsics.
//Parameters are indices to packed elements. Each must be 0, 1, 2 or 3.
#define KVZ_PERMUTE(a, b, c, d) ( (a << 0) | (b << 2) | (c << 4) | (d << 6) )

#endif //STRATEGIES_COMMON_H_
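
A quick sanity check of the macro (my own illustration, not part of the commit): the first argument selects the element for the lowest lane, so the arguments read left to right from lane 0 upward. The values used in the horizontal reductions above work out as follows:

#include <assert.h>

/* Copied from strategies-common.h above so this snippet stands alone. */
#define KVZ_PERMUTE(a, b, c, d) ( (a << 0) | (b << 2) | (c << 4) | (d << 6) )

int main(void)
{
  /* 0x4e: with _mm_shuffle_epi32 this pulls dwords 2 and 3 into the low half,
   * i.e. swaps the two 64-bit halves (first folding step of the sums above). */
  assert(KVZ_PERMUTE(2, 3, 0, 1) == 0x4e);

  /* 0x11: swaps the two low 32-bit lanes (second folding step). */
  assert(KVZ_PERMUTE(1, 0, 1, 0) == 0x11);
  return 0;
}
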

src/strategies/strategies-quant.c (new file, 41 lines)

@@ -0,0 +1,41 @@
/*****************************************************************************
 * This file is part of Kvazaar HEVC encoder.
 *
 * Copyright (C) 2013-2015 Tampere University of Technology and others (see
 * COPYING file).
 *
 * Kvazaar is free software: you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the
 * Free Software Foundation; either version 2.1 of the License, or (at your
 * option) any later version.
 *
 * Kvazaar is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with Kvazaar. If not, see <http://www.gnu.org/licenses/>.
 ****************************************************************************/

#include "strategies-quant.h"
#include "strategyselector.h"

// Define function pointers.
quant_func *kvz_quant;

// Headers for platform optimizations.
#include "generic/quant-generic.h"
#include "avx2/quant-avx2.h"

int kvz_strategy_register_quant(void* opaque, uint8_t bitdepth) {
  bool success = true;

  success &= kvz_strategy_register_quant_generic(opaque, bitdepth);

  if (kvz_g_hardware_flags.intel_flags.avx2) {
    success &= kvz_strategy_register_quant_avx2(opaque, bitdepth);
  }
  return success;
}

src/strategies/strategies-quant.h (new file, 40 lines)

@@ -0,0 +1,40 @@
#ifndef STRATEGIES_QUANT_H_
#define STRATEGIES_QUANT_H_
/*****************************************************************************
 * This file is part of Kvazaar HEVC encoder.
 *
 * Copyright (C) 2013-2015 Tampere University of Technology and others (see
 * COPYING file).
 *
 * Kvazaar is free software: you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the
 * Free Software Foundation; either version 2.1 of the License, or (at your
 * option) any later version.
 *
 * Kvazaar is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with Kvazaar. If not, see <http://www.gnu.org/licenses/>.
 ****************************************************************************/

#include "encoderstate.h"

// Declare function pointers.
typedef unsigned (quant_func)(const encoder_state_t * const state, coeff_t *coef, coeff_t *q_coef, int32_t width,
  int32_t height, int8_t type, int8_t scan_idx, int8_t block_type);

// Declare function pointers.
extern quant_func * kvz_quant;

int kvz_strategy_register_quant(void* opaque, uint8_t bitdepth);

#define STRATEGIES_QUANT_EXPORTS \
  {"quant", (void**) &kvz_quant}, \

#endif //STRATEGIES_QUANT_H_
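
Putting the pieces of this diff together: quant-generic.c registers "quant" with priority 0, quant-avx2.c registers it with priority 40 when AVX2 is available, and STRATEGIES_QUANT_EXPORTS ties the name "quant" to the kvz_quant pointer so that kvz_strategyselector_init() (patched below) can fill it with the highest-priority registered implementation. A call site then just goes through the pointer; the sketch below is illustrative only, with made-up argument values and SCAN_DIAG assumed to be the scan-order enum name:

/* Illustrative sketch: after kvz_strategyselector_init() has run, kvz_quant
 * points at kvz_quant_generic or kvz_quant_avx2; callers do not care which. */
static void quantize_one_block(const encoder_state_t *state,
                               coeff_t *coef, coeff_t *q_coef)
{
  kvz_quant(state, coef, q_coef,
            8, 8,        /* width, height of the transform block */
            0,           /* type: luma                           */
            SCAN_DIAG,   /* scan_idx (assumed enum name)         */
            CU_INTRA);   /* block_type                           */
}
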

@@ -69,6 +69,11 @@ int kvz_strategyselector_init(int32_t cpuid, uint8_t bitdepth) {
    fprintf(stderr, "kvz_strategy_register_ipol failed!\n");
    return 0;
  }

  if (!kvz_strategy_register_quant(&strategies, bitdepth)) {
    fprintf(stderr, "kvz_strategy_register_quant failed!\n");
    return 0;
  }

  while(cur_strategy_to_select->fptr) {
    *(cur_strategy_to_select->fptr) = strategyselector_choose_for(&strategies, cur_strategy_to_select->strategy_type);

@@ -148,12 +148,14 @@ int kvz_strategyselector_register(void *opaque, const char *type, const char *st
#include "strategies/strategies-picture.h"
#include "strategies/strategies-dct.h"
#include "strategies/strategies-ipol.h"
#include "strategies/strategies-quant.h"

static const strategy_to_select_t strategies_to_select[] = {
  STRATEGIES_NAL_EXPORTS
  STRATEGIES_PICTURE_EXPORTS
  STRATEGIES_DCT_EXPORTS
  STRATEGIES_IPOL_EXPORTS
  STRATEGIES_QUANT_EXPORTS
  { NULL, NULL },
};

src/transform.c (133 lines changed)
@@ -33,6 +33,8 @@
#include "nal.h"
#include "rdo.h"
#include "strategies/strategies-dct.h"
#include "strategies/strategies-quant.h"
#include "strategies/generic/quant-generic.h"

//////////////////////////////////////////////////////////////////////////
// INITIALIZATIONS

@@ -127,137 +129,6 @@ void kvz_itransform2d(const encoder_control_t * const encoder, int16_t *block, i
  idct_func(encoder->bitdepth, coeff, block);
}

#define QUANT_SHIFT 14
/**
 * \brief quantize transformed coefficents
 *
 */
void kvz_quant(const encoder_state_t * const state, coeff_t *coef, coeff_t *q_coef, int32_t width,
  int32_t height, int8_t type, int8_t scan_idx, int8_t block_type )
{
  const encoder_control_t * const encoder = state->encoder_control;
  const uint32_t log2_block_size = kvz_g_convert_to_bit[ width ] + 2;
  const uint32_t * const scan = kvz_g_sig_last_scan[ scan_idx ][ log2_block_size - 1 ];

  int32_t qp_scaled = kvz_get_scaled_qp(type, state->global->QP, (encoder->bitdepth-8)*6);

  const uint32_t log2_tr_size = kvz_g_convert_to_bit[ width ] + 2;
  const int32_t scalinglist_type = (block_type == CU_INTRA ? 0 : 3) + (int8_t)("\0\3\1\2"[type]);
  const int32_t *quant_coeff = encoder->scaling_list.quant_coeff[log2_tr_size-2][scalinglist_type][qp_scaled%6];
  const int32_t transform_shift = MAX_TR_DYNAMIC_RANGE - encoder->bitdepth - log2_tr_size; //!< Represents scaling through forward transform
  const int32_t q_bits = QUANT_SHIFT + qp_scaled/6 + transform_shift;
  const int32_t add = ((state->global->slicetype == KVZ_SLICE_I) ? 171 : 85) << (q_bits - 9);
  const int32_t q_bits8 = q_bits - 8;

  uint32_t ac_sum = 0;

  for (int32_t n = 0; n < width * height; n++) {
    int32_t level;
    int32_t sign;

    level = coef[n];
    sign = (level < 0 ? -1: 1);

    level = ((int64_t)abs(level) * quant_coeff[n] + add) >> q_bits;
    ac_sum += level;

    level *= sign;
    q_coef[n] = (coeff_t)(CLIP( -32768, 32767, level));
  }

  if (!(encoder->sign_hiding && ac_sum >= 2)) return;

  int32_t delta_u[LCU_WIDTH*LCU_WIDTH >> 2];

  for (int32_t n = 0; n < width * height; n++) {
    int32_t level;
    level = coef[n];
    level = ((int64_t)abs(level) * quant_coeff[n] + add) >> q_bits;
    delta_u[n] = (int32_t)(((int64_t)abs(coef[n]) * quant_coeff[n] - (level << q_bits)) >> q_bits8);
  }

  if(ac_sum >= 2) {
#define SCAN_SET_SIZE 16
#define LOG2_SCAN_SET_SIZE 4
    int32_t n,last_cg = -1, abssum = 0, subset, subpos;
    for(subset = (width*height - 1)>>LOG2_SCAN_SET_SIZE; subset >= 0; subset--) {
      int32_t first_nz_pos_in_cg = SCAN_SET_SIZE, last_nz_pos_in_cg=-1;
      subpos = subset<<LOG2_SCAN_SET_SIZE;
      abssum = 0;

      // Find last coeff pos
      for (n = SCAN_SET_SIZE - 1; n >= 0; n--) {
        if (q_coef[scan[n + subpos]]) {
          last_nz_pos_in_cg = n;
          break;
        }
      }

      // First coeff pos
      for (n = 0; n <SCAN_SET_SIZE; n++) {
        if (q_coef[scan[n + subpos]]) {
          first_nz_pos_in_cg = n;
          break;
        }
      }

      // Sum all kvz_quant coeffs between first and last
      for(n = first_nz_pos_in_cg; n <= last_nz_pos_in_cg; n++) {
        abssum += q_coef[scan[n + subpos]];
      }

      if(last_nz_pos_in_cg >= 0 && last_cg == -1) {
        last_cg = 1;
      }

      if(last_nz_pos_in_cg - first_nz_pos_in_cg >= 4) {
        int32_t signbit = (q_coef[scan[subpos + first_nz_pos_in_cg]] > 0 ? 0 : 1) ;
        if(signbit != (abssum&0x1)) { // compare signbit with sum_parity
          int32_t min_cost_inc = 0x7fffffff, min_pos =-1, cur_cost=0x7fffffff;
          int16_t final_change = 0, cur_change=0;
          for(n = (last_cg == 1 ? last_nz_pos_in_cg : SCAN_SET_SIZE - 1); n >= 0; n--) {
            uint32_t blkPos = scan[n + subpos];
            if(q_coef[blkPos] != 0) {
              if(delta_u[blkPos] > 0) {
                cur_cost = -delta_u[blkPos];
                cur_change=1;
              } else if(n == first_nz_pos_in_cg && abs(q_coef[blkPos]) == 1) {
                cur_cost=0x7fffffff;
              } else {
                cur_cost = delta_u[blkPos];
                cur_change =-1;
              }
            } else if(n < first_nz_pos_in_cg && ((coef[blkPos] >= 0)?0:1) != signbit) {
              cur_cost = 0x7fffffff;
            } else {
              cur_cost = -delta_u[blkPos];
              cur_change = 1;
            }

            if(cur_cost < min_cost_inc) {
              min_cost_inc = cur_cost;
              final_change = cur_change;
              min_pos = blkPos;
            }
          } // CG loop

          if(q_coef[min_pos] == 32767 || q_coef[min_pos] == -32768) {
            final_change = -1;
          }

          if(coef[min_pos] >= 0) q_coef[min_pos] += final_change;
          else q_coef[min_pos] -= final_change;
        } // Hide
      }
      if (last_cg == 1) last_cg=0;
    }

#undef SCAN_SET_SIZE
#undef LOG2_SCAN_SET_SIZE
  }
}

/**
 * \brief inverse quantize transformed and quantized coefficents
 *
@@ -35,8 +35,6 @@ extern const int16_t kvz_g_inv_quant_scales[6];

void kvz_quant(const encoder_state_t *state, coeff_t *coef, coeff_t *q_coef, int32_t width,
  int32_t height, int8_t type, int8_t scan_idx, int8_t block_type);
void kvz_dequant(const encoder_state_t *state, coeff_t *q_coef, coeff_t *coef, int32_t width, int32_t height, int8_t type, int8_t block_type);

void kvz_transformskip(const encoder_control_t *encoder, int16_t *block,int16_t *coeff, int8_t block_size);