Move dequant to strategies. Copy generic to AVX2 as well.

This commit is contained in:
Ari Lemmetti 2015-10-23 19:53:50 +03:00
parent 47082738aa
commit cf347e33c4
6 changed files with 101 additions and 52 deletions

View file

@ -409,6 +409,54 @@ void kvz_quant_avx2(const encoder_state_t * const state, coeff_t *coef, coeff_t
} }
} }
/**
 * \brief Inverse quantize transformed and quantized coefficients.
 *
 * \param state       encoder state; supplies QP and scaling-list configuration
 * \param q_coef      input quantized coefficients (width*height entries)
 * \param coef        output dequantized coefficients, clipped to [-32768, 32767]
 * \param width       block width in samples
 * \param height      block height in samples
 * \param type        component type, selects the scaled QP
 * \param block_type  CU_INTRA or inter; selects the scaling-list type
 */
void kvz_dequant_avx2(const encoder_state_t * const state, coeff_t *q_coef, coeff_t *coef, int32_t width, int32_t height,int8_t type, int8_t block_type)
{
  const encoder_control_t * const encoder = state->encoder_control;
  int32_t shift,add,coeff_q;
  int32_t n;
  int32_t transform_shift = 15 - encoder->bitdepth - (kvz_g_convert_to_bit[ width ] + 2);

  int32_t qp_scaled = kvz_get_scaled_qp(type, state->global->QP, (encoder->bitdepth - 8) * 6);

  shift = 20 - QUANT_SHIFT - transform_shift;

  if (encoder->scaling_list.enable)
  {
    uint32_t log2_tr_size = kvz_g_convert_to_bit[ width ] + 2;
    // Map component type to scaling-list index; intra and inter use
    // separate list groups (offset 0 vs 3).
    int32_t scalinglist_type = (block_type == CU_INTRA ? 0 : 3) + (int8_t)("\0\3\1\2"[type]);

    const int32_t *dequant_coef = encoder->scaling_list.de_quant_coeff[log2_tr_size - 2][scalinglist_type][qp_scaled % 6];
    shift += 4;

    if (shift > qp_scaled / 6) {
      add = 1 << (shift - qp_scaled / 6 - 1);

      for (n = 0; n < width * height; n++) {
        coeff_q = ((q_coef[n] * dequant_coef[n]) + add) >> (shift - qp_scaled / 6);
        coef[n] = (coeff_t)CLIP(-32768, 32767, coeff_q);
      }
    } else {
      for (n = 0; n < width * height; n++) {
        // Clip to avoid possible overflow in the following scaling step.
        coeff_q = CLIP(-32768, 32767, q_coef[n] * dequant_coef[n]);
        // Left-shifting a negative int32 is undefined behavior (C11 6.5.7),
        // so scale with a well-defined 64-bit multiply instead of a shift.
        coef[n] = (coeff_t)CLIP(-32768, 32767,
            (int64_t)coeff_q * ((int64_t)1 << (qp_scaled / 6 - shift)));
      }
    }
  } else {
    int32_t scale = kvz_g_inv_quant_scales[qp_scaled % 6] << (qp_scaled / 6);
    add = 1 << (shift - 1);

    for (n = 0; n < width * height; n++) {
      coeff_q = (q_coef[n] * scale + add) >> shift;
      coef[n] = (coeff_t)CLIP(-32768, 32767, coeff_q);
    }
  }
}
#endif //COMPILE_INTEL_AVX2
@ -420,6 +468,7 @@ int kvz_strategy_register_quant_avx2(void* opaque, uint8_t bitdepth)
success &= kvz_strategyselector_register(opaque, "quant", "avx2", 40, &kvz_quant_avx2); success &= kvz_strategyselector_register(opaque, "quant", "avx2", 40, &kvz_quant_avx2);
if (bitdepth == 8) { if (bitdepth == 8) {
success &= kvz_strategyselector_register(opaque, "quantize_residual", "avx2", 40, &kvz_quantize_residual_avx2); success &= kvz_strategyselector_register(opaque, "quantize_residual", "avx2", 40, &kvz_quantize_residual_avx2);
success &= kvz_strategyselector_register(opaque, "dequant", "avx2", 40, &kvz_dequant_avx2);
} }
#endif //COMPILE_INTEL_AVX2

View file

@ -276,7 +276,53 @@ int kvz_quantize_residual_generic(encoder_state_t *const state,
return has_coeffs; return has_coeffs;
} }
/**
 * \brief Inverse quantize transformed and quantized coefficients.
 *
 * \param state       encoder state; supplies QP and scaling-list configuration
 * \param q_coef      input quantized coefficients (width*height entries)
 * \param coef        output dequantized coefficients, clipped to [-32768, 32767]
 * \param width       block width in samples
 * \param height      block height in samples
 * \param type        component type, selects the scaled QP
 * \param block_type  CU_INTRA or inter; selects the scaling-list type
 */
void kvz_dequant_generic(const encoder_state_t * const state, coeff_t *q_coef, coeff_t *coef, int32_t width, int32_t height,int8_t type, int8_t block_type)
{
  const encoder_control_t * const encoder = state->encoder_control;
  int32_t shift,add,coeff_q;
  int32_t n;
  int32_t transform_shift = 15 - encoder->bitdepth - (kvz_g_convert_to_bit[ width ] + 2);

  int32_t qp_scaled = kvz_get_scaled_qp(type, state->global->QP, (encoder->bitdepth - 8) * 6);

  shift = 20 - QUANT_SHIFT - transform_shift;

  if (encoder->scaling_list.enable)
  {
    uint32_t log2_tr_size = kvz_g_convert_to_bit[ width ] + 2;
    // Map component type to scaling-list index; intra and inter use
    // separate list groups (offset 0 vs 3).
    int32_t scalinglist_type = (block_type == CU_INTRA ? 0 : 3) + (int8_t)("\0\3\1\2"[type]);

    const int32_t *dequant_coef = encoder->scaling_list.de_quant_coeff[log2_tr_size - 2][scalinglist_type][qp_scaled % 6];
    shift += 4;

    if (shift > qp_scaled / 6) {
      add = 1 << (shift - qp_scaled / 6 - 1);

      for (n = 0; n < width * height; n++) {
        coeff_q = ((q_coef[n] * dequant_coef[n]) + add) >> (shift - qp_scaled / 6);
        coef[n] = (coeff_t)CLIP(-32768, 32767, coeff_q);
      }
    } else {
      for (n = 0; n < width * height; n++) {
        // Clip to avoid possible overflow in the following scaling step.
        coeff_q = CLIP(-32768, 32767, q_coef[n] * dequant_coef[n]);
        // Left-shifting a negative int32 is undefined behavior (C11 6.5.7),
        // so scale with a well-defined 64-bit multiply instead of a shift.
        coef[n] = (coeff_t)CLIP(-32768, 32767,
            (int64_t)coeff_q * ((int64_t)1 << (qp_scaled / 6 - shift)));
      }
    }
  } else {
    int32_t scale = kvz_g_inv_quant_scales[qp_scaled % 6] << (qp_scaled / 6);
    add = 1 << (shift - 1);

    for (n = 0; n < width * height; n++) {
      coeff_q = (q_coef[n] * scale + add) >> shift;
      coef[n] = (coeff_t)CLIP(-32768, 32767, coeff_q);
    }
  }
}
int kvz_strategy_register_quant_generic(void* opaque, uint8_t bitdepth) int kvz_strategy_register_quant_generic(void* opaque, uint8_t bitdepth)
{ {
@ -284,6 +330,7 @@ int kvz_strategy_register_quant_generic(void* opaque, uint8_t bitdepth)
success &= kvz_strategyselector_register(opaque, "quant", "generic", 0, &kvz_quant_generic); success &= kvz_strategyselector_register(opaque, "quant", "generic", 0, &kvz_quant_generic);
success &= kvz_strategyselector_register(opaque, "quantize_residual", "generic", 0, &kvz_quantize_residual_generic); success &= kvz_strategyselector_register(opaque, "quantize_residual", "generic", 0, &kvz_quantize_residual_generic);
success &= kvz_strategyselector_register(opaque, "dequant", "generic", 0, &kvz_dequant_generic);
return success; return success;
} }

View file

@ -24,6 +24,7 @@
// Define function pointers. // Define function pointers.
quant_func *kvz_quant; quant_func *kvz_quant;
quant_residual_func *kvz_quantize_residual; quant_residual_func *kvz_quantize_residual;
dequant_func *kvz_dequant;
// Headers for platform optimizations. // Headers for platform optimizations.
#include "generic/quant-generic.h" #include "generic/quant-generic.h"

View file

@ -31,10 +31,13 @@ typedef unsigned (quant_residual_func)(encoder_state_t *const state,
const int in_stride, const int out_stride, const int in_stride, const int out_stride,
const kvz_pixel *const ref_in, const kvz_pixel *const pred_in, const kvz_pixel *const ref_in, const kvz_pixel *const pred_in,
kvz_pixel *rec_out, coeff_t *coeff_out); kvz_pixel *rec_out, coeff_t *coeff_out);
// Strategy function-pointer type for inverse quantization.
// The registered implementations (kvz_dequant_generic, kvz_dequant_avx2)
// return void, so the pointer type must return void as well; declaring it
// `unsigned` makes the strategyselector registrations a type mismatch.
typedef void (dequant_func)(const encoder_state_t * const state, coeff_t *q_coef, coeff_t *coef, int32_t width,
                            int32_t height, int8_t type, int8_t block_type);
// Declare function pointers. // Declare function pointers.
extern quant_func * kvz_quant; extern quant_func * kvz_quant;
extern quant_residual_func * kvz_quantize_residual; extern quant_residual_func * kvz_quantize_residual;
extern dequant_func *kvz_dequant;
int kvz_strategy_register_quant(void* opaque, uint8_t bitdepth); int kvz_strategy_register_quant(void* opaque, uint8_t bitdepth);
@ -42,6 +45,7 @@ int kvz_strategy_register_quant(void* opaque, uint8_t bitdepth);
#define STRATEGIES_QUANT_EXPORTS \ #define STRATEGIES_QUANT_EXPORTS \
{"quant", (void**) &kvz_quant}, \ {"quant", (void**) &kvz_quant}, \
{"quantize_residual", (void**) &kvz_quantize_residual}, \ {"quantize_residual", (void**) &kvz_quantize_residual}, \
{"dequant", (void**) &kvz_dequant}, \

View file

@ -129,54 +129,6 @@ void kvz_itransform2d(const encoder_control_t * const encoder, int16_t *block, i
idct_func(encoder->bitdepth, coeff, block); idct_func(encoder->bitdepth, coeff, block);
} }
/**
 * \brief Inverse quantize transformed and quantized coefficients.
 *
 * \param state       encoder state; supplies QP and scaling-list configuration
 * \param q_coef      input quantized coefficients (width*height entries)
 * \param coef        output dequantized coefficients, clipped to [-32768, 32767]
 * \param width       block width in samples
 * \param height      block height in samples
 * \param type        component type, selects the scaled QP
 * \param block_type  CU_INTRA or inter; selects the scaling-list type
 */
void kvz_dequant(const encoder_state_t * const state, coeff_t *q_coef, coeff_t *coef, int32_t width, int32_t height,int8_t type, int8_t block_type)
{
  const encoder_control_t * const encoder = state->encoder_control;
  int32_t shift,add,coeff_q;
  int32_t n;
  int32_t transform_shift = 15 - encoder->bitdepth - (kvz_g_convert_to_bit[ width ] + 2);

  int32_t qp_scaled = kvz_get_scaled_qp(type, state->global->QP, (encoder->bitdepth - 8) * 6);

  shift = 20 - QUANT_SHIFT - transform_shift;

  if (encoder->scaling_list.enable)
  {
    uint32_t log2_tr_size = kvz_g_convert_to_bit[ width ] + 2;
    // Map component type to scaling-list index; intra and inter use
    // separate list groups (offset 0 vs 3).
    int32_t scalinglist_type = (block_type == CU_INTRA ? 0 : 3) + (int8_t)("\0\3\1\2"[type]);

    const int32_t *dequant_coef = encoder->scaling_list.de_quant_coeff[log2_tr_size - 2][scalinglist_type][qp_scaled % 6];
    shift += 4;

    if (shift > qp_scaled / 6) {
      add = 1 << (shift - qp_scaled / 6 - 1);

      for (n = 0; n < width * height; n++) {
        coeff_q = ((q_coef[n] * dequant_coef[n]) + add) >> (shift - qp_scaled / 6);
        coef[n] = (coeff_t)CLIP(-32768, 32767, coeff_q);
      }
    } else {
      for (n = 0; n < width * height; n++) {
        // Clip to avoid possible overflow in the following scaling step.
        coeff_q = CLIP(-32768, 32767, q_coef[n] * dequant_coef[n]);
        // Left-shifting a negative int32 is undefined behavior (C11 6.5.7),
        // so scale with a well-defined 64-bit multiply instead of a shift.
        coef[n] = (coeff_t)CLIP(-32768, 32767,
            (int64_t)coeff_q * ((int64_t)1 << (qp_scaled / 6 - shift)));
      }
    }
  } else {
    int32_t scale = kvz_g_inv_quant_scales[qp_scaled % 6] << (qp_scaled / 6);
    add = 1 << (shift - 1);

    for (n = 0; n < width * height; n++) {
      coeff_q = (q_coef[n] * scale + add) >> shift;
      coef[n] = (coeff_t)CLIP(-32768, 32767, coeff_q);
    }
  }
}
/** /**
* \brief Like kvz_quantize_residual except that this uses trskip if that is better. * \brief Like kvz_quantize_residual except that this uses trskip if that is better.
* *

View file

@ -33,10 +33,6 @@
extern const uint8_t kvz_g_chroma_scale[58]; extern const uint8_t kvz_g_chroma_scale[58];
extern const int16_t kvz_g_inv_quant_scales[6]; extern const int16_t kvz_g_inv_quant_scales[6];
void kvz_dequant(const encoder_state_t *state, coeff_t *q_coef, coeff_t *coef, int32_t width, int32_t height, int8_t type, int8_t block_type);
void kvz_transformskip(const encoder_control_t *encoder, int16_t *block,int16_t *coeff, int8_t block_size); void kvz_transformskip(const encoder_control_t *encoder, int16_t *block,int16_t *coeff, int8_t block_size);
void kvz_itransformskip(const encoder_control_t *encoder, int16_t *block,int16_t *coeff, int8_t block_size); void kvz_itransformskip(const encoder_control_t *encoder, int16_t *block,int16_t *coeff, int8_t block_size);