Use separate lambda and QP for each LCU
Adds fields lambda, lambda_sqrt and qp to encoder_state_t. Drops field cur_lambda_cost_sqrt from encoder_state_config_frame_t and renames cur_lambda_cost to lambda.
commit 640ff94ecd
parent 435c387357
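For orientation, a minimal sketch (hypothetical code, not part of this commit) of how the new per-LCU fields relate to the frame-level values and to kvz_lambda_to_qp(); the helper name is invented and the snippet assumes the encoder's own headers are included:

#include <math.h>

/* Hypothetical helper: seed or override the per-LCU lambda and QP added by
 * this commit. The encoder_state_t fields and kvz_lambda_to_qp() are real;
 * example_set_lcu_lambda() is not. */
static void example_set_lcu_lambda(encoder_state_t *state, double lcu_lambda)
{
  state->lambda      = lcu_lambda;                   // weight for SSE-based bit costs
  state->lambda_sqrt = sqrt(lcu_lambda);             // weight for SAD/SATD-based bit costs
  state->qp          = kvz_lambda_to_qp(lcu_lambda); // clipped to [0, 51] by the callee
}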
@@ -201,7 +201,11 @@ static void encoder_state_worker_encode_lcu(void * opaque) {
   encoder_state_t *state = lcu->encoder_state;
   const encoder_control_t * const encoder = state->encoder_control;
   videoframe_t* const frame = state->tile->frame;
 
+  state->lambda = state->frame->lambda;
+  state->lambda_sqrt = sqrt(state->frame->lambda);
+  state->qp = state->frame->QP;
+
   //This part doesn't write to bitstream, it's only search, deblock and sao
 
   kvz_search_lcu(state, lcu->position_px.x, lcu->position_px.y, state->tile->hor_buf_search, state->tile->ver_buf_search);
@@ -902,7 +906,7 @@ static void encoder_state_init_new_frame(encoder_state_t * const state, kvz_pict
   if (cfg->target_bitrate > 0) {
     // Rate control enabled.
     lambda = kvz_select_picture_lambda(state);
-    state->frame->QP = kvz_lambda_to_QP(lambda);
+    state->frame->QP = kvz_lambda_to_qp(lambda);
   } else {
     if (cfg->gop_len > 0 && state->frame->slicetype != KVZ_SLICE_I) {
       kvz_gop_config const * const gop =
@@ -914,8 +918,7 @@ static void encoder_state_init_new_frame(encoder_state_t * const state, kvz_pict
     }
     lambda = kvz_select_picture_lambda_from_qp(state);
   }
-  state->frame->cur_lambda_cost = lambda;
-  state->frame->cur_lambda_cost_sqrt = sqrt(lambda);
+  state->frame->lambda = lambda;
 
   encoder_state_init_children(state);
 }
@@ -51,16 +51,29 @@ typedef enum {
 
 typedef struct encoder_state_config_frame_t {
-  double cur_lambda_cost; //!< \brief Lambda for SSE
-  double cur_lambda_cost_sqrt; //!< \brief Lambda for SAD and SATD
+  /**
+   * \brief Frame-level lambda.
+   *
+   * Use state->lambda or state->lambda_sqrt for cost computations.
+   *
+   * \see encoder_state_t::lambda
+   * \see encoder_state_t::lambda_sqrt
+   */
+  double lambda;
 
   int32_t num;       /*!< \brief Frame number */
   int32_t poc;       /*!< \brief Picture order count */
   int8_t gop_offset; /*!< \brief Offset in the gop structure */
 
-  int8_t QP;        //!< \brief Quantization parameter
-  double QP_factor; //!< \brief Quantization factor
+  /**
+   * \brief Frame-level quantization parameter
+   *
+   * \see encoder_state_t::qp
+   */
+  int8_t QP;
+  //! \brief quantization factor
+  double QP_factor;
 
 
   //Current picture available references
   image_list_t *ref;
   int8_t ref_list;
@@ -199,7 +212,14 @@ typedef struct encoder_state_t {
   cabac_data_t cabac;
 
   uint32_t stats_bitstream_length; //Bitstream length written in bytes
 
+  //! \brief Lambda for SSE
+  double lambda;
+  //! \brief Lambda for SAD and SATD
+  double lambda_sqrt;
+  //! \brief Quantization parameter for the current LCU
+  int8_t qp;
+
   //Jobs to wait for
   threadqueue_job_t * tqj_recon_done; //Reconstruction is done
   threadqueue_job_t * tqj_bitstream_written; //Bitstream is written
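Why two lambdas: bit counts are weighted by lambda when distortion is measured as SSE (squared error) and by sqrt(lambda) when it is measured as SAD or SATD (absolute error), matching the field comments above. A hedged sketch with invented helper names, assuming the encoder's headers:

/* Sketch only: the two cost domains the new fields serve. */
static double example_cost_sse(const encoder_state_t *state, double sse, double bits)
{
  return sse + state->lambda * bits;        /* full RD cost, SSE distortion */
}

static double example_cost_satd(const encoder_state_t *state, double satd, double bits)
{
  return satd + state->lambda_sqrt * bits;  /* estimation cost, SAD/SATD distortion */
}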
@@ -291,7 +291,7 @@ static void filter_deblock_edge_luma(encoder_state_t * const state,
   kvz_pixel *src = orig_src;
 
   int8_t strength = 0;
-  int32_t qp = state->frame->QP;
+  int32_t qp = state->qp;
   int32_t bitdepth_scale = 1 << (encoder->bitdepth - 8);
   int32_t b_index = CLIP(0, 51, qp + (beta_offset_div2 << 1));
   int32_t beta = kvz_g_beta_table_8x8[b_index] * bitdepth_scale;
@@ -490,7 +490,7 @@ static void filter_deblock_edge_chroma(encoder_state_t * const state,
   };
   int8_t strength = 2;
 
-  int32_t QP = kvz_g_chroma_scale[state->frame->QP];
+  int32_t QP = kvz_g_chroma_scale[state->qp];
   int32_t bitdepth_scale = 1 << (encoder->bitdepth-8);
   int32_t TC_index = CLIP(0, 51+2, (int32_t)(QP + 2*(strength-1) + (tc_offset_div2 << 1)));
   int32_t Tc = kvz_g_tc_table_8x8[TC_index]*bitdepth_scale;
@@ -49,7 +49,7 @@ static void update_rc_parameters(encoder_state_t * state)
   // lambda computed from real bpp
   const double lambda_comp = CLIP(MIN_LAMBDA, MAX_LAMBDA, alpha_old * pow(bpp, beta_old));
   // lambda used in encoding
-  const double lambda_real = state->frame->cur_lambda_cost;
+  const double lambda_real = state->frame->lambda;
   const double lambda_log_ratio = log(lambda_real) - log(lambda_comp);
 
   const double alpha = alpha_old + 0.1 * lambda_log_ratio * alpha_old;
@@ -148,7 +148,7 @@ double kvz_select_picture_lambda(encoder_state_t * const state)
   return CLIP(MIN_LAMBDA, MAX_LAMBDA, lambda);
 }
 
-int8_t kvz_lambda_to_QP(const double lambda)
+int8_t kvz_lambda_to_qp(const double lambda)
 {
   const int8_t qp = 4.2005 * log(lambda) + 13.7223 + 0.5;
   return CLIP(0, 51, qp);
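For reference, kvz_lambda_to_qp() above implements qp = 4.2005 * ln(lambda) + 13.7223, rounded and clipped to [0, 51]; the algebraic inverse is lambda = exp((qp - 13.7223) / 4.2005). A sketch with an invented name, not part of the patch:

#include <math.h>

/* Hypothetical inverse of kvz_lambda_to_qp(). */
static double example_qp_to_lambda(int8_t qp)
{
  return exp((qp - 13.7223) / 4.2005);
}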
@@ -33,7 +33,7 @@
 
 double kvz_select_picture_lambda(encoder_state_t * const state);
 
-int8_t kvz_lambda_to_QP(const double lambda);
+int8_t kvz_lambda_to_qp(const double lambda);
 
 double kvz_select_picture_lambda_from_qp(encoder_state_t const * const state);
 
src/rdo.c (30 lines changed)
@@ -257,7 +257,7 @@ uint32_t kvz_get_coded_level ( encoder_state_t * const state, double *coded_cost
   cabac_ctx_t* base_sig_model = type?(cabac->ctx.cu_sig_model_chroma):(cabac->ctx.cu_sig_model_luma);
 
   if( !last && max_abs_level < 3 ) {
-    *coded_cost_sig = state->frame->cur_lambda_cost * CTX_ENTROPY_BITS(&base_sig_model[ctx_num_sig], 0);
+    *coded_cost_sig = state->lambda * CTX_ENTROPY_BITS(&base_sig_model[ctx_num_sig], 0);
     *coded_cost = *coded_cost0 + *coded_cost_sig;
     if (max_abs_level == 0) return best_abs_level;
   } else {
@@ -265,13 +265,13 @@ uint32_t kvz_get_coded_level ( encoder_state_t * const state, double *coded_cost
   }
 
   if( !last ) {
-    cur_cost_sig = state->frame->cur_lambda_cost * CTX_ENTROPY_BITS(&base_sig_model[ctx_num_sig], 1);
+    cur_cost_sig = state->lambda * CTX_ENTROPY_BITS(&base_sig_model[ctx_num_sig], 1);
   }
 
   min_abs_level = ( max_abs_level > 1 ? max_abs_level - 1 : 1 );
   for (abs_level = max_abs_level; abs_level >= min_abs_level ; abs_level-- ) {
     double err = (double)(level_double - ( abs_level << q_bits ) );
-    double cur_cost = err * err * temp + state->frame->cur_lambda_cost *
+    double cur_cost = err * err * temp + state->lambda *
                       kvz_get_ic_rate( state, abs_level, ctx_num_one, ctx_num_abs,
                                        abs_go_rice, c1_idx, c2_idx, type);
     cur_cost += cur_cost_sig;
@@ -308,7 +308,7 @@ static double get_rate_last(const encoder_state_t * const state,
   if( ctx_y > 3 ) {
     uiCost += 32768.0 * ((ctx_y-2)>>1);
   }
-  return state->frame->cur_lambda_cost*uiCost;
+  return state->lambda * uiCost;
 }
 
 static void calc_last_bits(encoder_state_t * const state, int32_t width, int32_t height, int8_t type,
@@ -358,7 +358,7 @@ void kvz_rdoq_sign_hiding(const encoder_state_t *const state,
 
   int64_t rd_factor = (int64_t)(
     kvz_g_inv_quant_scales[qp_scaled % 6] * kvz_g_inv_quant_scales[qp_scaled % 6] * (1 << (2 * (qp_scaled / 6)))
-    / state->frame->cur_lambda_cost / 16 / (1 << (2 * (encoder->bitdepth - 8)))
+    / state->lambda / 16 / (1 << (2 * (encoder->bitdepth - 8)))
     + 0.5);
   int32_t lastCG = -1;
   int32_t absSum = 0;
@@ -467,7 +467,7 @@ void kvz_rdoq(encoder_state_t * const state, coeff_t *coef, coeff_t *dest_coeff,
   uint32_t max_num_coeff = width * height;
   int32_t scalinglist_type= (block_type == CU_INTRA ? 0 : 3) + (int8_t)("\0\3\1\2"[type]);
 
-  int32_t qp_scaled = kvz_get_scaled_qp(type, state->frame->QP, (encoder->bitdepth - 8) * 6);
+  int32_t qp_scaled = kvz_get_scaled_qp(type, state->qp, (encoder->bitdepth - 8) * 6);
 
   int32_t q_bits = QUANT_SHIFT + qp_scaled/6 + transform_shift;
 
@@ -669,7 +669,7 @@ void kvz_rdoq(encoder_state_t * const state, coeff_t *coef, coeff_t *dest_coeff,
       if (sig_coeffgroup_flag[cg_blkpos] == 0) {
         uint32_t ctx_sig = kvz_context_get_sig_coeff_group(sig_coeffgroup_flag, cg_pos_x,
                                                            cg_pos_y, width);
-        cost_coeffgroup_sig[cg_scanpos] = state->frame->cur_lambda_cost*CTX_ENTROPY_BITS(&base_coeff_group_ctx[ctx_sig],0);
+        cost_coeffgroup_sig[cg_scanpos] = state->lambda *CTX_ENTROPY_BITS(&base_coeff_group_ctx[ctx_sig],0);
         base_cost += cost_coeffgroup_sig[cg_scanpos] - rd_stats.sig_cost;
       } else {
         if (cg_scanpos < cg_last_scanpos){
@@ -686,9 +686,9 @@ void kvz_rdoq(encoder_state_t * const state, coeff_t *coef, coeff_t *dest_coeff,
           ctx_sig = kvz_context_get_sig_coeff_group(sig_coeffgroup_flag, cg_pos_x,
                                                     cg_pos_y, width);
 
-          cost_coeffgroup_sig[cg_scanpos] = state->frame->cur_lambda_cost*CTX_ENTROPY_BITS(&base_coeff_group_ctx[ctx_sig], 1);
+          cost_coeffgroup_sig[cg_scanpos] = state->lambda * CTX_ENTROPY_BITS(&base_coeff_group_ctx[ctx_sig], 1);
           base_cost += cost_coeffgroup_sig[cg_scanpos];
-          cost_zero_cg += state->frame->cur_lambda_cost*CTX_ENTROPY_BITS(&base_coeff_group_ctx[ctx_sig], 0);
+          cost_zero_cg += state->lambda * CTX_ENTROPY_BITS(&base_coeff_group_ctx[ctx_sig], 0);
 
           // try to convert the current coeff group from non-zero to all-zero
           cost_zero_cg += rd_stats.uncoded_dist; // distortion for resetting non-zero levels to zero levels
@@ -701,7 +701,7 @@ void kvz_rdoq(encoder_state_t * const state, coeff_t *coef, coeff_t *dest_coeff,
             sig_coeffgroup_flag[cg_blkpos] = 0;
             base_cost = cost_zero_cg;
 
-            cost_coeffgroup_sig[cg_scanpos] = state->frame->cur_lambda_cost*CTX_ENTROPY_BITS(&base_coeff_group_ctx[ctx_sig], 0);
+            cost_coeffgroup_sig[cg_scanpos] = state->lambda * CTX_ENTROPY_BITS(&base_coeff_group_ctx[ctx_sig], 0);
 
             // reset coeffs to 0 in this block
             for (int32_t scanpos_in_cg = cg_size - 1; scanpos_in_cg >= 0; scanpos_in_cg--) {
@@ -728,13 +728,13 @@ void kvz_rdoq(encoder_state_t * const state, coeff_t *coef, coeff_t *dest_coeff,
   int32_t best_last_idx_p1 = 0;
 
   if( block_type != CU_INTRA && !type/* && pcCU->getTransformIdx( uiAbsPartIdx ) == 0*/ ) {
-    best_cost = block_uncoded_cost + state->frame->cur_lambda_cost*CTX_ENTROPY_BITS(&(cabac->ctx.cu_qt_root_cbf_model),0);
-    base_cost += state->frame->cur_lambda_cost*CTX_ENTROPY_BITS(&(cabac->ctx.cu_qt_root_cbf_model),1);
+    best_cost = block_uncoded_cost + state->lambda * CTX_ENTROPY_BITS(&(cabac->ctx.cu_qt_root_cbf_model),0);
+    base_cost += state->lambda * CTX_ENTROPY_BITS(&(cabac->ctx.cu_qt_root_cbf_model),1);
   } else {
     cabac_ctx_t* base_cbf_model = type?(cabac->ctx.qt_cbf_model_chroma):(cabac->ctx.qt_cbf_model_luma);
     ctx_cbf = ( type ? tr_depth : !tr_depth);
-    best_cost = block_uncoded_cost + state->frame->cur_lambda_cost*CTX_ENTROPY_BITS(&base_cbf_model[ctx_cbf],0);
-    base_cost += state->frame->cur_lambda_cost*CTX_ENTROPY_BITS(&base_cbf_model[ctx_cbf],1);
+    best_cost = block_uncoded_cost + state->lambda * CTX_ENTROPY_BITS(&base_cbf_model[ctx_cbf],0);
+    base_cost += state->lambda * CTX_ENTROPY_BITS(&base_cbf_model[ctx_cbf],1);
   }
 
   for ( int32_t cg_scanpos = cg_last_scanpos; cg_scanpos >= 0; cg_scanpos--) {
@@ -1006,5 +1006,5 @@ int kvz_calc_mvd_cost_cabac(encoder_state_t * const state, int x, int y, int mv_
   *bitcost = (23 - state_cabac_copy.bits_left) + (state_cabac_copy.num_buffered_bytes << 3);
 
   // Store bitcost before restoring cabac
-  return *bitcost * (int32_t)(state->frame->cur_lambda_cost_sqrt + 0.5);
+  return *bitcost * (int32_t)(state->lambda_sqrt + 0.5);
 }
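The rdo.c changes are mechanical: every cur_lambda_cost factor becomes the per-LCU state->lambda, so RDO-quantization decisions follow the LCU's own lambda. A hedged sketch of the recurring cost term (helper name invented; encoder_state_t, cabac_ctx_t and CTX_ENTROPY_BITS come from the encoder's headers):

/* Sketch of the recurring RDO term: rate taken from a CABAC context,
 * weighted by the per-LCU lambda. */
static double example_ctx_bit_cost(const encoder_state_t *state,
                                   const cabac_ctx_t *ctx, int bin)
{
  return state->lambda * CTX_ENTROPY_BITS(ctx, bin);
}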
src/sao.c (12 lines changed)
@@ -501,7 +501,7 @@ static void sao_search_edge_sao(const encoder_state_t * const state,
 
   {
     float mode_bits = sao_mode_bits_edge(state, edge_class, edge_offset, sao_top, sao_left, buf_cnt);
-    sum_ddistortion += (int)((double)mode_bits*state->frame->cur_lambda_cost+0.5);
+    sum_ddistortion += (int)((double)mode_bits*state->lambda +0.5);
   }
   // SAO is not applied for category 0.
   edge_offset[SAO_EO_CAT0] = 0;
@@ -545,7 +545,7 @@ static void sao_search_band_sao(const encoder_state_t * const state, const kvz_p
   }
 
   temp_rate = sao_mode_bits_band(state, sao_out->band_position, temp_offsets, sao_top, sao_left, buf_cnt);
-  ddistortion += (int)((double)temp_rate*state->frame->cur_lambda_cost + 0.5);
+  ddistortion += (int)((double)temp_rate*state->lambda + 0.5);
 
   // Select band sao over edge sao when distortion is lower
   if (ddistortion < sao_out->ddistortion) {
@@ -589,7 +589,7 @@ static void sao_search_best_mode(const encoder_state_t * const state, const kvz_
 
   {
     float mode_bits = sao_mode_bits_edge(state, edge_sao.eo_class, edge_sao.offsets, sao_top, sao_left, buf_cnt);
-    int ddistortion = (int)(mode_bits * state->frame->cur_lambda_cost + 0.5);
+    int ddistortion = (int)(mode_bits * state->lambda + 0.5);
     unsigned buf_i;
 
     for (buf_i = 0; buf_i < buf_cnt; ++buf_i) {
@@ -603,7 +603,7 @@ static void sao_search_best_mode(const encoder_state_t * const state, const kvz_
 
   {
     float mode_bits = sao_mode_bits_band(state, band_sao.band_position, band_sao.offsets, sao_top, sao_left, buf_cnt);
-    int ddistortion = (int)(mode_bits * state->frame->cur_lambda_cost + 0.5);
+    int ddistortion = (int)(mode_bits * state->lambda + 0.5);
     unsigned buf_i;
 
     for (buf_i = 0; buf_i < buf_cnt; ++buf_i) {
@@ -626,7 +626,7 @@ static void sao_search_best_mode(const encoder_state_t * const state, const kvz_
   // Choose between SAO and doing nothing, taking into account the
   // rate-distortion cost of coding do nothing.
   {
-    int cost_of_nothing = (int)(sao_mode_bits_none(state, sao_top, sao_left) * state->frame->cur_lambda_cost + 0.5);
+    int cost_of_nothing = (int)(sao_mode_bits_none(state, sao_top, sao_left) * state->lambda + 0.5);
     if (sao_out->ddistortion >= cost_of_nothing) {
       sao_out->type = SAO_TYPE_NONE;
       merge_cost[0] = cost_of_nothing;
@@ -643,7 +643,7 @@ static void sao_search_best_mode(const encoder_state_t * const state, const kvz_
     if (merge_cand) {
       unsigned buf_i;
       float mode_bits = sao_mode_bits_merge(state, i + 1);
-      int ddistortion = (int)(mode_bits * state->frame->cur_lambda_cost + 0.5);
+      int ddistortion = (int)(mode_bits * state->lambda + 0.5);
 
       switch (merge_cand->type) {
         case SAO_TYPE_EDGE:
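The SAO search folds its rate term into an integer distortion sum; only the lambda source changes here. A minimal sketch of that pattern (invented name, assuming the encoder's headers):

/* Sketch: fractional mode bits weighted by the per-LCU lambda and rounded
 * into integer distortion units, as the SAO search above does. */
static int example_bits_to_distortion(const encoder_state_t *state, float mode_bits)
{
  return (int)((double)mode_bits * state->lambda + 0.5);
}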
src/search.c (22 lines changed)
@@ -321,7 +321,7 @@ double kvz_cu_rd_cost_luma(const encoder_state_t *const state,
     sum += kvz_cu_rd_cost_luma(state, x_px, y_px + offset, depth + 1, pred_cu, lcu);
     sum += kvz_cu_rd_cost_luma(state, x_px + offset, y_px + offset, depth + 1, pred_cu, lcu);
 
-    return sum + tr_tree_bits * state->frame->cur_lambda_cost;
+    return sum + tr_tree_bits * state->lambda;
   }
 
   // Add transform_tree cbf_luma bit cost.
@@ -353,7 +353,7 @@ double kvz_cu_rd_cost_luma(const encoder_state_t *const state,
   }
 
   double bits = tr_tree_bits + coeff_bits;
-  return (double)ssd * LUMA_MULT + bits * state->frame->cur_lambda_cost;
+  return (double)ssd * LUMA_MULT + bits * state->lambda;
 }
 
 
@@ -398,7 +398,7 @@ double kvz_cu_rd_cost_chroma(const encoder_state_t *const state,
     sum += kvz_cu_rd_cost_chroma(state, x_px, y_px + offset, depth + 1, pred_cu, lcu);
     sum += kvz_cu_rd_cost_chroma(state, x_px + offset, y_px + offset, depth + 1, pred_cu, lcu);
 
-    return sum + tr_tree_bits * state->frame->cur_lambda_cost;
+    return sum + tr_tree_bits * state->lambda;
   }
 
   // Chroma SSD
@@ -428,7 +428,7 @@ double kvz_cu_rd_cost_chroma(const encoder_state_t *const state,
   }
 
   double bits = tr_tree_bits + coeff_bits;
-  return (double)ssd * CHROMA_MULT + bits * state->frame->cur_lambda_cost;
+  return (double)ssd * CHROMA_MULT + bits * state->lambda;
 }
 
 
@@ -682,7 +682,7 @@ static double search_cu(encoder_state_t * const state, int x, int y, int depth,
       mode_bits = inter_bitcost;
     }
 
-    cost += mode_bits * state->frame->cur_lambda_cost;
+    cost += mode_bits * state->lambda;
   }
 
   // Recursively split all the way to max search depth.
@@ -695,15 +695,15 @@ static double search_cu(encoder_state_t * const state, int x, int y, int depth,
       // Add cost of cu_split_flag.
       uint8_t split_model = get_ctx_cu_split_model(lcu, x, y, depth);
       const cabac_ctx_t *ctx = &(state->cabac.ctx.split_flag_model[split_model]);
-      cost += CTX_ENTROPY_FBITS(ctx, 0) * state->frame->cur_lambda_cost;
-      split_cost += CTX_ENTROPY_FBITS(ctx, 1) * state->frame->cur_lambda_cost;
+      cost += CTX_ENTROPY_FBITS(ctx, 0) * state->lambda;
+      split_cost += CTX_ENTROPY_FBITS(ctx, 1) * state->lambda;
     }
 
     if (cur_cu->type == CU_INTRA && depth == MAX_DEPTH) {
       // Add cost of intra part_size.
       const cabac_ctx_t *ctx = &(state->cabac.ctx.part_size_model[0]);
-      cost += CTX_ENTROPY_FBITS(ctx, 1) * state->frame->cur_lambda_cost; // 2Nx2N
-      split_cost += CTX_ENTROPY_FBITS(ctx, 0) * state->frame->cur_lambda_cost; // NxN
+      cost += CTX_ENTROPY_FBITS(ctx, 1) * state->lambda; // 2Nx2N
+      split_cost += CTX_ENTROPY_FBITS(ctx, 0) * state->lambda; // NxN
     }
 
     // If skip mode was selected for the block, skip further search.
@@ -750,11 +750,11 @@ static double search_cu(encoder_state_t * const state, int x, int y, int depth,
       // Add the cost of coding no-split.
       uint8_t split_model = get_ctx_cu_split_model(lcu, x, y, depth);
       const cabac_ctx_t *ctx = &(state->cabac.ctx.split_flag_model[split_model]);
-      cost += CTX_ENTROPY_FBITS(ctx, 0) * state->frame->cur_lambda_cost;
+      cost += CTX_ENTROPY_FBITS(ctx, 0) * state->lambda;
 
       // Add the cost of coding intra mode only once.
       double mode_bits = calc_mode_bits(state, &work_tree[depth], cur_cu, x, y);
-      cost += mode_bits * state->frame->cur_lambda_cost;
+      cost += mode_bits * state->lambda;
     }
   }
 
@@ -253,7 +253,7 @@ static int calc_mvd_cost(encoder_state_t * const state, int x, int y, int mv_shi
     temp_bitcost += cur_mv_cand ? cand2_cost : cand1_cost;
   }
   *bitcost = temp_bitcost;
-  return temp_bitcost*(int32_t)(state->frame->cur_lambda_cost_sqrt+0.5);
+  return temp_bitcost*(int32_t)(state->lambda_sqrt + 0.5);
 }
 
 
@@ -117,7 +117,7 @@ static double get_cost(encoder_state_t * const state,
     trskip_bits += 2.0 * (CTX_ENTROPY_FBITS(ctx, 1) - CTX_ENTROPY_FBITS(ctx, 0));
   }
 
-  double sad_cost = TRSKIP_RATIO * sad_func(pred, orig_block) + state->frame->cur_lambda_cost_sqrt * trskip_bits;
+  double sad_cost = TRSKIP_RATIO * sad_func(pred, orig_block) + state->lambda_sqrt * trskip_bits;
   if (sad_cost < satd_cost) {
     return sad_cost;
   }
@@ -164,7 +164,7 @@ static void get_cost_dual(encoder_state_t * const state,
     double sad_costs[PARALLEL_BLKS] = { 0 };
     sad_twin_func(preds, orig_block, PARALLEL_BLKS, unsigned_sad_costs);
     for (int i = 0; i < PARALLEL_BLKS; ++i) {
-      sad_costs[i] = TRSKIP_RATIO * (double)unsigned_sad_costs[i] + state->frame->cur_lambda_cost_sqrt * trskip_bits;
+      sad_costs[i] = TRSKIP_RATIO * (double)unsigned_sad_costs[i] + state->lambda_sqrt * trskip_bits;
       if (sad_costs[i] < (double)satd_costs[i]) {
         costs_out[i] = sad_costs[i];
       }
@@ -254,7 +254,7 @@ static double search_intra_trdepth(encoder_state_t * const state,
   //   max_depth.
   // - Min transform size hasn't been reached (MAX_PU_DEPTH).
   if (depth < max_depth && depth < MAX_PU_DEPTH) {
-    split_cost = 3 * state->frame->cur_lambda_cost;
+    split_cost = 3 * state->lambda;
 
     split_cost += search_intra_trdepth(state, x_px, y_px, depth + 1, max_depth, intra_mode, nosplit_cost, pred_cu, lcu);
     if (split_cost < nosplit_cost) {
@@ -296,7 +296,7 @@ static double search_intra_trdepth(encoder_state_t * const state,
     }
 
     double bits = tr_split_bit + cbf_bits;
-    split_cost += bits * state->frame->cur_lambda_cost;
+    split_cost += bits * state->lambda;
   } else {
     assert(width <= TR_MAX_WIDTH);
   }
@@ -529,7 +529,7 @@ static int8_t search_intra_rough(encoder_state_t * const state,
 
   // Add prediction mode coding cost as the last thing. We don't want this
   // affecting the halving search.
-  int lambda_cost = (int)(state->frame->cur_lambda_cost_sqrt + 0.5);
+  int lambda_cost = (int)(state->lambda_sqrt + 0.5);
   for (int mode_i = 0; mode_i < modes_selected; ++mode_i) {
     costs[mode_i] += lambda_cost * kvz_luma_mode_bits(state, modes[mode_i], intra_preds);
   }
@@ -600,7 +600,7 @@ static int8_t search_intra_rdo(encoder_state_t * const state,
 
   for(int rdo_mode = 0; rdo_mode < modes_to_check; rdo_mode ++) {
     int rdo_bitcost = kvz_luma_mode_bits(state, modes[rdo_mode], intra_preds);
-    costs[rdo_mode] = rdo_bitcost * (int)(state->frame->cur_lambda_cost + 0.5);
+    costs[rdo_mode] = rdo_bitcost * (int)(state->lambda + 0.5);
 
     // Perform transform split search and save mode RD cost for the best one.
     cu_info_t pred_cu;
@@ -701,7 +701,7 @@ int8_t kvz_search_intra_chroma_rdo(encoder_state_t * const state,
       chroma.cost = kvz_cu_rd_cost_chroma(state, lcu_px.x, lcu_px.y, depth, tr_cu, lcu);
 
       double mode_bits = kvz_chroma_mode_bits(state, chroma.mode, intra_mode);
-      chroma.cost += mode_bits * state->frame->cur_lambda_cost;
+      chroma.cost += mode_bits * state->lambda;
 
       if (chroma.cost < best_chroma.cost) {
         best_chroma = chroma;
@@ -52,7 +52,7 @@ void kvz_quant_flat_avx2(const encoder_state_t * const state, coeff_t *coef, coe
   const uint32_t log2_block_size = kvz_g_convert_to_bit[width] + 2;
   const uint32_t * const scan = kvz_g_sig_last_scan[scan_idx][log2_block_size - 1];
 
-  int32_t qp_scaled = kvz_get_scaled_qp(type, state->frame->QP, (encoder->bitdepth - 8) * 6);
+  int32_t qp_scaled = kvz_get_scaled_qp(type, state->qp, (encoder->bitdepth - 8) * 6);
   const uint32_t log2_tr_size = kvz_g_convert_to_bit[width] + 2;
   const int32_t scalinglist_type = (block_type == CU_INTRA ? 0 : 3) + (int8_t)("\0\3\1\2"[type]);
   const int32_t *quant_coeff = encoder->scaling_list.quant_coeff[log2_tr_size - 2][scalinglist_type][qp_scaled % 6];
@@ -457,7 +457,7 @@ void kvz_dequant_avx2(const encoder_state_t * const state, coeff_t *q_coef, coef
   int32_t n;
   int32_t transform_shift = 15 - encoder->bitdepth - (kvz_g_convert_to_bit[ width ] + 2);
 
-  int32_t qp_scaled = kvz_get_scaled_qp(type, state->frame->QP, (encoder->bitdepth-8)*6);
+  int32_t qp_scaled = kvz_get_scaled_qp(type, state->qp, (encoder->bitdepth-8)*6);
 
   shift = 20 - QUANT_SHIFT - transform_shift;
 
@@ -41,7 +41,7 @@ void kvz_quant_generic(const encoder_state_t * const state, coeff_t *coef, coeff
   const uint32_t log2_block_size = kvz_g_convert_to_bit[width] + 2;
   const uint32_t * const scan = kvz_g_sig_last_scan[scan_idx][log2_block_size - 1];
 
-  int32_t qp_scaled = kvz_get_scaled_qp(type, state->frame->QP, (encoder->bitdepth - 8) * 6);
+  int32_t qp_scaled = kvz_get_scaled_qp(type, state->qp, (encoder->bitdepth - 8) * 6);
   const uint32_t log2_tr_size = kvz_g_convert_to_bit[width] + 2;
   const int32_t scalinglist_type = (block_type == CU_INTRA ? 0 : 3) + (int8_t)("\0\3\1\2"[type]);
   const int32_t *quant_coeff = encoder->scaling_list.quant_coeff[log2_tr_size - 2][scalinglist_type][qp_scaled % 6];
@@ -286,7 +286,7 @@ void kvz_dequant_generic(const encoder_state_t * const state, coeff_t *q_coef, c
   int32_t n;
   int32_t transform_shift = 15 - encoder->bitdepth - (kvz_g_convert_to_bit[ width ] + 2);
 
-  int32_t qp_scaled = kvz_get_scaled_qp(type, state->frame->QP, (encoder->bitdepth-8)*6);
+  int32_t qp_scaled = kvz_get_scaled_qp(type, state->qp, (encoder->bitdepth-8)*6);
 
   shift = 20 - QUANT_SHIFT - transform_shift;
 
@@ -232,7 +232,7 @@ int kvz_quantize_residual_trskip(
     int has_coeffs;
   } skip, noskip, *best;
 
-  const int bit_cost = (int)(state->frame->cur_lambda_cost+0.5);
+  const int bit_cost = (int)(state->lambda + 0.5);
 
   noskip.has_coeffs = kvz_quantize_residual(
     state, cur_cu, width, color, scan_order,
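The quantization paths above read the QP through kvz_get_scaled_qp() and then split the result into a period-6 remainder and quotient, as in quant_coeff[...][qp_scaled % 6] and q_bits = QUANT_SHIFT + qp_scaled/6 + transform_shift. A hedged sketch of that decomposition (invented name):

#include <stdint.h>

/* Sketch: HEVC-style QP decomposition. Each +6 in QP doubles the quantization
 * step, so qp % 6 selects a scale table entry and qp / 6 adds an extra shift. */
static void example_split_qp(int32_t qp_scaled, int32_t *rem, int32_t *per)
{
  *rem = qp_scaled % 6; // index into the quant/dequant scale tables
  *per = qp_scaled / 6; // contributes an additional shift
}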