From 480a907c07768159e9b3885a43920954a0c83769 Mon Sep 17 00:00:00 2001
From: Joose Sainio
Date: Mon, 13 Jun 2022 10:00:46 +0300
Subject: [PATCH] [lfnst] change kvz_ to uvg_

---
 src/encode_coding_tree.c               | 12 +++++------
 src/lfnst_tables.h                     |  8 ++++----
 src/search_intra.c                     |  8 ++++----
 src/strategies/avx2/quant-avx2.c       |  4 ++--
 src/strategies/generic/quant-generic.c |  4 ++--
 src/transform.c                        | 28 +++++++++++++-------------
 src/transform.h                        |  4 ++--
 7 files changed, 34 insertions(+), 34 deletions(-)

diff --git a/src/encode_coding_tree.c b/src/encode_coding_tree.c
index a5d5262a..c5eee014 100644
--- a/src/encode_coding_tree.c
+++ b/src/encode_coding_tree.c
@@ -130,11 +130,11 @@ static int get_isp_split_dim(const int width, const int height, const int isp_sp
     non_split_dim_size = height;
   }

-  const int min_num_samples_cu = 1 << ((kvz_math_floor_log2(MIN_TB_SIZE_Y) << 1));
-  const int factor_to_min_samples = non_split_dim_size < min_num_samples_cu ? min_num_samples_cu >> kvz_math_floor_log2(non_split_dim_size) : 1;
+  const int min_num_samples_cu = 1 << ((uvg_math_floor_log2(MIN_TB_SIZE_Y) << 1));
+  const int factor_to_min_samples = non_split_dim_size < min_num_samples_cu ? min_num_samples_cu >> uvg_math_floor_log2(non_split_dim_size) : 1;
   partition_size = (split_dim_size >> div_shift) < factor_to_min_samples ? factor_to_min_samples : (split_dim_size >> div_shift);

-  assert(!(kvz_math_floor_log2(partition_size) + kvz_math_floor_log2(non_split_dim_size) < kvz_math_floor_log2(min_num_samples_cu)) && "Partition has less than minimum amount of samples.");
+  assert(!(uvg_math_floor_log2(partition_size) + uvg_math_floor_log2(non_split_dim_size) < uvg_math_floor_log2(min_num_samples_cu)) && "Partition has less than minimum amount of samples.");
   return partition_size;
 }

@@ -217,7 +217,7 @@ static bool encode_lfnst_idx(encoder_state_t * const state, cabac_data_t * const
       // TODO: this works only for square blocks
       const int pu_x = x + ((i % tu_row_length) * tu_width);
       const int pu_y = y + ((i / tu_row_length) * tu_height);
-      const cu_info_t* cur_tu = kvz_cu_array_at_const(frame->cu_array, pu_x, pu_y);
+      const cu_info_t* cur_tu = uvg_cu_array_at_const(frame->cu_array, pu_x, pu_y);
       assert(cur_tu != NULL && "NULL transform unit.");

       bool cbf_set = cbf_is_set(cur_tu->cbf, tr_depth, COLOR_Y);
@@ -1207,7 +1207,7 @@ void uvg_encode_intra_luma_coding_unit(const encoder_state_t * const state,
        }
      }

-      kvz_cabac_encode_trunc_bin(cabac, tmp_pred, 67 - INTRA_MPM_COUNT, bits_out);
+      uvg_cabac_encode_trunc_bin(cabac, tmp_pred, 67 - INTRA_MPM_COUNT, bits_out);
     }
   }
   if (cabac->only_count && bits_out) *bits_out += bits;
 }
@@ -1658,7 +1658,7 @@ void uvg_encode_coding_tree(encoder_state_t * const state,
     if (state->encoder_control->chroma_format != UVG_CSP_400 && depth == 4 && x % 8 && y % 8) {
       encode_chroma_intra_cu(cabac, cur_cu, state->encoder_control->cfg.cclm);
       // LFNST constraints must be reset here. Otherwise the left over values will interfere when calculating new constraints
-      cu_info_t* tmp = kvz_cu_array_at(frame->cu_array, x, y);
+      cu_info_t* tmp = uvg_cu_array_at(frame->cu_array, x, y);
       tmp->violates_lfnst_constrained[0] = false;
       tmp->violates_lfnst_constrained[1] = false;
       tmp->lfnst_last_scan_pos = false;
diff --git a/src/lfnst_tables.h b/src/lfnst_tables.h
index e9ba505c..0adf8b7f 100644
--- a/src/lfnst_tables.h
+++ b/src/lfnst_tables.h
@@ -48,12 +48,12 @@
 #define NUM_EXT_LUMA_MODE 28
 #define DIA_IDX 34

-const uint8_t kvz_lfnst_lut[NUM_INTRA_MODE + NUM_EXT_LUMA_MODE - 1] =
+const uint8_t uvg_lfnst_lut[NUM_INTRA_MODE + NUM_EXT_LUMA_MODE - 1] =
 {//0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94
    0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
 };

-const int8_t kvz_lfnst_8x8[4][2][16][48] = {
+const int8_t uvg_lfnst_8x8[4][2][16][48] = {
   { //0
     {
       { -117, 28, 18, 2, 4, 1, 2, 1, 32, -18, -2, 0, -1, 0, 0, 0, 14, -1, -3, 0, -1, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 3, 0, -1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0 },
@@ -208,7 +208,7 @@ const int8_t kvz_lfnst_8x8[4][2][16][48] = {
   }
 };

-const int8_t kvz_lfnst_4x4[4][2][16][16] = {
+const int8_t uvg_lfnst_4x4[4][2][16][16] = {
   { //0
     {
       { 108, -44, -15, 1, -44, 19, 7, -1, -11, 6, 2, -1, 0, -1, -1, 0 },
@@ -363,7 +363,7 @@ const int8_t kvz_lfnst_4x4[4][2][16][16] = {
   }
 };

-const uint32_t kvz_coef_top_left_diag_scan_8x8[8][64] = {
+const uint32_t uvg_coef_top_left_diag_scan_8x8[8][64] = {
   {0, 1, 1, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 6, 4, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 10, 4, 5, 5, 6, 6, 6, 7, 7, 7, 7, 8, 8, 8, 9, 9, 10, 8, 9, 9, 10, 10, 10, 11, 11, 11, 11, 12, 12, 12, 13, 13, 14},
   {0, 2, 1, 4, 3, 2, 6, 5, 4, 3, 7, 6, 5, 8, 7, 9, 8, 10, 9, 12, 11, 10, 14, 13, 12, 11, 15, 14, 13, 16, 15, 17, 4, 6, 5, 8, 7, 6, 10, 9, 8, 7, 11, 10, 9, 12, 11, 13, 12, 14, 13, 16, 15, 14, 18, 17, 16, 15, 19, 18, 17, 20, 19, 21},
   {0, 4, 1, 8, 5, 2, 12, 9, 6, 3, 13, 10, 7, 14, 11, 15, 16, 20, 17, 24, 21, 18, 28, 25, 22, 19, 29, 26, 23, 30, 27, 31, 4, 8, 5, 12, 9, 6, 16, 13, 10, 7, 17, 14, 11, 18, 15, 19, 20, 24, 21, 28, 25, 22, 32, 29, 26, 23, 33, 30, 27, 34, 31, 35},
diff --git a/src/search_intra.c b/src/search_intra.c
index a3295653..7760a2f1 100644
--- a/src/search_intra.c
+++ b/src/search_intra.c
@@ -259,11 +259,11 @@ static void derive_lfnst_constraints(cu_info_t* const pred_cu,
   const int lcu_x = (lcu_px.x >> shift) & ~3;
   const int lcu_y = (lcu_px.y >> shift) & ~3;

-  int8_t scan_idx = kvz_get_scan_order(pred_cu->type, pred_cu->intra.mode, depth);
+  int8_t scan_idx = uvg_get_scan_order(pred_cu->type, pred_cu->intra.mode, depth);
   // ToDo: large block support in VVC?
-  const uint32_t log2_block_size = kvz_g_convert_to_bit[width] + 2;
-  const uint32_t* scan = kvz_g_sig_last_scan[scan_idx][log2_block_size - 1];
+  const uint32_t log2_block_size = uvg_g_convert_to_bit[width] + 2;
+  const uint32_t* scan = uvg_g_sig_last_scan[scan_idx][log2_block_size - 1];

   const coeff_t* coeff;
   switch (color) {
     case COLOR_Y:
@@ -462,7 +462,7 @@ static double search_intra_trdepth(
                                    depth,
                                    search_data,
                                    pred_cu,
                                    lcu);
-      best_rd_cost += kvz_cu_rd_cost_chroma(state, lcu_px.x, lcu_px.y, depth, pred_cu, lcu);
+      best_rd_cost += uvg_cu_rd_cost_chroma(state, lcu_px.x, lcu_px.y, depth, pred_cu, lcu);
       pred_cu->intra.mode = luma_mode;

       // Check lfnst constraints for chroma
diff --git a/src/strategies/avx2/quant-avx2.c b/src/strategies/avx2/quant-avx2.c
index c37e3bc1..e4002583 100644
--- a/src/strategies/avx2/quant-avx2.c
+++ b/src/strategies/avx2/quant-avx2.c
@@ -707,7 +707,7 @@ int uvg_quantize_residual_avx2(encoder_state_t *const state,

   if (state->encoder_control->cfg.lfnst && cur_cu->type == CU_INTRA) {
     // Forward low frequency non-separable transform
-    kvz_fwd_lfnst(cur_cu, width, height, color, lfnst_index, coeff);
+    uvg_fwd_lfnst(cur_cu, width, height, color, lfnst_index, coeff);
   }

   // Quantize coeffs. (coeff -> coeff_out)
@@ -745,7 +745,7 @@ int uvg_quantize_residual_avx2(encoder_state_t *const state,

     if (state->encoder_control->cfg.lfnst && cur_cu->type == CU_INTRA) {
       // Inverse low frequency non-separable transform
-      kvz_inv_lfnst(cur_cu, width, height, color, lfnst_index, coeff);
+      uvg_inv_lfnst(cur_cu, width, height, color, lfnst_index, coeff);
     }
     if (use_trskip) {
       uvg_itransformskip(state->encoder_control, residual, coeff, width);
diff --git a/src/strategies/generic/quant-generic.c b/src/strategies/generic/quant-generic.c
index 3517b5b0..9ccb1d16 100644
--- a/src/strategies/generic/quant-generic.c
+++ b/src/strategies/generic/quant-generic.c
@@ -463,7 +463,7 @@ int uvg_quantize_residual_generic(encoder_state_t *const state,

   if (state->encoder_control->cfg.lfnst && cur_cu->type == CU_INTRA) {
     // Forward low frequency non-separable transform
-    kvz_fwd_lfnst(cur_cu, width, height, color, lfnst_index, coeff);
+    uvg_fwd_lfnst(cur_cu, width, height, color, lfnst_index, coeff);
   }


@@ -507,7 +507,7 @@ int uvg_quantize_residual_generic(encoder_state_t *const state,

     if (state->encoder_control->cfg.lfnst && cur_cu->type == CU_INTRA) {
       // Inverse low frequency non-separable transform
-      kvz_inv_lfnst(cur_cu, width, height, color, lfnst_index, coeff);
+      uvg_inv_lfnst(cur_cu, width, height, color, lfnst_index, coeff);
     }
     if (use_trskip) {
       uvg_itransformskip(state->encoder_control, residual, coeff, width);
diff --git a/src/transform.c b/src/transform.c
index cb3b85ba..50e5a9c5 100644
--- a/src/transform.c
+++ b/src/transform.c
@@ -232,9 +232,9 @@ void uvg_itransform2d(const encoder_control_t * const encoder,
   }
 }

-void kvz_fwd_lfnst_NxN(coeff_t *src, coeff_t *dst, const int8_t mode, const int8_t index, const int8_t size, int zero_out_size)
+void uvg_fwd_lfnst_NxN(coeff_t *src, coeff_t *dst, const int8_t mode, const int8_t index, const int8_t size, int zero_out_size)
 {
-  const int8_t *tr_mat = (size > 4) ? kvz_lfnst_8x8[mode][index][0] : kvz_lfnst_4x4[mode][index][0];
+  const int8_t *tr_mat = (size > 4) ? uvg_lfnst_8x8[mode][index][0] : uvg_lfnst_4x4[mode][index][0];
   const int tr_size = (size > 4) ? 48 : 16;
   int coef;
   coeff_t *out = dst;
@@ -277,7 +277,7 @@ static inline bool get_transpose_flag(const int8_t intra_mode)
          ((intra_mode < NUM_LUMA_MODE) && (intra_mode > DIA_IDX));
 }

-void kvz_fwd_lfnst(const cu_info_t* const cur_cu,
+void uvg_fwd_lfnst(const cu_info_t* const cur_cu,
   const int width, const int height,
   const uint8_t color,
   const uint16_t lfnst_idx,
@@ -295,14 +295,14 @@ void kvz_fwd_lfnst(const cu_info_t* const cur_cu,

   const int cu_type = cur_cu->type;

-  const int scan_order = kvz_get_scan_order(cu_type, intra_mode, depth);
+  const int scan_order = uvg_get_scan_order(cu_type, intra_mode, depth);

   if (lfnst_index && !mts_skip && (is_separate_tree || color == COLOR_Y)) {
-    const uint32_t log2_block_size = kvz_g_convert_to_bit[width] + 2;
+    const uint32_t log2_block_size = uvg_g_convert_to_bit[width] + 2;
     assert(log2_block_size != -1 && "LFNST: invalid block width.");
     const bool whge3 = width >= 8 && height >= 8;
-    const uint32_t* scan = whge3 ? kvz_coef_top_left_diag_scan_8x8[log2_block_size] : kvz_g_sig_last_scan[scan_order][log2_block_size - 1];
+    const uint32_t* scan = whge3 ? uvg_coef_top_left_diag_scan_8x8[log2_block_size] : uvg_g_sig_last_scan[scan_order][log2_block_size - 1];

     if (is_cclm_mode) {
       intra_mode = cur_cu->intra.mode;
@@ -367,7 +367,7 @@ void kvz_fwd_lfnst(const cu_info_t* const cur_cu,
       }
     }

-    kvz_fwd_lfnst_NxN(tmp_in_matrix, tmp_out_matrix, kvz_lfnst_lut[intra_mode], lfnst_index - 1, sb_size,
+    uvg_fwd_lfnst_NxN(tmp_in_matrix, tmp_out_matrix, uvg_lfnst_lut[intra_mode], lfnst_index - 1, sb_size,
       (tu_4x4 || tu_8x8) ? 8 : 16);
     lfnst_tmp = tmp_out_matrix; // forward spectral rearrangement
@@ -383,11 +383,11 @@
   }
 }

-void kvz_inv_lfnst_NxN(coeff_t *src, coeff_t *dst, const uint32_t mode, const uint32_t index, const uint32_t size, int zero_out_size, const int max_log2_tr_dyn_range)
+void uvg_inv_lfnst_NxN(coeff_t *src, coeff_t *dst, const uint32_t mode, const uint32_t index, const uint32_t size, int zero_out_size, const int max_log2_tr_dyn_range)
 {
   const coeff_t output_min = -(1 << max_log2_tr_dyn_range);
   const coeff_t output_max = (1 << max_log2_tr_dyn_range) - 1;
-  const int8_t *tr_mat = (size > 4) ? kvz_lfnst_8x8[mode][index][0] : kvz_lfnst_4x4[mode][index][0];
+  const int8_t *tr_mat = (size > 4) ? uvg_lfnst_8x8[mode][index][0] : uvg_lfnst_4x4[mode][index][0];
   const int tr_size = (size > 4) ? 48 : 16;
   int resi;
   coeff_t *out = dst;
@@ -408,7 +408,7 @@ void kvz_inv_lfnst_NxN(coeff_t *src, coeff_t *dst, const uint32_t mode, const ui
   }
 }

-void kvz_inv_lfnst(const cu_info_t *cur_cu,
+void uvg_inv_lfnst(const cu_info_t *cur_cu,
   const int width, const int height,
   const uint8_t color,
   const uint16_t lfnst_idx,
@@ -429,12 +429,12 @@ void kvz_inv_lfnst(const cu_info_t *cur_cu,

   const int cu_type = cur_cu->type;

-  const int scan_order = kvz_get_scan_order(cu_type, intra_mode, depth);
+  const int scan_order = uvg_get_scan_order(cu_type, intra_mode, depth);

   if (lfnst_index && !mts_skip && (is_separate_tree || color == COLOR_Y)) {
-    const uint32_t log2_block_size = kvz_g_convert_to_bit[width] + 2;
+    const uint32_t log2_block_size = uvg_g_convert_to_bit[width] + 2;
     const bool whge3 = width >= 8 && height >= 8;
-    const uint32_t* scan = whge3 ? kvz_coef_top_left_diag_scan_8x8[log2_block_size] : kvz_g_sig_last_scan[scan_order][log2_block_size - 1];
+    const uint32_t* scan = whge3 ? uvg_coef_top_left_diag_scan_8x8[log2_block_size] : uvg_g_sig_last_scan[scan_order][log2_block_size - 1];

     if (is_cclm_mode) {
       intra_mode = cur_cu->intra.mode;
@@ -469,7 +469,7 @@ void kvz_inv_lfnst(const cu_info_t *cur_cu,
       scan_ptr++;
     }

-    kvz_inv_lfnst_NxN(tmp_in_matrix, tmp_out_matrix, kvz_lfnst_lut[intra_mode], lfnst_index - 1, sb_size,
+    uvg_inv_lfnst_NxN(tmp_in_matrix, tmp_out_matrix, uvg_lfnst_lut[intra_mode], lfnst_index - 1, sb_size,
       (tu_4x4_flag || tu_8x8_flag) ? 8 : 16, max_log2_dyn_range);
     lfnst_tmp = tmp_out_matrix; // inverse low frequency non-separale transform
diff --git a/src/transform.h b/src/transform.h
index 92cefc41..a18d4d47 100644
--- a/src/transform.h
+++ b/src/transform.h
@@ -79,13 +79,13 @@ void uvg_quantize_lcu_residual(
   lcu_t* lcu,
   bool early_skip);

-void kvz_fwd_lfnst(const cu_info_t* const cur_cu,
+void uvg_fwd_lfnst(const cu_info_t* const cur_cu,
   const int width, const int height,
   const uint8_t color,
   const uint16_t lfnst_idx,
   coeff_t *coeffs);

-void kvz_inv_lfnst(const cu_info_t* cur_cu,
+void uvg_inv_lfnst(const cu_info_t* cur_cu,
   const int width, const int height,
   const uint8_t color,
   const uint16_t lfnst_idx,
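
Note on the renamed LFNST helpers: uvg_fwd_lfnst_NxN uses the 8-bit kernels in uvg_lfnst_8x8 / uvg_lfnst_4x4 (tr_size 48 or 16) and keeps only the first zero_out_size outputs, as visible in the hunks above. The following stand-alone C sketch illustrates that general structure; the kernel values, the (coef + 64) >> 7 rounding and the helper name fwd_lfnst_sketch are illustrative assumptions for this note, not code taken from uvg266.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef int16_t coeff_t;

/* Illustrative forward LFNST-style core: each output is a dot product of the
 * input coefficients with one row of an 8-bit kernel, followed by a rounding
 * shift. Outputs past zero_out_size are zeroed (the "zero-out" region). */
static void fwd_lfnst_sketch(const coeff_t *src, coeff_t *dst,
                             const int8_t *kernel, int tr_size, int zero_out_size)
{
  for (int j = 0; j < zero_out_size; j++) {
    int coef = 0;
    for (int i = 0; i < tr_size; i++) {
      coef += src[i] * kernel[j * tr_size + i];
    }
    dst[j] = (coeff_t)((coef + 64) >> 7); /* assumed rounding for 8-bit weights */
  }
  memset(dst + zero_out_size, 0,
         (size_t)(tr_size - zero_out_size) * sizeof(coeff_t));
}

int main(void)
{
  /* Toy 4x4 case (tr_size = 16, 8 surviving outputs) with a made-up kernel. */
  coeff_t src[16], dst[16];
  int8_t kernel[8 * 16] = { 0 };
  for (int j = 0; j < 8; j++) kernel[j * 16 + j] = 64; /* scaled identity rows */
  for (int i = 0; i < 16; i++) src[i] = (coeff_t)(i + 1);

  fwd_lfnst_sketch(src, dst, kernel, 16, 8);
  for (int j = 0; j < 16; j++) printf("%d ", dst[j]);
  printf("\n");
  return 0;
}

The inverse helper (uvg_inv_lfnst_NxN) performs the corresponding inverse mapping and additionally clamps each output to the range derived from max_log2_tr_dyn_range (output_min / output_max in the hunk above), which is why it takes that extra argument.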