2014-01-24 10:37:15 +00:00
|
|
|
/*****************************************************************************
|
|
|
|
* This file is part of Kvazaar HEVC encoder.
|
2014-02-21 13:00:20 +00:00
|
|
|
*
|
2015-02-23 11:18:48 +00:00
|
|
|
* Copyright (C) 2013-2015 Tampere University of Technology and others (see
|
2014-01-24 10:37:15 +00:00
|
|
|
* COPYING file).
|
|
|
|
*
|
2015-02-23 11:18:48 +00:00
|
|
|
* Kvazaar is free software: you can redistribute it and/or modify it under
|
|
|
|
* the terms of the GNU Lesser General Public License as published by the
|
|
|
|
* Free Software Foundation; either version 2.1 of the License, or (at your
|
|
|
|
* option) any later version.
|
2014-01-24 10:37:15 +00:00
|
|
|
*
|
2015-02-23 11:18:48 +00:00
|
|
|
* Kvazaar is distributed in the hope that it will be useful, but WITHOUT ANY
|
|
|
|
* WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
|
|
|
|
* FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
|
|
|
|
* more details.
|
2014-01-24 10:37:15 +00:00
|
|
|
*
|
2015-02-23 11:18:48 +00:00
|
|
|
 * You should have received a copy of the GNU Lesser General Public License
|
|
|
|
* with Kvazaar. If not, see <http://www.gnu.org/licenses/>.
|
2014-01-24 10:37:15 +00:00
|
|
|
****************************************************************************/
|
|
|
|
|
2013-09-18 09:16:03 +00:00
|
|
|
#include "transform.h"
|
|
|
|
|
2016-04-01 14:14:23 +00:00
|
|
|
#include "image.h"
|
|
|
|
#include "kvazaar.h"
|
2014-05-12 08:35:40 +00:00
|
|
|
#include "rdo.h"
|
2014-07-29 15:10:47 +00:00
|
|
|
#include "strategies/strategies-dct.h"
|
2015-10-29 14:41:01 +00:00
|
|
|
#include "strategies/strategies-picture.h"
|
2016-04-01 14:14:23 +00:00
|
|
|
#include "strategies/strategies-quant.h"
|
|
|
|
#include "tables.h"
|
|
|
|
|
2012-06-11 15:43:29 +00:00
|
|
|
|
2013-09-20 08:50:53 +00:00
|
|
|
//////////////////////////////////////////////////////////////////////////
|
|
|
|
// INITIALIZATIONS
|
2014-02-21 13:00:20 +00:00
|
|
|
//
|
config: Add --cqmfile to use custom quantization matrices from a file.
The coefficients in a matrix are stored in up-right diagonal order.
The following indicates the default matrices specified in the spec.
INTRA4X4_LUMA
16, 16, 16, 16,
16, 16, 16, 16,
16, 16, 16, 16,
16, 16, 16, 16
INTRA4X4_CHROMAU
16, 16, 16, 16,
16, 16, 16, 16,
16, 16, 16, 16,
16, 16, 16, 16
INTRA4X4_CHROMAV
16, 16, 16, 16,
16, 16, 16, 16,
16, 16, 16, 16,
16, 16, 16, 16
INTER4X4_LUMA
16, 16, 16, 16,
16, 16, 16, 16,
16, 16, 16, 16,
16, 16, 16, 16
INTER4X4_CHROMAU
16, 16, 16, 16,
16, 16, 16, 16,
16, 16, 16, 16,
16, 16, 16, 16
INTER4X4_CHROMAV
16, 16, 16, 16,
16, 16, 16, 16,
16, 16, 16, 16,
16, 16, 16, 16
INTRA8X8_LUMA
16, 16, 16, 16, 17, 18, 21, 24,
16, 16, 16, 16, 17, 19, 22, 25,
16, 16, 17, 18, 20, 22, 25, 29,
16, 16, 18, 21, 24, 27, 31, 36,
17, 17, 20, 24, 30, 35, 41, 47,
18, 19, 22, 27, 35, 44, 54, 65,
21, 22, 25, 31, 41, 54, 70, 88,
24, 25, 29, 36, 47, 65, 88, 115
INTRA8X8_CHROMAU
16, 16, 16, 16, 17, 18, 21, 24,
16, 16, 16, 16, 17, 19, 22, 25,
16, 16, 17, 18, 20, 22, 25, 29,
16, 16, 18, 21, 24, 27, 31, 36,
17, 17, 20, 24, 30, 35, 41, 47,
18, 19, 22, 27, 35, 44, 54, 65,
21, 22, 25, 31, 41, 54, 70, 88,
24, 25, 29, 36, 47, 65, 88, 115
INTRA8X8_CHROMAV
16, 16, 16, 16, 17, 18, 21, 24,
16, 16, 16, 16, 17, 19, 22, 25,
16, 16, 17, 18, 20, 22, 25, 29,
16, 16, 18, 21, 24, 27, 31, 36,
17, 17, 20, 24, 30, 35, 41, 47,
18, 19, 22, 27, 35, 44, 54, 65,
21, 22, 25, 31, 41, 54, 70, 88,
24, 25, 29, 36, 47, 65, 88, 115
INTER8X8_LUMA
16, 16, 16, 16, 17, 18, 20, 24,
16, 16, 16, 17, 18, 20, 24, 25,
16, 16, 17, 18, 20, 24, 25, 28,
16, 17, 18, 20, 24, 25, 28, 33,
17, 18, 20, 24, 25, 28, 33, 41,
18, 20, 24, 25, 28, 33, 41, 54,
20, 24, 25, 28, 33, 41, 54, 71,
24, 25, 28, 33, 41, 54, 71, 91
INTER8X8_CHROMAU
16, 16, 16, 16, 17, 18, 20, 24,
16, 16, 16, 17, 18, 20, 24, 25,
16, 16, 17, 18, 20, 24, 25, 28,
16, 17, 18, 20, 24, 25, 28, 33,
17, 18, 20, 24, 25, 28, 33, 41,
18, 20, 24, 25, 28, 33, 41, 54,
20, 24, 25, 28, 33, 41, 54, 71,
24, 25, 28, 33, 41, 54, 71, 91
INTER8X8_CHROMAV
16, 16, 16, 16, 17, 18, 20, 24,
16, 16, 16, 17, 18, 20, 24, 25,
16, 16, 17, 18, 20, 24, 25, 28,
16, 17, 18, 20, 24, 25, 28, 33,
17, 18, 20, 24, 25, 28, 33, 41,
18, 20, 24, 25, 28, 33, 41, 54,
20, 24, 25, 28, 33, 41, 54, 71,
24, 25, 28, 33, 41, 54, 71, 91
INTRA16X16_LUMA
16, 16, 16, 16, 17, 18, 21, 24,
16, 16, 16, 16, 17, 19, 22, 25,
16, 16, 17, 18, 20, 22, 25, 29,
16, 16, 18, 21, 24, 27, 31, 36,
17, 17, 20, 24, 30, 35, 41, 47,
18, 19, 22, 27, 35, 44, 54, 65,
21, 22, 25, 31, 41, 54, 70, 88,
24, 25, 29, 36, 47, 65, 88, 115
INTRA16X16_CHROMAU
16, 16, 16, 16, 17, 18, 21, 24,
16, 16, 16, 16, 17, 19, 22, 25,
16, 16, 17, 18, 20, 22, 25, 29,
16, 16, 18, 21, 24, 27, 31, 36,
17, 17, 20, 24, 30, 35, 41, 47,
18, 19, 22, 27, 35, 44, 54, 65,
21, 22, 25, 31, 41, 54, 70, 88,
24, 25, 29, 36, 47, 65, 88, 115
INTRA16X16_CHROMAV
16, 16, 16, 16, 17, 18, 21, 24,
16, 16, 16, 16, 17, 19, 22, 25,
16, 16, 17, 18, 20, 22, 25, 29,
16, 16, 18, 21, 24, 27, 31, 36,
17, 17, 20, 24, 30, 35, 41, 47,
18, 19, 22, 27, 35, 44, 54, 65,
21, 22, 25, 31, 41, 54, 70, 88,
24, 25, 29, 36, 47, 65, 88, 115
INTER16X16_LUMA
16, 16, 16, 16, 17, 18, 20, 24,
16, 16, 16, 17, 18, 20, 24, 25,
16, 16, 17, 18, 20, 24, 25, 28,
16, 17, 18, 20, 24, 25, 28, 33,
17, 18, 20, 24, 25, 28, 33, 41,
18, 20, 24, 25, 28, 33, 41, 54,
20, 24, 25, 28, 33, 41, 54, 71,
24, 25, 28, 33, 41, 54, 71, 91
INTER16X16_CHROMAU
16, 16, 16, 16, 17, 18, 20, 24,
16, 16, 16, 17, 18, 20, 24, 25,
16, 16, 17, 18, 20, 24, 25, 28,
16, 17, 18, 20, 24, 25, 28, 33,
17, 18, 20, 24, 25, 28, 33, 41,
18, 20, 24, 25, 28, 33, 41, 54,
20, 24, 25, 28, 33, 41, 54, 71,
24, 25, 28, 33, 41, 54, 71, 91
INTER16X16_CHROMAV
16, 16, 16, 16, 17, 18, 20, 24,
16, 16, 16, 17, 18, 20, 24, 25,
16, 16, 17, 18, 20, 24, 25, 28,
16, 17, 18, 20, 24, 25, 28, 33,
17, 18, 20, 24, 25, 28, 33, 41,
18, 20, 24, 25, 28, 33, 41, 54,
20, 24, 25, 28, 33, 41, 54, 71,
24, 25, 28, 33, 41, 54, 71, 91
INTRA32X32_LUMA
16, 16, 16, 16, 17, 18, 21, 24,
16, 16, 16, 16, 17, 19, 22, 25,
16, 16, 17, 18, 20, 22, 25, 29,
16, 16, 18, 21, 24, 27, 31, 36,
17, 17, 20, 24, 30, 35, 41, 47,
18, 19, 22, 27, 35, 44, 54, 65,
21, 22, 25, 31, 41, 54, 70, 88,
24, 25, 29, 36, 47, 65, 88, 115
INTER32X32_LUMA
16, 16, 16, 16, 17, 18, 20, 24,
16, 16, 16, 17, 18, 20, 24, 25,
16, 16, 17, 18, 20, 24, 25, 28,
16, 17, 18, 20, 24, 25, 28, 33,
17, 18, 20, 24, 25, 28, 33, 41,
18, 20, 24, 25, 28, 33, 41, 54,
20, 24, 25, 28, 33, 41, 54, 71,
24, 25, 28, 33, 41, 54, 71, 91
INTRA16X16_LUMA_DC
16
INTRA16X16_CHROMAU_DC
16
INTRA16X16_CHROMAV_DC
16
INTER16X16_LUMA_DC
16
INTER16X16_CHROMAU_DC
16
INTER16X16_CHROMAV_DC
16
INTRA32X32_LUMA_DC
16
INTER32X32_LUMA_DC
16
2014-02-11 10:55:21 +00:00
|
|
|
|
2013-03-07 15:42:00 +00:00
|
|
|
|
2015-08-26 08:50:27 +00:00
|
|
|
// Mapping from clipped luma QP to chroma QP. Identity for QP 0..29; above
// that the chroma QP grows more slowly than the luma QP (note the repeated
// values 29, 33, 34, ... in the table).
const uint8_t kvz_g_chroma_scale[58]=
{
   0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,
  17,18,19,20,21,22,23,24,25,26,27,28,29,29,30,31,32,
  33,33,34,34,35,35,36,36,37,37,38,39,40,41,42,43,44,
  45,46,47,48,49,50,51
};
|
|
|
|
|
2013-09-20 08:50:53 +00:00
|
|
|
//////////////////////////////////////////////////////////////////////////
|
|
|
|
// FUNCTIONS
|
2014-02-21 13:00:20 +00:00
|
|
|
//
|
2012-06-11 15:43:29 +00:00
|
|
|
|
2014-02-21 13:41:55 +00:00
|
|
|
/**
|
|
|
|
* \brief Get scaled QP used in quantization
|
2014-02-21 13:00:20 +00:00
|
|
|
*
|
2014-02-21 13:41:55 +00:00
|
|
|
*/
|
2015-08-26 08:50:27 +00:00
|
|
|
int32_t kvz_get_scaled_qp(int8_t type, int8_t qp, int8_t qp_offset)
|
2014-02-21 13:41:55 +00:00
|
|
|
{
|
|
|
|
int32_t qp_scaled = 0;
|
|
|
|
if(type == 0) {
|
|
|
|
qp_scaled = qp + qp_offset;
|
|
|
|
} else {
|
|
|
|
qp_scaled = CLIP(-qp_offset, 57, qp);
|
|
|
|
if(qp_scaled < 0) {
|
|
|
|
qp_scaled = qp_scaled + qp_offset;
|
|
|
|
} else {
|
2015-08-26 08:50:27 +00:00
|
|
|
qp_scaled = kvz_g_chroma_scale[qp_scaled] + qp_offset;
|
2014-02-21 13:41:55 +00:00
|
|
|
}
|
|
|
|
}
|
|
|
|
return qp_scaled;
|
|
|
|
}
|
2013-09-20 08:50:53 +00:00
|
|
|
|
2014-04-02 07:54:03 +00:00
|
|
|
/**
|
|
|
|
* \brief NxN inverse transform (2D)
|
|
|
|
* \param coeff input data (transform coefficients)
|
|
|
|
* \param block output data (residual)
|
|
|
|
* \param block_size input data (width of transform)
|
|
|
|
*/
|
2015-08-26 08:50:27 +00:00
|
|
|
void kvz_transformskip(const encoder_control_t * const encoder, int16_t *block,int16_t *coeff, int8_t block_size)
|
2014-04-02 07:54:03 +00:00
|
|
|
{
|
2015-08-26 08:50:27 +00:00
|
|
|
uint32_t log2_tr_size = kvz_g_convert_to_bit[block_size] + 2;
|
2014-04-17 08:28:20 +00:00
|
|
|
int32_t shift = MAX_TR_DYNAMIC_RANGE - encoder->bitdepth - log2_tr_size;
|
2014-04-02 07:54:03 +00:00
|
|
|
int32_t j,k;
|
|
|
|
for (j = 0; j < block_size; j++) {
|
|
|
|
for(k = 0; k < block_size; k ++) {
|
|
|
|
coeff[j * block_size + k] = block[j * block_size + k] << shift;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* \brief inverse transform skip
|
|
|
|
* \param coeff input data (transform coefficients)
|
|
|
|
* \param block output data (residual)
|
|
|
|
* \param block_size width of transform
|
|
|
|
*/
|
2015-08-26 08:50:27 +00:00
|
|
|
void kvz_itransformskip(const encoder_control_t * const encoder, int16_t *block,int16_t *coeff, int8_t block_size)
|
2014-04-02 07:54:03 +00:00
|
|
|
{
|
2015-08-26 08:50:27 +00:00
|
|
|
uint32_t log2_tr_size = kvz_g_convert_to_bit[block_size] + 2;
|
2014-04-17 08:28:20 +00:00
|
|
|
int32_t shift = MAX_TR_DYNAMIC_RANGE - encoder->bitdepth - log2_tr_size;
|
2014-04-02 07:54:03 +00:00
|
|
|
int32_t j,k;
|
|
|
|
int32_t offset;
|
|
|
|
offset = (1 << (shift -1)); // For rounding
|
|
|
|
for ( j = 0; j < block_size; j++ ) {
|
|
|
|
for(k = 0; k < block_size; k ++) {
|
|
|
|
block[j * block_size + k] = (coeff[j * block_size + k] + offset) >> shift;
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
2013-03-07 15:42:00 +00:00
|
|
|
|
2014-02-21 13:00:20 +00:00
|
|
|
/**
|
2013-09-20 08:50:53 +00:00
|
|
|
* \brief forward transform (2D)
|
|
|
|
* \param block input residual
|
|
|
|
* \param coeff transform coefficients
|
|
|
|
* \param block_size width of transform
|
|
|
|
*/
|
2015-08-26 08:50:27 +00:00
|
|
|
void kvz_transform2d(const encoder_control_t * const encoder, int16_t *block, int16_t *coeff, int8_t block_size, int32_t mode)
|
2013-02-05 13:48:06 +00:00
|
|
|
{
|
2015-08-26 08:50:27 +00:00
|
|
|
dct_func *dct_func = kvz_get_dct_func(block_size, mode);
|
2014-07-29 15:10:47 +00:00
|
|
|
dct_func(encoder->bitdepth, block, coeff);
|
2013-02-05 13:48:06 +00:00
|
|
|
}
|
|
|
|
|
2015-08-26 08:50:27 +00:00
|
|
|
/**
 * \brief inverse transform (2D)
 * \param block      output residual
 * \param coeff      input transform coefficients
 * \param block_size width of transform
 * \param mode       prediction mode, used to select IDCT vs. IDST variant
 */
void kvz_itransform2d(const encoder_control_t * const encoder, int16_t *block, int16_t *coeff, int8_t block_size, int32_t mode)
{
  // Dispatch to the inverse transform selected for this size/mode.
  dct_func *const inverse = kvz_get_idct_func(block_size, mode);
  inverse(encoder->bitdepth, coeff, block);
}
|
2013-02-05 13:48:06 +00:00
|
|
|
|
2014-05-13 16:41:29 +00:00
|
|
|
/**
 * \brief Like kvz_quantize_residual except that this uses trskip if that is better.
 *
 * Using this function saves one step of quantization and inverse quantization
 * compared to doing the decision separately from the actual operation.
 *
 * \param width        Transform width.
 * \param color        Color.
 * \param scan_order   Coefficient scan order.
 * \param trskip_out   Whether transform skip is used.
 * \param in_stride    Stride for ref_in and pred_in.
 * \param out_stride   Stride for rec_out and coeff_out.
 * \param ref_in       Reference pixels.
 * \param pred_in      Predicted pixels.
 * \param rec_out      Reconstructed pixels.
 * \param coeff_out    Coefficients used for reconstruction of rec_out.
 *
 * \returns Whether coeff_out contains any non-zero coefficients.
 *
 * NOTE(review): the temporary buffers and SSD/coeff-cost calls below are
 * hard-coded to 4x4, so this function appears to assume width == 4 (HEVC
 * allows transform skip only for 4x4 blocks) — confirm at call sites.
 */
int kvz_quantize_residual_trskip(
    encoder_state_t *const state,
    const cu_info_t *const cur_cu, const int width, const color_t color,
    const coeff_scan_order_t scan_order, int8_t *trskip_out,
    const int in_stride, const int out_stride,
    const kvz_pixel *const ref_in, const kvz_pixel *const pred_in,
    kvz_pixel *rec_out, coeff_t *coeff_out)
{
  // One candidate result per mode: with and without transform skip.
  struct {
    kvz_pixel rec[4*4];     // reconstruction for this candidate
    coeff_t coeff[4*4];     // quantized coefficients for this candidate
    uint32_t cost;          // SSD + lambda-weighted coefficient bit cost
    int has_coeffs;         // non-zero coefficients present?
  } skip, noskip, *best;

  // Lambda rounded to an integer bit cost multiplier.
  const int bit_cost = (int)(state->global->cur_lambda_cost+0.5);

  // Candidate 1: regular transform path (trskip flag = 0).
  noskip.has_coeffs = kvz_quantize_residual(
      state, cur_cu, width, color, scan_order,
      0, in_stride, 4,
      ref_in, pred_in, noskip.rec, noskip.coeff);
  noskip.cost = kvz_pixels_calc_ssd(ref_in, noskip.rec, in_stride, 4, 4);
  noskip.cost += kvz_get_coeff_cost(state, noskip.coeff, 4, 0, scan_order) * bit_cost;

  // Candidate 2: transform skip path (trskip flag = 1).
  skip.has_coeffs = kvz_quantize_residual(
      state, cur_cu, width, color, scan_order,
      1, in_stride, 4,
      ref_in, pred_in, skip.rec, skip.coeff);
  skip.cost = kvz_pixels_calc_ssd(ref_in, skip.rec, in_stride, 4, 4);
  skip.cost += kvz_get_coeff_cost(state, skip.coeff, 4, 0, scan_order) * bit_cost;

  // Pick the cheaper candidate; ties favor the regular transform.
  if (noskip.cost <= skip.cost) {
    *trskip_out = 0;
    best = &noskip;
  } else {
    *trskip_out = 1;
    best = &skip;
  }

  if (best->has_coeffs || rec_out != pred_in) {
    // If there is no residual and reconstruction is already in rec_out,
    // we can skip this.
    kvz_pixels_blit(best->rec, rec_out, width, width, 4, out_stride);
  }
  kvz_coefficients_blit(best->coeff, coeff_out, width, width, 4, out_stride);

  return best->has_coeffs;
}
|
|
|
|
|
2014-05-12 08:35:40 +00:00
|
|
|
|
|
|
|
/**
 * This function calculates the residual coefficients for a region of the LCU
 * (defined by x, y and depth) and updates the reconstruction with the
 * quantized residual.
 *
 * It handles recursion for transform split, but that is currently only work
 * for 64x64 inter to 32x32 transform blocks.
 *
 * Inputs are:
 * - lcu->rec   pixels after prediction for the area
 * - lcu->ref   reference pixels for the area
 * - lcu->cu    for the area
 *
 * Outputs are:
 * - lcu->rec               reconstruction after quantized residual
 * - lcu->coeff             quantized coefficients for the area
 * - lcu->cbf               coded block flags for the area
 * - lcu->cu.intra[].tr_skip  for the area
 */
void kvz_quantize_lcu_luma_residual(encoder_state_t * const state, int32_t x, int32_t y, const uint8_t depth, cu_info_t *cur_pu, lcu_t* lcu)
{
  // we have 64>>depth transform size
  const vector2d_t lcu_px = { SUB_SCU(x), SUB_SCU(y) };
  // When the caller passes no PU, look it up from the LCU at this position.
  if (cur_pu == NULL) {
    cur_pu = LCU_GET_CU_AT_PX(lcu, lcu_px.x, lcu_px.y);
  }
  const int8_t width = LCU_WIDTH>>depth;

  // Tell clang-analyzer what is up. For some reason it can't figure out from
  // asserting just depth.
  assert(width == 4 || width == 8 || width == 16 || width == 32 || width == 64);

  // Split transform and increase depth
  if (depth == 0 || cur_pu->tr_depth > depth) {
    int offset = width / 2;
    // Recurse into the four quadrants of this block.
    kvz_quantize_lcu_luma_residual(state, x,          y,          depth+1, NULL, lcu);
    kvz_quantize_lcu_luma_residual(state, x + offset, y,          depth+1, NULL, lcu);
    kvz_quantize_lcu_luma_residual(state, x,          y + offset, depth+1, NULL, lcu);
    kvz_quantize_lcu_luma_residual(state, x + offset, y + offset, depth+1, NULL, lcu);

    // Propagate coded block flags from child CUs to parent CU.
    // NOTE(review): this guard uses `depth <= MAX_DEPTH` while the chroma
    // variant uses `depth < MAX_DEPTH` — confirm the asymmetry is intended.
    if (depth <= MAX_DEPTH) {
      uint16_t child_cbfs[3] = {
        LCU_GET_CU_AT_PX(lcu, lcu_px.x + offset, lcu_px.y         )->cbf,
        LCU_GET_CU_AT_PX(lcu, lcu_px.x,          lcu_px.y + offset)->cbf,
        LCU_GET_CU_AT_PX(lcu, lcu_px.x + offset, lcu_px.y + offset)->cbf,
      };
      cbf_set_conditionally(&cur_pu->cbf, child_cbfs, depth, COLOR_Y);
    }

    return;
  }

  {
    const int luma_offset = lcu_px.x + lcu_px.y * LCU_WIDTH;

    // Pointers to current location in arrays with prediction.
    kvz_pixel *recbase_y = &lcu->rec.y[luma_offset];
    // Pointers to current location in arrays with reference.
    const kvz_pixel *base_y = &lcu->ref.y[luma_offset];
    // Pointers to current location in arrays with quantized coefficients.
    coeff_t *orig_coeff_y = &lcu->coeff.y[luma_offset];

    coeff_scan_order_t scan_idx_luma = kvz_get_scan_order(cur_pu->type, cur_pu->intra.mode, depth);

#if OPTIMIZATION_SKIP_RESIDUAL_ON_THRESHOLD
    uint32_t residual_sum = 0;
#endif

    // Clear coded block flag structures for depths lower than current depth.
    // This should ensure that the CBF data doesn't get corrupted if this function
    // is called more than once.
    cbf_clear(&cur_pu->cbf, depth, COLOR_Y);

    // Transform skip is only considered for 4x4 blocks when enabled.
    if (width == 4 &&
        state->encoder_control->trskip_enable)
    {
      // Try quantization with trskip and use it if it's better.
      int has_coeffs = kvz_quantize_residual_trskip(
          state, cur_pu, width, COLOR_Y, scan_idx_luma,
          &cur_pu->intra.tr_skip,
          LCU_WIDTH, LCU_WIDTH,
          base_y, recbase_y, recbase_y, orig_coeff_y
      );
      if (has_coeffs) {
        cbf_set(&cur_pu->cbf, depth, COLOR_Y);
      }
    } else {
      int has_coeffs = kvz_quantize_residual(
          state, cur_pu, width, COLOR_Y, scan_idx_luma,
          0,
          LCU_WIDTH, LCU_WIDTH,
          base_y, recbase_y, recbase_y, orig_coeff_y
      );
      if (has_coeffs) {
        cbf_set(&cur_pu->cbf, depth, COLOR_Y);
      }
    }
  }
}
|
|
|
|
|
|
|
|
|
2015-08-26 08:50:27 +00:00
|
|
|
/**
 * \brief Chroma counterpart of kvz_quantize_lcu_luma_residual.
 *
 * Recurses on transform split, propagates child CBFs to the parent, and
 * quantizes the U and V residual for the area, updating lcu->rec, lcu->coeff
 * and the chroma coded block flags.
 */
void kvz_quantize_lcu_chroma_residual(encoder_state_t * const state, int32_t x, int32_t y, const uint8_t depth, cu_info_t *cur_cu, lcu_t* lcu)
{
  // we have 64>>depth transform size
  const vector2d_t lcu_px = { SUB_SCU(x), SUB_SCU(y) };
  const int8_t width = LCU_WIDTH>>depth;
  // When the caller passes no CU, look it up from the LCU at this position.
  if (cur_cu == NULL) {
    cur_cu = LCU_GET_CU_AT_PX(lcu, lcu_px.x, lcu_px.y);
  }

  // Tell clang-analyzer what is up. For some reason it can't figure out from
  // asserting just depth.
  assert(width == 4 || width == 8 || width == 16 || width == 32 || width == 64);

  // Split transform and increase depth
  if (depth == 0 || cur_cu->tr_depth > depth) {
    int offset = width / 2;
    // Recurse into the four quadrants of this block.
    kvz_quantize_lcu_chroma_residual(state, x,          y,          depth+1, NULL, lcu);
    kvz_quantize_lcu_chroma_residual(state, x + offset, y,          depth+1, NULL, lcu);
    kvz_quantize_lcu_chroma_residual(state, x,          y + offset, depth+1, NULL, lcu);
    kvz_quantize_lcu_chroma_residual(state, x + offset, y + offset, depth+1, NULL, lcu);

    // Propagate coded block flags from child CUs to parent CU.
    // NOTE(review): this guard uses `depth < MAX_DEPTH` while the luma
    // variant uses `depth <= MAX_DEPTH` — confirm the asymmetry is intended.
    if (depth < MAX_DEPTH) {
      uint16_t child_cbfs[3] = {
        LCU_GET_CU_AT_PX(lcu, lcu_px.x + offset, lcu_px.y         )->cbf,
        LCU_GET_CU_AT_PX(lcu, lcu_px.x,          lcu_px.y + offset)->cbf,
        LCU_GET_CU_AT_PX(lcu, lcu_px.x + offset, lcu_px.y + offset)->cbf,
      };
      cbf_set_conditionally(&cur_cu->cbf, child_cbfs, depth, COLOR_U);
      cbf_set_conditionally(&cur_cu->cbf, child_cbfs, depth, COLOR_V);
    }

    return;
  }

  // If luma is 4x4, do chroma for the 8x8 luma area when handling the top
  // left PU because the coordinates are correct.
  if (depth <= MAX_DEPTH || (lcu_px.x % 8 == 0 && lcu_px.y % 8 == 0)) {
    // Reset chroma CBFs so repeated calls don't leave stale flags.
    cbf_clear(&cur_cu->cbf, depth, COLOR_U);
    cbf_clear(&cur_cu->cbf, depth, COLOR_V);

    // Chroma plane offset for 4:2:0: half resolution in both dimensions.
    const int chroma_offset = lcu_px.x / 2 + lcu_px.y / 2 * LCU_WIDTH_C;
    kvz_pixel *recbase_u = &lcu->rec.u[chroma_offset];
    kvz_pixel *recbase_v = &lcu->rec.v[chroma_offset];
    const kvz_pixel *base_u = &lcu->ref.u[chroma_offset];
    const kvz_pixel *base_v = &lcu->ref.v[chroma_offset];
    coeff_t *orig_coeff_u = &lcu->coeff.u[chroma_offset];
    coeff_t *orig_coeff_v = &lcu->coeff.v[chroma_offset];
    coeff_scan_order_t scan_idx_chroma;
    // Transform skip is never used for chroma here.
    int tr_skip = 0;
    // Chroma blocks never go below 4x4, so clamp the depth at MAX_PU_DEPTH.
    int chroma_depth = (depth == MAX_PU_DEPTH ? depth - 1 : depth);
    int chroma_width = LCU_WIDTH_C >> chroma_depth;

    scan_idx_chroma = kvz_get_scan_order(cur_cu->type, cur_cu->intra.mode_chroma, depth);
    if (kvz_quantize_residual(state, cur_cu, chroma_width, COLOR_U, scan_idx_chroma, tr_skip, LCU_WIDTH_C, LCU_WIDTH_C, base_u, recbase_u, recbase_u, orig_coeff_u)) {
      cbf_set(&cur_cu->cbf, depth, COLOR_U);
    }
    if (kvz_quantize_residual(state, cur_cu, chroma_width, COLOR_V, scan_idx_chroma, tr_skip, LCU_WIDTH_C, LCU_WIDTH_C, base_v, recbase_v, recbase_v, orig_coeff_v)) {
      cbf_set(&cur_cu->cbf, depth, COLOR_V);
    }
  }
}
|
|
|
|
|