/*****************************************************************************
 * This file is part of Kvazaar HEVC encoder.
 *
 * Copyright (C) 2013-2015 Tampere University of Technology and others (see
 * COPYING file).
 *
 * Kvazaar is free software: you can redistribute it and/or modify it under
 * the terms of the GNU Lesser General Public License as published by the
 * Free Software Foundation; either version 2.1 of the License, or (at your
 * option) any later version.
 *
 * Kvazaar is distributed in the hope that it will be useful, but WITHOUT ANY
 * WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 * FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with Kvazaar. If not, see <http://www.gnu.org/licenses/>.
 ****************************************************************************/

#include "transform.h"

#include "image.h"
#include "kvazaar.h"
#include "rdo.h"
#include "strategies/strategies-dct.h"
#include "strategies/strategies-quant.h"
#include "strategies/strategies-picture.h"
#include "tables.h"

/**
 * \brief RDPCM direction.
 */
typedef enum rdpcm_dir {
  RDPCM_VER = 0, // vertical
  RDPCM_HOR = 1, // horizontal
} rdpcm_dir;

//////////////////////////////////////////////////////////////////////////
// INITIALIZATIONS
//
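
// Mapping from (clipped) luma QP to chroma QP, used by kvz_get_scaled_qp()
// below. Up to QP 29 the mapping is the identity; above that, chroma QP grows
// more slowly than luma QP. This appears to follow the HEVC chroma QP mapping
// table for 4:2:0 content.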
const uint8_t kvz_g_chroma_scale[58]=
{
   0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,
  17,18,19,20,21,22,23,24,25,26,27,28,29,29,30,31,32,
  33,33,34,34,35,35,36,36,37,37,38,39,40,41,42,43,44,
  45,46,47,48,49,50,51
};

//////////////////////////////////////////////////////////////////////////
// FUNCTIONS
//

/**
 * \brief Bypass transform and quantization.
 *
 * Copies the reference pixels directly to reconstruction and the residual
 * directly to coefficients. Used when cu_transquant_bypass_flag is set.
 * Parameters pred_in and rec_out may be aliased.
 *
 * \param width       Transform width.
 * \param in_stride   Stride for ref_in and pred_in.
 * \param out_stride  Stride for rec_out and coeff_out.
 * \param ref_in      Reference pixels.
 * \param pred_in     Predicted pixels.
 * \param rec_out     Returns the reconstructed pixels.
 * \param coeff_out   Returns the coefficients used for reconstruction of rec_out.
 *
 * \returns  Whether coeff_out contains any non-zero coefficients.
 */
static bool bypass_transquant(const int width,
                              const int in_stride,
                              const int out_stride,
                              const kvz_pixel *const ref_in,
                              const kvz_pixel *const pred_in,
                              kvz_pixel *rec_out,
                              coeff_t *coeff_out)
{
  bool nonzero_coeffs = false;

  for (int y = 0; y < width; ++y) {
    for (int x = 0; x < width; ++x) {
      int32_t in_idx  = x + y * in_stride;
      int32_t out_idx = x + y * out_stride;

      // The residual must be computed before writing to rec_out because
      // pred_in and rec_out may point to the same array.
      coeff_t coeff = (coeff_t)(ref_in[in_idx] - pred_in[in_idx]);
      coeff_out[out_idx] = coeff;
      rec_out[out_idx] = ref_in[in_idx];

      nonzero_coeffs |= (coeff != 0);
    }
  }

  return nonzero_coeffs;
}

/**
 * Apply DPCM to residual.
 *
 * \param width   width of the block
 * \param stride  stride of coeff array
 * \param dir     RDPCM direction
 * \param coeff   coefficients (residual) to filter
 */
static void rdpcm(const int width,
                  const int stride,
                  const rdpcm_dir dir,
                  coeff_t *coeff)
{
  const int offset = (dir == RDPCM_HOR) ? 1 : stride;
  const int min_x  = (dir == RDPCM_HOR) ? 1 : 0;
  const int min_y  = (dir == RDPCM_HOR) ? 0 : 1;
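
  // Each coefficient is replaced by its difference to the previous
  // coefficient in the RDPCM direction (left neighbour for horizontal,
  // above neighbour for vertical). Iterating in reverse order ensures the
  // neighbour still holds its original value when the difference is taken.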
  for (int y = width - 1; y >= min_y; y--) {
    for (int x = width - 1; x >= min_x; x--) {
      const int index = x + y * stride;
      coeff[index] -= coeff[index - offset];
    }
  }
}

/**
 * \brief Get scaled QP used in quantization
 */
int32_t kvz_get_scaled_qp(int8_t type, int8_t qp, int8_t qp_offset)
{
  int32_t qp_scaled = 0;
  if (type == 0) {
    qp_scaled = qp + qp_offset;
  } else {
    qp_scaled = CLIP(-qp_offset, 57, qp);
    if (qp_scaled < 0) {
      qp_scaled = qp_scaled + qp_offset;
    } else {
      qp_scaled = kvz_g_chroma_scale[qp_scaled] + qp_offset;
    }
  }
  return qp_scaled;
}
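
// For example (assuming type 0 means luma): for luma the QP is simply offset,
// while for chroma a QP of 51 with no offset is first clipped to [0, 57] and
// then mapped through kvz_g_chroma_scale to a chroma QP of 45.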

/**
 * \brief Forward transform skip
 * \param block input data (residual)
 * \param coeff output data (transform coefficients)
 * \param block_size width of transform
 */
void kvz_transformskip(const encoder_control_t * const encoder, int16_t *block, int16_t *coeff, int8_t block_size)
{
  uint32_t log2_tr_size = kvz_g_convert_to_bit[block_size] + 2;
  int32_t shift = MAX_TR_DYNAMIC_RANGE - encoder->bitdepth - log2_tr_size;
  int32_t j, k;
  for (j = 0; j < block_size; j++) {
    for (k = 0; k < block_size; k++) {
      coeff[j * block_size + k] = block[j * block_size + k] << shift;
    }
  }
}
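
// Transform skip simply rescales the spatial residual to the dynamic range
// the quantizer expects from transform coefficients: with the usual
// MAX_TR_DYNAMIC_RANGE of 15, 8-bit video and a 4x4 block, the shift is
// 15 - 8 - 2 = 5 bits, and kvz_itransformskip() undoes it with rounding.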

/**
 * \brief Inverse transform skip
 * \param coeff input data (transform coefficients)
 * \param block output data (residual)
 * \param block_size width of transform
 */
void kvz_itransformskip(const encoder_control_t * const encoder, int16_t *block, int16_t *coeff, int8_t block_size)
{
  uint32_t log2_tr_size = kvz_g_convert_to_bit[block_size] + 2;
  int32_t shift = MAX_TR_DYNAMIC_RANGE - encoder->bitdepth - log2_tr_size;
  int32_t j, k;
  int32_t offset;
  offset = (1 << (shift - 1)); // For rounding
  for (j = 0; j < block_size; j++) {
    for (k = 0; k < block_size; k++) {
      block[j * block_size + k] = (coeff[j * block_size + k] + offset) >> shift;
    }
  }
}

/**
 * \brief Forward transform (2D)
 * \param block input residual
 * \param coeff transform coefficients
 * \param block_size width of transform
 */
void kvz_transform2d(const encoder_control_t * const encoder, int16_t *block, int16_t *coeff, int8_t block_size, int32_t mode)
{
  dct_func *dct_func = kvz_get_dct_func(block_size, mode);
  dct_func(encoder->bitdepth, block, coeff);
}
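
/**
 * \brief Inverse transform (2D)
 *
 * In HEVC the 4x4 intra luma block uses a DST instead of the DCT, so the
 * mode parameter lets kvz_get_idct_func() pick the matching kernel.
 *
 * \param coeff input transform coefficients
 * \param block output residual
 * \param block_size width of transform
 */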
void kvz_itransform2d(const encoder_control_t * const encoder, int16_t *block, int16_t *coeff, int8_t block_size, int32_t mode)
{
  dct_func *idct_func = kvz_get_idct_func(block_size, mode);
  idct_func(encoder->bitdepth, coeff, block);
}

/**
 * \brief Like kvz_quantize_residual except that this uses trskip if that is better.
 *
 * Using this function saves one step of quantization and inverse quantization
 * compared to doing the decision separately from the actual operation.
 *
 * \param width Transform width.
 * \param color Color.
 * \param scan_order Coefficient scan order.
 * \param trskip_out Whether transform skip is used.
 * \param in_stride Stride for ref_in and pred_in.
 * \param out_stride Stride for rec_out and coeff_out.
 * \param ref_in Reference pixels.
 * \param pred_in Predicted pixels.
 * \param rec_out Reconstructed pixels.
 * \param coeff_out Coefficients used for reconstruction of rec_out.
 *
 * \returns Whether coeff_out contains any non-zero coefficients.
 */
int kvz_quantize_residual_trskip(
  encoder_state_t *const state,
  const cu_info_t *const cur_cu, const int width, const color_t color,
  const coeff_scan_order_t scan_order, int8_t *trskip_out,
  const int in_stride, const int out_stride,
  const kvz_pixel *const ref_in, const kvz_pixel *const pred_in,
  kvz_pixel *rec_out, coeff_t *coeff_out)
{
  struct {
    kvz_pixel rec[4*4];
    coeff_t coeff[4*4];
    uint32_t cost;
    int has_coeffs;
  } skip, noskip, *best;

  const int bit_cost = (int)(state->lambda + 0.5);
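
  // Measure rate-distortion cost as SSD between reference and reconstruction
  // plus lambda (rounded to an integer bit cost) times the coefficient bits,
  // once without and once with transform skip, and keep the cheaper one.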
  noskip.has_coeffs = kvz_quantize_residual(
      state, cur_cu, width, color, scan_order,
      0, in_stride, 4,
      ref_in, pred_in, noskip.rec, noskip.coeff);
  noskip.cost = kvz_pixels_calc_ssd(ref_in, noskip.rec, in_stride, 4, 4);
  noskip.cost += kvz_get_coeff_cost(state, noskip.coeff, 4, 0, scan_order) * bit_cost;

  skip.has_coeffs = kvz_quantize_residual(
      state, cur_cu, width, color, scan_order,
      1, in_stride, 4,
      ref_in, pred_in, skip.rec, skip.coeff);
  skip.cost = kvz_pixels_calc_ssd(ref_in, skip.rec, in_stride, 4, 4);
  skip.cost += kvz_get_coeff_cost(state, skip.coeff, 4, 0, scan_order) * bit_cost;

  if (noskip.cost <= skip.cost) {
    *trskip_out = 0;
    best = &noskip;
  } else {
    *trskip_out = 1;
    best = &skip;
  }

  if (best->has_coeffs || rec_out != pred_in) {
    // If there is no residual and reconstruction is already in rec_out,
    // we can skip this.
    kvz_pixels_blit(best->rec, rec_out, width, width, 4, out_stride);
  }
  kvz_coefficients_blit(best->coeff, coeff_out, width, width, 4, out_stride);

  return best->has_coeffs;
}

/**
 * Calculate the residual coefficients for a single TU.
 */
static void quantize_tr_residual(encoder_state_t * const state,
                                 const color_t color,
                                 const int32_t x,
                                 const int32_t y,
                                 const uint8_t depth,
                                 cu_info_t *cur_pu,
                                 lcu_t* lcu)
{
  const kvz_config *cfg = &state->encoder_control->cfg;
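
  // Chroma planes are subsampled by two in both directions (assuming 4:2:0
  // content), so chroma coordinates within the LCU use half the luma scale.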
  const int32_t shift = color == COLOR_Y ? 0 : 1;
  const vector2d_t lcu_px = { SUB_SCU(x) >> shift, SUB_SCU(y) >> shift };

  // If luma is 4x4, do chroma for the 8x8 luma area when handling the top
  // left PU because the coordinates are correct.
  bool handled_elsewhere = color != COLOR_Y &&
                           depth > MAX_DEPTH &&
                           (lcu_px.x % 4 != 0 || lcu_px.y % 4 != 0);
  if (handled_elsewhere) {
    return;
  }

  // Clear coded block flag structures for depths lower than current depth.
  // This should ensure that the CBF data doesn't get corrupted if this function
  // is called more than once.
  cbf_clear(&cur_pu->cbf, depth, color);

  int32_t tr_width;
  if (color == COLOR_Y) {
    tr_width = LCU_WIDTH >> depth;
  } else {
    const int chroma_depth = (depth == MAX_PU_DEPTH ? depth - 1 : depth);
    tr_width = LCU_WIDTH_C >> chroma_depth;
  }
  const int32_t lcu_width = LCU_WIDTH >> shift;
  const int8_t mode =
    (color == COLOR_Y) ? cur_pu->intra.mode : cur_pu->intra.mode_chroma;
  const coeff_scan_order_t scan_idx =
    kvz_get_scan_order(cur_pu->type, mode, depth);
  const int offset = lcu_px.x + lcu_px.y * lcu_width;

  // Pointers to current location in arrays with prediction. The
  // reconstruction will be written to this array.
  kvz_pixel *pred = NULL;
  // Pointers to current location in arrays with reference.
  const kvz_pixel *ref = NULL;
  // Pointers to current location in arrays with quantized coefficients.
  coeff_t *coeff = NULL;

  switch (color) {
    case COLOR_Y:
      pred = &lcu->rec.y[offset];
      ref = &lcu->ref.y[offset];
      coeff = &lcu->coeff.y[offset];
      break;
    case COLOR_U:
      pred = &lcu->rec.u[offset];
      ref = &lcu->ref.u[offset];
      coeff = &lcu->coeff.u[offset];
      break;
    case COLOR_V:
      pred = &lcu->rec.v[offset];
      ref = &lcu->ref.v[offset];
      coeff = &lcu->coeff.v[offset];
      break;
  }

  const bool can_use_trskip = tr_width == 4 &&
                              color == COLOR_Y &&
                              cfg->trskip_enable;

  bool has_coeffs;

  if (cfg->lossless) {
    has_coeffs = bypass_transquant(tr_width,
                                   lcu_width, // in stride
                                   lcu_width, // out stride
                                   ref,
                                   pred,
                                   pred,
                                   coeff);
    if (cfg->implicit_rdpcm && cur_pu->type == CU_INTRA) {
      // implicit rdpcm for horizontal and vertical intra modes
      if (mode == 10) {
        rdpcm(tr_width, lcu_width, RDPCM_HOR, coeff);
      } else if (mode == 26) {
        rdpcm(tr_width, lcu_width, RDPCM_VER, coeff);
      }
    }

  } else if (can_use_trskip) {
    // Try quantization with trskip and use it if it's better.
    has_coeffs = kvz_quantize_residual_trskip(state,
                                              cur_pu,
                                              tr_width,
                                              color,
                                              scan_idx,
                                              &cur_pu->intra.tr_skip,
                                              lcu_width,
                                              lcu_width,
                                              ref,
                                              pred,
                                              pred,
                                              coeff);
  } else {
    has_coeffs = kvz_quantize_residual(state,
                                       cur_pu,
                                       tr_width,
                                       color,
                                       scan_idx,
                                       false, // tr skip
                                       lcu_width,
                                       lcu_width,
                                       ref,
                                       pred,
                                       pred,
                                       coeff);
  }

  if (has_coeffs) {
    cbf_set(&cur_pu->cbf, depth, color);
  }
}

/**
 * This function calculates the residual coefficients for a region of the LCU
 * (defined by x, y and depth) and updates the reconstruction with the
 * quantized residual. Processes the TU tree recursively.
 *
 * Inputs are:
 * - lcu->rec   pixels after prediction for the area
 * - lcu->ref   reference pixels for the area
 * - lcu->cu    for the area
 *
 * Outputs are:
 * - lcu->rec               reconstruction after quantized residual
 * - lcu->coeff             quantized coefficients for the area
 * - lcu->cbf               coded block flags for the area
 * - lcu->cu.intra.tr_skip  tr skip flags for the area (in case of luma)
 */
void kvz_quantize_lcu_residual(encoder_state_t * const state,
                               const bool luma,
                               const bool chroma,
                               const int32_t x,
                               const int32_t y,
                               const uint8_t depth,
                               cu_info_t *cur_pu,
                               lcu_t* lcu)
{
  const int32_t width = LCU_WIDTH >> depth;
  const vector2d_t lcu_px = { SUB_SCU(x), SUB_SCU(y) };

  if (cur_pu == NULL) {
    cur_pu = LCU_GET_CU_AT_PX(lcu, lcu_px.x, lcu_px.y);
  }

  // Tell clang-analyzer what is up. For some reason it can't figure out from
  // asserting just depth.
  assert(width == 4 ||
         width == 8 ||
         width == 16 ||
         width == 32 ||
         width == 64);

  if (depth == 0 || cur_pu->tr_depth > depth) {
    // Split transform and increase depth
    const int offset = width / 2;
    const int32_t x2 = x + offset;
    const int32_t y2 = y + offset;

    kvz_quantize_lcu_residual(state, luma, chroma, x,  y,  depth + 1, NULL, lcu);
    kvz_quantize_lcu_residual(state, luma, chroma, x2, y,  depth + 1, NULL, lcu);
    kvz_quantize_lcu_residual(state, luma, chroma, x,  y2, depth + 1, NULL, lcu);
    kvz_quantize_lcu_residual(state, luma, chroma, x2, y2, depth + 1, NULL, lcu);

    // Propagate coded block flags from child CUs to parent CU.
    uint16_t child_cbfs[3] = {
      LCU_GET_CU_AT_PX(lcu, lcu_px.x + offset, lcu_px.y         )->cbf,
      LCU_GET_CU_AT_PX(lcu, lcu_px.x,          lcu_px.y + offset)->cbf,
      LCU_GET_CU_AT_PX(lcu, lcu_px.x + offset, lcu_px.y + offset)->cbf,
    };

    if (luma && depth < MAX_DEPTH) {
      cbf_set_conditionally(&cur_pu->cbf, child_cbfs, depth, COLOR_Y);
    }
    if (chroma && depth <= MAX_DEPTH) {
      cbf_set_conditionally(&cur_pu->cbf, child_cbfs, depth, COLOR_U);
      cbf_set_conditionally(&cur_pu->cbf, child_cbfs, depth, COLOR_V);
    }

  } else {
    // Process a leaf TU.
    if (luma) {
      quantize_tr_residual(state, COLOR_Y, x, y, depth, cur_pu, lcu);
    }
    if (chroma) {
      quantize_tr_residual(state, COLOR_U, x, y, depth, cur_pu, lcu);
      quantize_tr_residual(state, COLOR_V, x, y, depth, cur_pu, lcu);
    }
  }
}
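
/*
 * Illustrative usage sketch (the coordinate variable names are assumed, not
 * part of this file): once prediction for a CU has been written to lcu->rec,
 * the residual for the whole CU can be coded with a single call
 *
 *   kvz_quantize_lcu_residual(state, true, true, x_px, y_px, depth, NULL, lcu);
 *
 * which recurses through the TU tree and leaves the reconstruction, the
 * quantized coefficients and the coded block flags in the lcu structure.
 */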
|