encoder->in.cur_pic --> cur_pic

This commit is contained in:
Laurent Fasnacht 2014-04-17 07:30:06 +02:00
parent 21d34613c2
commit 9ac3b7bf2b
4 changed files with 101 additions and 89 deletions

View file

@@ -63,6 +63,7 @@ static void encode_sao(const encoder_control * const encoder,
*/
void init_lambda(const encoder_control * const encoder)
{
const picture * const cur_pic = encoder->in.cur_pic;
double qp = encoder->QP;
double lambda_scale = 1.0;
double qp_temp = qp - 12;
@@ -71,13 +72,13 @@ void init_lambda(const encoder_control * const encoder)
// Default QP-factor from HM config
double qp_factor = 0.4624;
if (encoder->in.cur_pic->slicetype == SLICE_I) {
if (cur_pic->slicetype == SLICE_I) {
qp_factor=0.57*lambda_scale;
}
lambda = qp_factor*pow( 2.0, qp_temp/3.0 );
if (encoder->in.cur_pic->slicetype != SLICE_I ) {
if (cur_pic->slicetype != SLICE_I ) {
lambda *= 0.95;
}
@@ -245,7 +246,7 @@ void encode_one_frame(encoder_control* encoder)
const int is_p_radl = (encoder->cfg->intra_period > 1 && (encoder->frame % encoder->cfg->intra_period) == 0);
const int is_radl_frame = is_first_frame || is_i_radl || is_p_radl;
picture *pic = encoder->in.cur_pic;
picture * const cur_pic = encoder->in.cur_pic;
cabac_data cabac;
@@ -262,8 +263,8 @@ void encode_one_frame(encoder_control* encoder)
encoder->poc = 0;
encoder->in.cur_pic->slicetype = SLICE_I;
encoder->in.cur_pic->type = NAL_IDR_W_RADL;
cur_pic->slicetype = SLICE_I;
cur_pic->type = NAL_IDR_W_RADL;
// Access Unit Delimiter (AUD)
if (encoder->aud_enable)
@@ -292,8 +293,8 @@ void encode_one_frame(encoder_control* encoder)
}
} else {
// When intra period == 1, all pictures are intra
encoder->in.cur_pic->slicetype = encoder->cfg->intra_period==1 ? SLICE_I : SLICE_P;
encoder->in.cur_pic->type = NAL_TRAIL_R;
cur_pic->slicetype = encoder->cfg->intra_period==1 ? SLICE_I : SLICE_P;
cur_pic->type = NAL_TRAIL_R;
// Access Unit Delimiter (AUD)
if (encoder->aud_enable)
@@ -313,7 +314,7 @@ void encode_one_frame(encoder_control* encoder)
cabac.stream = stream;
cabac_start(&cabac);
init_contexts(&cabac, encoder->QP, encoder->in.cur_pic->slicetype);
init_contexts(&cabac, encoder->QP, cur_pic->slicetype);
encode_slice_header(encoder);
bitstream_align(stream);
@@ -347,23 +348,23 @@ void encode_one_frame(encoder_control* encoder)
}
// Take bottom and right pixels from this LCU to be used on the search of next LCU.
picture_blit_pixels(&pic->y_recdata[(bottom - 1) * size.x + px.x],
picture_blit_pixels(&cur_pic->y_recdata[(bottom - 1) * size.x + px.x],
&hor_buf->y[px.x],
lcu_dim.x, 1, size.x, size.x);
picture_blit_pixels(&pic->u_recdata[(bottom / 2 - 1) * size.x / 2 + px.x / 2],
picture_blit_pixels(&cur_pic->u_recdata[(bottom / 2 - 1) * size.x / 2 + px.x / 2],
&hor_buf->u[px.x / 2],
lcu_dim.x / 2, 1, size.x / 2, size.x / 2);
picture_blit_pixels(&pic->v_recdata[(bottom / 2 - 1) * size.x / 2 + px.x / 2],
picture_blit_pixels(&cur_pic->v_recdata[(bottom / 2 - 1) * size.x / 2 + px.x / 2],
&hor_buf->v[px.x / 2],
lcu_dim.x / 2, 1, size.x / 2, size.x / 2);
picture_blit_pixels(&pic->y_recdata[px.y * size.x + right - 1],
picture_blit_pixels(&cur_pic->y_recdata[px.y * size.x + right - 1],
&ver_buf->y[1],
1, lcu_dim.y, size.x, 1);
picture_blit_pixels(&pic->u_recdata[px.y * size.x / 4 + (right / 2) - 1],
picture_blit_pixels(&cur_pic->u_recdata[px.y * size.x / 4 + (right / 2) - 1],
&ver_buf->u[1],
1, lcu_dim.y / 2, size.x / 2, 1);
picture_blit_pixels(&pic->v_recdata[px.y * size.x / 4 + (right / 2) - 1],
picture_blit_pixels(&cur_pic->v_recdata[px.y * size.x / 4 + (right / 2) - 1],
&ver_buf->v[1],
1, lcu_dim.y / 2, size.x / 2, 1);
@@ -373,21 +374,21 @@ void encode_one_frame(encoder_control* encoder)
if (encoder->sao_enable) {
const int stride = encoder->in.width_in_lcu;
sao_info *sao_luma = &pic->sao_luma[lcu.y * stride + lcu.x];
sao_info *sao_chroma = &pic->sao_chroma[lcu.y * stride + lcu.x];
sao_info *sao_luma = &cur_pic->sao_luma[lcu.y * stride + lcu.x];
sao_info *sao_chroma = &cur_pic->sao_chroma[lcu.y * stride + lcu.x];
init_sao_info(sao_luma);
init_sao_info(sao_chroma);
{
sao_info *sao_top = lcu. y != 0 ? &pic->sao_luma[(lcu.y - 1) * stride + lcu.x] : NULL;
sao_info *sao_left = lcu.x != 0 ? &pic->sao_luma[lcu.y * stride + lcu.x -1] : NULL;
sao_search_luma(encoder->in.cur_pic, lcu.x, lcu.y, sao_luma, sao_top, sao_left);
sao_info *sao_top = lcu. y != 0 ? &cur_pic->sao_luma[(lcu.y - 1) * stride + lcu.x] : NULL;
sao_info *sao_left = lcu.x != 0 ? &cur_pic->sao_luma[lcu.y * stride + lcu.x -1] : NULL;
sao_search_luma(cur_pic, lcu.x, lcu.y, sao_luma, sao_top, sao_left);
}
{
sao_info *sao_top = lcu.y != 0 ? &pic->sao_chroma[(lcu.y - 1) * stride + lcu.x] : NULL;
sao_info *sao_left = lcu.x != 0 ? &pic->sao_chroma[lcu.y * stride + lcu.x - 1] : NULL;
sao_search_chroma(encoder->in.cur_pic, lcu.x, lcu.y, sao_chroma, sao_top, sao_left);
sao_info *sao_top = lcu.y != 0 ? &cur_pic->sao_chroma[(lcu.y - 1) * stride + lcu.x] : NULL;
sao_info *sao_left = lcu.x != 0 ? &cur_pic->sao_chroma[lcu.y * stride + lcu.x - 1] : NULL;
sao_search_chroma(cur_pic, lcu.x, lcu.y, sao_chroma, sao_top, sao_left);
}
// Merge only if both luma and chroma can be merged
@@ -417,7 +418,7 @@ void encode_one_frame(encoder_control* encoder)
// Calculate checksum
add_checksum(encoder);
encoder->in.cur_pic->poc = encoder->poc;
cur_pic->poc = encoder->poc;
dealloc_yuv_t(hor_buf);
dealloc_yuv_t(ver_buf);
@@ -511,13 +512,14 @@ int read_one_frame(FILE* file, const encoder_control * const encoder)
static void add_checksum(const encoder_control * const encoder)
{
bitstream * const stream = encoder->stream;
const picture * const cur_pic = encoder->in.cur_pic;
unsigned char checksum[3][SEI_HASH_MAX_LENGTH];
uint32_t checksum_val;
unsigned int i;
nal_write(stream, NAL_SUFFIT_SEI_NUT, 0, 0);
picture_checksum(encoder->in.cur_pic, checksum);
picture_checksum(cur_pic, checksum);
WRITE_U(stream, 132, 8, "sei_type");
WRITE_U(stream, 13, 8, "size");
@@ -537,8 +539,9 @@ static void add_checksum(const encoder_control * const encoder)
void encode_access_unit_delimiter(const encoder_control * const encoder)
{
bitstream * const stream = encoder->stream;
uint8_t pic_type = encoder->in.cur_pic->slicetype == SLICE_I ? 0
: encoder->in.cur_pic->slicetype == SLICE_P ? 1
const picture * const cur_pic = encoder->in.cur_pic;
uint8_t pic_type = cur_pic->slicetype == SLICE_I ? 0
: cur_pic->slicetype == SLICE_P ? 1
: 2;
WRITE_U(stream, pic_type, 3, "pic_type");
}
@@ -990,7 +993,7 @@ static void encode_VUI(const encoder_control * const encoder)
void encode_slice_header(const encoder_control * const encoder)
{
bitstream * const stream = encoder->stream;
picture *cur_pic = encoder->in.cur_pic;
const picture * const cur_pic = encoder->in.cur_pic;
#ifdef _DEBUG
printf("=========== Slice ===========\n");
@@ -998,8 +1001,8 @@ void encode_slice_header(const encoder_control * const encoder)
WRITE_U(stream, 1, 1, "first_slice_segment_in_pic_flag");
if (encoder->in.cur_pic->type >= NAL_BLA_W_LP
&& encoder->in.cur_pic->type <= NAL_RSV_IRAP_VCL23) {
if (cur_pic->type >= NAL_BLA_W_LP
&& cur_pic->type <= NAL_RSV_IRAP_VCL23) {
WRITE_U(stream, 1, 1, "no_output_of_prior_pics_flag");
}
@@ -1007,7 +1010,7 @@ void encode_slice_header(const encoder_control * const encoder)
//WRITE_U(stream, 0, 1, "dependent_slice_segment_flag");
WRITE_UE(stream, encoder->in.cur_pic->slicetype, "slice_type");
WRITE_UE(stream, cur_pic->slicetype, "slice_type");
// if !entropy_slice_flag
@@ -1015,8 +1018,8 @@ void encode_slice_header(const encoder_control * const encoder)
//WRITE_U(stream, 1, 1, "pic_output_flag");
//end if
//if( IdrPicFlag ) <- nal_unit_type == 5
if (encoder->in.cur_pic->type != NAL_IDR_W_RADL
&& encoder->in.cur_pic->type != NAL_IDR_N_LP) {
if (cur_pic->type != NAL_IDR_W_RADL
&& cur_pic->type != NAL_IDR_N_LP) {
int j;
int ref_negative = encoder->ref->used_size;
int ref_positive = 0;
@@ -1041,13 +1044,13 @@ void encode_slice_header(const encoder_control * const encoder)
WRITE_U(stream, cur_pic->slice_sao_chroma_flag, 1, "slice_sao_chroma_flag");
}
if (encoder->in.cur_pic->slicetype != SLICE_I) {
if (cur_pic->slicetype != SLICE_I) {
WRITE_U(stream, 1, 1, "num_ref_idx_active_override_flag");
WRITE_UE(stream, encoder->ref->used_size-1, "num_ref_idx_l0_active_minus1");
WRITE_UE(stream, 5-MRG_MAX_NUM_CANDS, "five_minus_max_num_merge_cand");
}
if (encoder->in.cur_pic->slicetype == SLICE_B) {
if (cur_pic->slicetype == SLICE_B) {
WRITE_U(stream, 0, 1, "mvd_l1_zero_flag");
}
@@ -1061,12 +1064,12 @@ void encode_slice_header(const encoder_control * const encoder)
static void encode_sao_color(const encoder_control * const encoder, cabac_data *cabac, sao_info *sao,
color_index color_i)
{
picture *pic = encoder->in.cur_pic;
const picture * const cur_pic = encoder->in.cur_pic;
sao_eo_cat i;
// Skip colors with no SAO.
if (color_i == COLOR_Y && !pic->slice_sao_luma_flag) return;
if (color_i != COLOR_Y && !pic->slice_sao_chroma_flag) return;
if (color_i == COLOR_Y && !cur_pic->slice_sao_luma_flag) return;
if (color_i != COLOR_Y && !cur_pic->slice_sao_chroma_flag) return;
/// sao_type_idx_luma: TR, cMax = 2, cRiceParam = 0, bins = {0, bypass}
/// sao_type_idx_chroma: TR, cMax = 2, cRiceParam = 0, bins = {0, bypass}
@@ -1145,7 +1148,8 @@ static void encode_sao(const encoder_control * const encoder,
void encode_coding_tree(const encoder_control * const encoder, cabac_data *cabac,
uint16_t x_ctb, uint16_t y_ctb, uint8_t depth)
{
cu_info *cur_cu = &encoder->in.cur_pic->cu_array[MAX_DEPTH][x_ctb + y_ctb * (encoder->in.width_in_lcu << MAX_DEPTH)];
const picture * const cur_pic = encoder->in.cur_pic;
cu_info *cur_cu = &cur_pic->cu_array[MAX_DEPTH][x_ctb + y_ctb * (encoder->in.width_in_lcu << MAX_DEPTH)];
uint8_t split_flag = GET_SPLITDATA(cur_cu, depth);
uint8_t split_model = 0;
@@ -1162,11 +1166,11 @@ void encode_coding_tree(const encoder_control * const encoder, cabac_data *cabac
// Implisit split flag when on border
if (!border) {
// Get left and top block split_flags and if they are present and true, increase model number
if (x_ctb > 0 && GET_SPLITDATA(&(encoder->in.cur_pic->cu_array[MAX_DEPTH][x_ctb - 1 + y_ctb * (encoder->in.width_in_lcu << MAX_DEPTH)]), depth) == 1) {
if (x_ctb > 0 && GET_SPLITDATA(&(cur_pic->cu_array[MAX_DEPTH][x_ctb - 1 + y_ctb * (encoder->in.width_in_lcu << MAX_DEPTH)]), depth) == 1) {
split_model++;
}
if (y_ctb > 0 && GET_SPLITDATA(&(encoder->in.cur_pic->cu_array[MAX_DEPTH][x_ctb + (y_ctb - 1) * (encoder->in.width_in_lcu << MAX_DEPTH)]), depth) == 1) {
if (y_ctb > 0 && GET_SPLITDATA(&(cur_pic->cu_array[MAX_DEPTH][x_ctb + (y_ctb - 1) * (encoder->in.width_in_lcu << MAX_DEPTH)]), depth) == 1) {
split_model++;
}
@@ -1196,16 +1200,16 @@ void encode_coding_tree(const encoder_control * const encoder, cabac_data *cabac
// Encode skip flag
if (encoder->in.cur_pic->slicetype != SLICE_I) {
if (cur_pic->slicetype != SLICE_I) {
int8_t ctx_skip = 0; // uiCtxSkip = aboveskipped + leftskipped;
int ui;
int16_t num_cand = MRG_MAX_NUM_CANDS;
// Get left and top skipped flags and if they are present and true, increase context number
if (x_ctb > 0 && (&encoder->in.cur_pic->cu_array[MAX_DEPTH][x_ctb - 1 + y_ctb * (encoder->in.width_in_lcu << MAX_DEPTH)])->skipped) {
if (x_ctb > 0 && (&cur_pic->cu_array[MAX_DEPTH][x_ctb - 1 + y_ctb * (encoder->in.width_in_lcu << MAX_DEPTH)])->skipped) {
ctx_skip++;
}
if (y_ctb > 0 && (&encoder->in.cur_pic->cu_array[MAX_DEPTH][x_ctb + (y_ctb - 1) * (encoder->in.width_in_lcu << MAX_DEPTH)])->skipped) {
if (y_ctb > 0 && (&cur_pic->cu_array[MAX_DEPTH][x_ctb + (y_ctb - 1) * (encoder->in.width_in_lcu << MAX_DEPTH)])->skipped) {
ctx_skip++;
}
@@ -1235,7 +1239,7 @@ void encode_coding_tree(const encoder_control * const encoder, cabac_data *cabac
// ENDIF SKIP
// Prediction mode
if (encoder->in.cur_pic->slicetype != SLICE_I) {
if (cur_pic->slicetype != SLICE_I) {
cabac->ctx = &(cabac->ctx_cu_pred_mode_model);
CABAC_BIN(cabac, (cur_cu->type == CU_INTRA), "PredMode");
}
@@ -1282,7 +1286,7 @@ void encode_coding_tree(const encoder_control * const encoder, cabac_data *cabac
uint32_t ref_list_idx;
/*
// Void TEncSbac::codeInterDir( TComDataCU* pcCU, UInt uiAbsPartIdx )
if(encoder->in.cur_pic->slicetype == SLICE_B)
if(cur_pic->slicetype == SLICE_B)
{
// Code Inter Dir
const UInt uiInterDir = pcCU->getInterDir( uiAbsPartIdx ) - 1;
@@ -1418,11 +1422,11 @@ void encode_coding_tree(const encoder_control * const encoder, cabac_data *cabac
cu_info *above_cu = 0;
if (x_ctb > 0) {
left_cu = &encoder->in.cur_pic->cu_array[MAX_DEPTH][x_ctb - 1 + y_ctb * (encoder->in.width_in_lcu << MAX_DEPTH)];
left_cu = &cur_pic->cu_array[MAX_DEPTH][x_ctb - 1 + y_ctb * (encoder->in.width_in_lcu << MAX_DEPTH)];
}
// Don't take the above CU across the LCU boundary.
if (y_ctb > 0 && (y_ctb & 7) != 0) {
above_cu = &encoder->in.cur_pic->cu_array[MAX_DEPTH][x_ctb + (y_ctb - 1) * (encoder->in.width_in_lcu << MAX_DEPTH)];
above_cu = &cur_pic->cu_array[MAX_DEPTH][x_ctb + (y_ctb - 1) * (encoder->in.width_in_lcu << MAX_DEPTH)];
}
intra_get_dir_luma_predictor((x_ctb<<3) + (offset[j].x<<2),
@@ -1522,9 +1526,9 @@ void encode_coding_tree(const encoder_control * const encoder, cabac_data *cabac
{
unsigned y, x;
pixel *base_y = &encoder->in.cur_pic->y_data[x_ctb * (LCU_WIDTH >> (MAX_DEPTH)) + (y_ctb * (LCU_WIDTH >> (MAX_DEPTH))) * encoder->in.width];
pixel *base_u = &encoder->in.cur_pic->u_data[(x_ctb * (LCU_WIDTH >> (MAX_DEPTH + 1)) + (y_ctb * (LCU_WIDTH >> (MAX_DEPTH + 1))) * encoder->in.width / 2)];
pixel *base_v = &encoder->in.cur_pic->v_data[(x_ctb * (LCU_WIDTH >> (MAX_DEPTH + 1)) + (y_ctb * (LCU_WIDTH >> (MAX_DEPTH + 1))) * encoder->in.width / 2)];
pixel *base_y = &cur_pic->y_data[x_ctb * (LCU_WIDTH >> (MAX_DEPTH)) + (y_ctb * (LCU_WIDTH >> (MAX_DEPTH))) * encoder->in.width];
pixel *base_u = &cur_pic->u_data[(x_ctb * (LCU_WIDTH >> (MAX_DEPTH + 1)) + (y_ctb * (LCU_WIDTH >> (MAX_DEPTH + 1))) * encoder->in.width / 2)];
pixel *base_v = &cur_pic->v_data[(x_ctb * (LCU_WIDTH >> (MAX_DEPTH + 1)) + (y_ctb * (LCU_WIDTH >> (MAX_DEPTH + 1))) * encoder->in.width / 2)];
// Luma
for (y = 0; y < LCU_WIDTH >> depth; y++) {
@@ -1872,7 +1876,7 @@ void encode_transform_tree(const encoder_control * const encoder, cabac_data* ca
if (cbf_y) {
// Combine inverese quantized coefficients with the prediction to get
// reconstructed image.
//picture_set_block_residual(encoder->in.cur_pic,x_cu,y_cu,depth,1);
//picture_set_block_residual(cur_pic,x_cu,y_cu,depth,1);
i = 0;
for (y = 0; y < width; y++) {
for (x = 0; x < width; x++) {
@@ -1976,12 +1980,13 @@ void encode_transform_tree(const encoder_control * const encoder, cabac_data* ca
static void encode_transform_unit(const encoder_control * const encoder, cabac_data *cabac,
int x_pu, int y_pu, int depth, int tr_depth)
{
const picture * const cur_pic = encoder->in.cur_pic;
uint8_t width = LCU_WIDTH >> depth;
uint8_t width_c = (depth == MAX_PU_DEPTH ? width : width >> 1);
int x_cu = x_pu / 2;
int y_cu = y_pu / 2;
cu_info *cur_cu = &encoder->in.cur_pic->cu_array[MAX_DEPTH][x_cu + y_cu * (encoder->in.width_in_lcu << MAX_DEPTH)];
cu_info *cur_cu = &cur_pic->cu_array[MAX_DEPTH][x_cu + y_cu * (encoder->in.width_in_lcu << MAX_DEPTH)];
coefficient coeff_y[LCU_WIDTH*LCU_WIDTH+1];
coefficient coeff_u[LCU_WIDTH*LCU_WIDTH>>2];
@@ -2003,7 +2008,7 @@ static void encode_transform_unit(const encoder_control * const encoder, cabac_d
if (cbf_y) {
int x = x_pu * (LCU_WIDTH >> MAX_PU_DEPTH);
int y = y_pu * (LCU_WIDTH >> MAX_PU_DEPTH);
coefficient *orig_pos = &encoder->in.cur_pic->coeff_y[x + y * encoder->in.width];
coefficient *orig_pos = &cur_pic->coeff_y[x + y * encoder->in.width];
for (y = 0; y < width; y++) {
for (x = 0; x < width; x++) {
coeff_y[x+y*width] = orig_pos[x];
@@ -2071,8 +2076,8 @@ static void encode_transform_unit(const encoder_control * const encoder, cabac_d
x = x_cu * (LCU_WIDTH >> (MAX_DEPTH + 1));
y = y_cu * (LCU_WIDTH >> (MAX_DEPTH + 1));
}
orig_pos_u = &encoder->in.cur_pic->coeff_u[x + y * (encoder->in.width >> 1)];
orig_pos_v = &encoder->in.cur_pic->coeff_v[x + y * (encoder->in.width >> 1)];
orig_pos_u = &cur_pic->coeff_u[x + y * (encoder->in.width >> 1)];
orig_pos_v = &cur_pic->coeff_v[x + y * (encoder->in.width >> 1)];
for (y = 0; y < (width_c); y++) {
for (x = 0; x < (width_c); x++) {
coeff_u[x+y*(width_c)] = orig_pos_u[x];
@@ -2128,7 +2133,8 @@ void encode_transform_coeff(const encoder_control * const encoder, cabac_data *c
{
int32_t x_cu = x_pu / 2;
int32_t y_cu = y_pu / 2;
cu_info *cur_cu = &encoder->in.cur_pic->cu_array[MAX_DEPTH][x_cu + y_cu * (encoder->in.width_in_lcu << MAX_DEPTH)];
const picture * const cur_pic = encoder->in.cur_pic;
cu_info *cur_cu = &cur_pic->cu_array[MAX_DEPTH][x_cu + y_cu * (encoder->in.width_in_lcu << MAX_DEPTH)];
// NxN signifies implicit transform split at the first transform level.
// There is a similar implicit split for inter, but it is only used when

View file

@@ -166,7 +166,8 @@ void filter_deblock_edge_luma(const encoder_control * const encoder,
int32_t xpos, int32_t ypos,
int8_t depth, int8_t dir)
{
cu_info *cu_q = &encoder->in.cur_pic->cu_array[MAX_DEPTH][(xpos>>MIN_SIZE) + (ypos>>MIN_SIZE) * (encoder->in.width_in_lcu << MAX_DEPTH)];
const picture * const cur_pic = encoder->in.cur_pic;
cu_info *cu_q = &cur_pic->cu_array[MAX_DEPTH][(xpos>>MIN_SIZE) + (ypos>>MIN_SIZE) * (encoder->in.width_in_lcu << MAX_DEPTH)];
{
// Return if called with a coordinate which is not at CU or TU boundary.
@@ -178,12 +179,12 @@ void filter_deblock_edge_luma(const encoder_control * const encoder,
}
{
int32_t stride = encoder->in.cur_pic->width;
int32_t stride = cur_pic->width;
int32_t offset = stride;
int32_t beta_offset_div2 = encoder->beta_offset_div2;
int32_t tc_offset_div2 = encoder->tc_offset_div2;
// TODO: support 10+bits
pixel *orig_src = &encoder->in.cur_pic->y_recdata[xpos + ypos*stride];
pixel *orig_src = &cur_pic->y_recdata[xpos + ypos*stride];
pixel *src = orig_src;
int32_t step = 1;
cu_info *cu_p = NULL;
@@ -223,7 +224,7 @@ void filter_deblock_edge_luma(const encoder_control * const encoder,
}
// CU in the side we are filtering, update every 8-pixels
cu_p = &encoder->in.cur_pic->cu_array[MAX_DEPTH][(x_cu - (dir == EDGE_VER) + (dir == EDGE_HOR ? block_idx>>1 : 0)) +
cu_p = &cur_pic->cu_array[MAX_DEPTH][(x_cu - (dir == EDGE_VER) + (dir == EDGE_HOR ? block_idx>>1 : 0)) +
(y_cu - (dir == EDGE_HOR) + (dir == EDGE_VER ? block_idx>>1 : 0))
* (encoder->in.width_in_lcu << MAX_DEPTH)];
// Filter strength
@@ -290,7 +291,8 @@ void filter_deblock_edge_chroma(const encoder_control * const encoder,
int32_t x, int32_t y,
int8_t depth, int8_t dir)
{
cu_info *cu_q = &encoder->in.cur_pic->cu_array[MAX_DEPTH][(x>>(MIN_SIZE-1)) + (y>>(MIN_SIZE-1)) * (encoder->in.width_in_lcu << MAX_DEPTH)];
const picture * const cur_pic = encoder->in.cur_pic;
cu_info *cu_q = &cur_pic->cu_array[MAX_DEPTH][(x>>(MIN_SIZE-1)) + (y>>(MIN_SIZE-1)) * (encoder->in.width_in_lcu << MAX_DEPTH)];
// Chroma edges that do not lay on a 8x8 grid are not deblocked.
if (depth >= MAX_DEPTH) {
@@ -309,11 +311,11 @@ void filter_deblock_edge_chroma(const encoder_control * const encoder,
// For each subpart
{
int32_t stride = encoder->in.cur_pic->width >> 1;
int32_t stride = cur_pic->width >> 1;
int32_t tc_offset_div2 = encoder->tc_offset_div2;
// TODO: support 10+bits
pixel *src_u = &encoder->in.cur_pic->u_recdata[x + y*stride];
pixel *src_v = &encoder->in.cur_pic->v_recdata[x + y*stride];
pixel *src_u = &cur_pic->u_recdata[x + y*stride];
pixel *src_v = &cur_pic->v_recdata[x + y*stride];
// Init offset and step to EDGE_HOR
int32_t offset = stride;
int32_t step = 1;
@@ -342,7 +344,7 @@ void filter_deblock_edge_chroma(const encoder_control * const encoder,
(dir == EDGE_HOR ? x + blk_idx * 4 : x),
(dir == EDGE_VER ? y + blk_idx * 4 : y)
};
cu_p = &encoder->in.cur_pic->cu_array[MAX_DEPTH][(x_cu - (dir == EDGE_VER) + (dir == EDGE_HOR ? blk_idx : 0)) +
cu_p = &cur_pic->cu_array[MAX_DEPTH][(x_cu - (dir == EDGE_VER) + (dir == EDGE_HOR ? blk_idx : 0)) +
(y_cu - (dir == EDGE_HOR) + (dir == EDGE_VER ? blk_idx : 0))
* (encoder->in.width_in_lcu << MAX_DEPTH)];
@@ -383,7 +385,8 @@ void filter_deblock_edge_chroma(const encoder_control * const encoder,
*/
void filter_deblock_cu(const encoder_control * const encoder, int32_t x, int32_t y, int8_t depth, int32_t edge)
{
cu_info *cur_cu = &encoder->in.cur_pic->cu_array[MAX_DEPTH][x + y*(encoder->in.width_in_lcu << MAX_DEPTH)];
const picture * const cur_pic = encoder->in.cur_pic;
cu_info *cur_cu = &cur_pic->cu_array[MAX_DEPTH][x + y*(encoder->in.width_in_lcu << MAX_DEPTH)];
uint8_t split_flag = (cur_cu->depth > depth) ? 1 : 0;
uint8_t border_x = (encoder->in.width < x*(LCU_WIDTH >> MAX_DEPTH) + (LCU_WIDTH >> depth)) ? 1 : 0;
uint8_t border_y = (encoder->in.height < y*(LCU_WIDTH >> MAX_DEPTH) + (LCU_WIDTH >> depth)) ? 1 : 0;

View file

@@ -548,7 +548,7 @@ static void sao_calc_edge_block_dims(const picture *pic, color_index color_i,
rec->x = (rec->x == 0 ? 0 : -1);
}
void sao_reconstruct(picture *pic, const pixel *old_rec,
void sao_reconstruct(picture * pic, const pixel *old_rec,
unsigned x_ctb, unsigned y_ctb,
const sao_info *sao, color_index color_i)
{
@@ -861,28 +861,28 @@ void sao_search_luma(const picture *pic, unsigned x_ctb, unsigned y_ctb, sao_inf
void sao_reconstruct_frame(const encoder_control * const encoder)
{
vector2d lcu;
picture *pic = encoder->in.cur_pic;
picture * const cur_pic = encoder->in.cur_pic;
// These are needed because SAO needs the pre-SAO pixels form left and
// top LCUs. Single pixel wide buffers, like what search_lcu takes, would
// be enough though.
pixel *new_y_data = MALLOC(pixel, pic->width * pic->height);
pixel *new_u_data = MALLOC(pixel, (pic->width * pic->height) >> 2);
pixel *new_v_data = MALLOC(pixel, (pic->width * pic->height) >> 2);
memcpy(new_y_data, pic->y_recdata, sizeof(pixel) * pic->width * pic->height);
memcpy(new_u_data, pic->u_recdata, sizeof(pixel) * (pic->width * pic->height) >> 2);
memcpy(new_v_data, pic->v_recdata, sizeof(pixel) * (pic->width * pic->height) >> 2);
pixel *new_y_data = MALLOC(pixel, cur_pic->width * cur_pic->height);
pixel *new_u_data = MALLOC(pixel, (cur_pic->width * cur_pic->height) >> 2);
pixel *new_v_data = MALLOC(pixel, (cur_pic->width * cur_pic->height) >> 2);
memcpy(new_y_data, cur_pic->y_recdata, sizeof(pixel) * cur_pic->width * cur_pic->height);
memcpy(new_u_data, cur_pic->u_recdata, sizeof(pixel) * (cur_pic->width * cur_pic->height) >> 2);
memcpy(new_v_data, cur_pic->v_recdata, sizeof(pixel) * (cur_pic->width * cur_pic->height) >> 2);
for (lcu.y = 0; lcu.y < encoder->in.height_in_lcu; lcu.y++) {
for (lcu.x = 0; lcu.x < encoder->in.width_in_lcu; lcu.x++) {
unsigned stride = encoder->in.width_in_lcu;
sao_info *sao_luma = &pic->sao_luma[lcu.y * stride + lcu.x];
sao_info *sao_chroma = &pic->sao_chroma[lcu.y * stride + lcu.x];
sao_info *sao_luma = &cur_pic->sao_luma[lcu.y * stride + lcu.x];
sao_info *sao_chroma = &cur_pic->sao_chroma[lcu.y * stride + lcu.x];
// sao_do_rdo(encoder, lcu.x, lcu.y, sao_luma, sao_chroma);
sao_reconstruct(pic, new_y_data, lcu.x, lcu.y, sao_luma, COLOR_Y);
sao_reconstruct(pic, new_u_data, lcu.x, lcu.y, sao_chroma, COLOR_U);
sao_reconstruct(pic, new_v_data, lcu.x, lcu.y, sao_chroma, COLOR_V);
sao_reconstruct(cur_pic, new_y_data, lcu.x, lcu.y, sao_luma, COLOR_Y);
sao_reconstruct(cur_pic, new_u_data, lcu.x, lcu.y, sao_chroma, COLOR_U);
sao_reconstruct(cur_pic, new_v_data, lcu.x, lcu.y, sao_chroma, COLOR_V);
}
}

View file

@@ -372,7 +372,7 @@ static unsigned search_mv_full(unsigned depth,
*/
static int search_cu_inter(const encoder_control * const encoder, int x, int y, int depth, lcu_t *lcu)
{
picture *cur_pic = encoder->in.cur_pic;
const picture * const cur_pic = encoder->in.cur_pic;
uint32_t ref_idx = 0;
int x_local = (x&0x3f), y_local = (y&0x3f);
int x_cu = x>>3;
@@ -668,6 +668,7 @@ static int search_cu_intra(const encoder_control * const encoder,
const int x_px, const int y_px,
const int depth, lcu_t *lcu, cabac_data *cabac)
{
const picture * const cur_pic = encoder->in.cur_pic;
const vector2d lcu_px = { x_px & 0x3f, y_px & 0x3f };
const vector2d lcu_cu = { lcu_px.x >> 3, lcu_px.y >> 3 };
const int8_t cu_width = (LCU_WIDTH >> (depth));
@@ -697,8 +698,8 @@ static int search_cu_intra(const encoder_control * const encoder,
// Build reconstructed block to use in prediction with extrapolated borders
intra_build_reference_border(x_px, y_px, cu_width * 2 + 8,
rec_buffer, cu_width * 2 + 8, 0,
encoder->in.cur_pic->width,
encoder->in.cur_pic->height,
cur_pic->width,
cur_pic->height,
lcu);
// Find best intra mode for 2Nx2N.
@@ -878,7 +879,7 @@ static int search_cu(const encoder_control * const encoder, cabac_data *cabac, i
if (x + cu_width <= encoder->in.width &&
y + cu_width <= encoder->in.height)
{
picture *cur_pic = encoder->in.cur_pic;
const picture * const cur_pic = encoder->in.cur_pic;
if (cur_pic->slicetype != SLICE_I &&
depth >= MIN_INTER_SEARCH_DEPTH &&
@@ -905,7 +906,7 @@ static int search_cu(const encoder_control * const encoder, cabac_data *cabac, i
// mode search of adjacent CUs.
if (cur_cu->type == CU_INTRA) {
lcu_set_intra_mode(&work_tree[depth], x, y, depth, cur_cu->intra[PU_INDEX(x >> 2, y >> 2)].mode, cur_cu->part_size);
intra_recon_lcu(encoder, cabac, x, y, depth,&work_tree[depth],encoder->in.cur_pic->width,encoder->in.cur_pic->height);
intra_recon_lcu(encoder, cabac, x, y, depth,&work_tree[depth], cur_pic->width, cur_pic->height);
} else if (cur_cu->type == CU_INTER) {
inter_recon_lcu(encoder->ref->pics[cur_cu->inter.mv_ref], x, y, LCU_WIDTH>>depth, cur_cu->inter.mv, &work_tree[depth]);
encode_transform_tree(encoder, cabac, x, y, depth, &work_tree[depth]);
@@ -969,7 +970,8 @@ static void init_lcu_t(const encoder_control * const encoder, const int x, const
const int x_cu = x >> MAX_DEPTH;
const int y_cu = y >> MAX_DEPTH;
const int cu_array_width = encoder->in.width_in_lcu << MAX_DEPTH;
cu_info *const cu_array = encoder->in.cur_pic->cu_array[MAX_DEPTH];
const picture * const cur_pic = encoder->in.cur_pic;
cu_info *const cu_array = cur_pic->cu_array[MAX_DEPTH];
// Use top-left sub-cu of LCU as pointer to lcu->cu array to make things
// simpler.
@@ -1001,7 +1003,7 @@ static void init_lcu_t(const encoder_control * const encoder, const int x, const
}
// Copy top-right CU.
if (y_cu > 0 && x + LCU_WIDTH < encoder->in.cur_pic->width) {
if (y_cu > 0 && x + LCU_WIDTH < cur_pic->width) {
const cu_info *from_cu = &cu_array[(x_cu + LCU_CU_WIDTH) + (y_cu - 1) * cu_array_width];
cu_info *to_cu = &lcu->cu[LCU_T_CU_WIDTH*LCU_T_CU_WIDTH];
memcpy(to_cu, from_cu, sizeof(*to_cu));
@@ -1042,7 +1044,7 @@ static void init_lcu_t(const encoder_control * const encoder, const int x, const
// Copy LCU pixels.
{
const picture *pic = encoder->in.cur_pic;
const picture * const pic = encoder->in.cur_pic;
int pic_width = encoder->in.width;
int x_max = MIN(x + LCU_WIDTH, pic_width) - x;
int y_max = MIN(y + LCU_WIDTH, encoder->in.height) - y;
@@ -1072,8 +1074,9 @@ static void copy_lcu_to_cu_data(const encoder_control * const encoder, int x_px,
{
const int x_cu = x_px >> MAX_DEPTH;
const int y_cu = y_px >> MAX_DEPTH;
const picture * const cur_pic = encoder->in.cur_pic;
const int cu_array_width = encoder->in.width_in_lcu << MAX_DEPTH;
cu_info *const cu_array = encoder->in.cur_pic->cu_array[MAX_DEPTH];
cu_info *const cu_array = cur_pic->cu_array[MAX_DEPTH];
// Use top-left sub-cu of LCU as pointer to lcu->cu array to make things
// simpler.
@@ -1091,7 +1094,7 @@ static void copy_lcu_to_cu_data(const encoder_control * const encoder, int x_px,
// Copy pixels to picture.
{
picture *const pic = encoder->in.cur_pic;
picture * const pic = encoder->in.cur_pic;
const int pic_width = encoder->in.width;
const int x_max = MIN(x_px + LCU_WIDTH, pic_width) - x_px;
const int y_max = MIN(y_px + LCU_WIDTH, encoder->in.height) - y_px;