Added some switches to prevent segmentation faults from reading out of bounds

Reima Hyvönen 2019-03-22 13:29:57 +02:00 committed by Pauli Oikkonen
parent 203580047d
commit d05e750ebe

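For context, the fix follows the usual pattern for vectorizing a row whose width is not a multiple of the vector width: run the wide AVX2 body only while a full load fits inside the block, then finish the row with a narrower or scalar path so no load reads past the buffer. The sketch below is a minimal standalone illustration of that idea; it is not code from this patch, and the names sum8 and sum_row are made up.

#include <stdio.h>

/* Stand-ins for the 8-wide AVX2 body and the scalar fallback. */
static int sum8(const unsigned char *p) {
  int s = 0;
  for (int i = 0; i < 8; ++i) s += p[i];  /* imagine an 8-lane vector op here */
  return s;
}

static int sum_row(const unsigned char *row, int width) {
  int x = 0, s = 0;
  /* Wide body: only while a full 8-pixel load stays inside the row. */
  for (; x + 8 <= width; x += 8) s += sum8(row + x);
  /* Tail: the remaining 0..7 pixels handled one at a time, no over-read. */
  for (; x < width; ++x) s += row[x];
  return s;
}

int main(void) {
  unsigned char row[13];
  for (int i = 0; i < 13; ++i) row[i] = (unsigned char)i;
  printf("%d\n", sum_row(row, 13));  /* 78: the tail covers the last 5 pixels */
  return 0;
}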

@@ -123,11 +123,16 @@ static int sao_edge_ddistortion_avx2(const kvz_pixel *orig_data,
tmp_sum_epi32 = _mm256_add_epi32(tmp_sum_epi32, _mm256_andnot_si256(offset_zeros_epi32, _mm256_sub_epi32(tmp1_vec_epi32, tmp2_vec_epi32)));
}
tmp_diff_epi32 = _mm256_setzero_si256();
bool use_6_elements = block_width - x - 1 == 6;
switch (use_6_elements)
{
case true:;
// Load the last 6 pixels to use
const kvz_pixel *c_data = &rec_data[y * block_width + x];
const kvz_pixel *c_data2 = &rec_data[y * block_width + x + 2];
const kvz_pixel *c_data4 = &rec_data[y * block_width + x + 4];
kvz_pixel c = c_data[0];
@@ -163,6 +168,28 @@ static int sao_edge_ddistortion_avx2(const kvz_pixel *orig_data,
sum += (_mm256_extract_epi32(tmp_sum_epi32, 0) + _mm256_extract_epi32(tmp_sum_epi32, 4));
default:
// Only if there's an odd number of pixels left
for (int i = x; i < block_width - 1; ++i) {
const kvz_pixel *c_data = &rec_data[y * block_width + i];
kvz_pixel a = c_data[a_ofs.y * block_width + a_ofs.x];
kvz_pixel c = c_data[0];
kvz_pixel b = c_data[b_ofs.y * block_width + b_ofs.x];
int offset = offsets[sao_calc_eo_cat(a, b, c)];
if (offset != 0) {
int diff = orig_data[y * block_width + i] - c;
// Offset is applied to reconstruction, so it is subtracted from diff.
sum += (diff - offset) * (diff - offset) - diff * diff;
}
}
break;
}
}
return sum;
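A note on the scalar fallback in the hunk above: the accumulated term (diff - offset) * (diff - offset) - diff * diff is the change in squared error from applying the offset to the reconstruction, and it simplifies to offset * (offset - 2 * diff). A tiny standalone check of that identity (not part of the patch):

#include <assert.h>
#include <stdio.h>

int main(void) {
  for (int diff = -10; diff <= 10; ++diff) {
    for (int offset = -4; offset <= 4; ++offset) {
      int a = (diff - offset) * (diff - offset) - diff * diff;
      int b = offset * (offset - 2 * diff);
      assert(a == b);  /* (d-o)^2 - d^2 == o^2 - 2*d*o */
    }
  }
  printf("identity holds\n");
  return 0;
}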
@@ -226,7 +253,7 @@ static void calc_sao_edge_dir_avx2(const kvz_pixel *orig_data,
//--------------------------------------------------------------------------
// v_cat == 0
__m256i mask_epi32 = _mm256_cmpeq_epi32(zeros_epi32, v_cat_epi32);
int temp_cnt = _mm_popcnt_u32(_mm256_movemask_epi8(mask_epi32)) / 4;
cat_sum_cnt[1][0] += temp_cnt;
temp_mem_epi32 = _mm256_load_si256((__m256i*)&orig_data[y * block_width + x] - c);
temp_epi32 = _mm256_and_si256(mask_epi32, temp_mem_epi32);
@@ -292,6 +319,10 @@ static void calc_sao_edge_dir_avx2(const kvz_pixel *orig_data,
temp = (int*)&tmp_four_values_epi32_sum;
cat_sum_cnt[0][4] += (temp[0] + temp[1]);
bool use_6_elements = block_width - x - 1 == 6;
switch (use_6_elements) {
case true:;
// Load the last 6 pixels to use
const kvz_pixel *c_data = &rec_data[y * block_width + x];
@@ -371,11 +402,28 @@ static void calc_sao_edge_dir_avx2(const kvz_pixel *orig_data,
temp = (int*)&tmp_four_values_epi32_sum;
cat_sum_cnt[0][4] += (temp[0] + temp[1]);
break;
default:
// Use when there's an odd number of pixels left
for (int i = x; i < block_width - 1; ++i) {
const kvz_pixel *c_data = &rec_data[y * block_width + i];
kvz_pixel a = c_data[a_ofs.y * block_width + a_ofs.x];
kvz_pixel c = c_data[0];
kvz_pixel b = c_data[b_ofs.y * block_width + b_ofs.x];
int eo_cat = sao_calc_eo_cat(a, b, c);
cat_sum_cnt[0][eo_cat] += orig_data[y * block_width + i] - c;
cat_sum_cnt[1][eo_cat] += 1;
}
break;
}
}
}
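One remark on the construct both hunks add: switch (use_6_elements) with case true: and default: behaves exactly like an if/else on the bool, and the lone semicolon in case true:; is needed because a C label must be followed by a statement and (before C23) a declaration does not count as one, so the empty statement lets the const kvz_pixel *c_data declarations start right after the label. An equivalent, more conventional shape is sketched below for illustration only; it is not what the patch does.

#include <stdbool.h>
#include <stdio.h>

static int pick_switch(bool use_6_elements) {
  switch (use_6_elements) {
  case true:;                    /* empty statement so a declaration may follow the label */
    const int wide_path = 6;
    return wide_path;
  default:
    return 1;                    /* scalar remainder path */
  }
}

static int pick_if(bool use_6_elements) {
  if (use_6_elements) {
    const int wide_path = 6;
    return wide_path;
  } else {
    return 1;
  }
}

int main(void) {
  printf("%d %d\n", pick_switch(true), pick_if(true));    /* 6 6 */
  printf("%d %d\n", pick_switch(false), pick_if(false));  /* 1 1 */
  return 0;
}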
static void sao_reconstruct_color_avx2(const encoder_control_t * const encoder,
const kvz_pixel *rec_data,
kvz_pixel *new_rec_data,