mirror of https://github.com/ultravideo/uvg266.git
Move CG reordering code from quant-avx2 to shared header
This commit is contained in:
parent d02207306d
commit 2fdbbe9730
@@ -3,7 +3,15 @@
 #include <immintrin.h>
 
-static INLINE __m256i scanord_read_vector(const int16_t *__restrict coeff, const uint32_t *__restrict scan, int8_t scan_mode, int32_t subpos, int32_t width)
+/*
+ * Reorder coefficients from raster to scan order
+ * Fun fact: Once upon a time, doing this in a loop looked like this:
+ * for (int32_t n = 0; n < width * height; n++) {
+ *   coef_reord[n] = coef[scan[n]];
+ *   q_coef_reord[n] = q_coef[scan[n]];
+ * }
+ */
+static INLINE void scanord_read_vector(const int16_t **__restrict coeffs, const uint32_t *__restrict scan, int8_t scan_mode, int32_t subpos, int32_t width, __m256i *result_vecs, const int n_bufs)
 {
   // For vectorized reordering of coef and q_coef
   const __m128i low128_shuffle_masks[3] = {
@@ -37,12 +45,15 @@ static INLINE __m256i scanord_read_vector(const int16_t *__restrict coeff, const
     scan[subpos] + width * 3,
   };
 
+  for (int i = 0; i < n_bufs; i++) {
+    const int16_t *__restrict coeff = coeffs[i];
+
   // NOTE: Upper means "higher in pixel order inside block", which implies
   // lower addresses (note the difference: HIGH and LOW vs UPPER and LOWER),
   // so upper 128b vector actually becomes the lower part of a 256-bit coeff
   // vector and lower vector the higher part!
-  __m128d coeffs_d_upper = _mm_castsi128_pd(_mm_set1_epi8(0));
-  __m128d coeffs_d_lower = _mm_castsi128_pd(_mm_set1_epi8(0));
+  __m128d coeffs_d_upper;
+  __m128d coeffs_d_lower;
 
   __m128i coeffs_upper;
   __m128i coeffs_lower;
@@ -53,6 +64,11 @@ static INLINE __m256i scanord_read_vector(const int16_t *__restrict coeff, const
   __m128i coeffs_rearr2_upper;
   __m128i coeffs_rearr2_lower;
 
+  // Zeroing these is actually unnecessary, but the compiler will whine
+  // about uninitialized values otherwise
+  coeffs_d_upper = _mm_setzero_pd();
+  coeffs_d_lower = _mm_setzero_pd();
+
   coeffs_d_upper = _mm_loadl_pd(coeffs_d_upper, (double *)(coeff + row_offsets[0]));
   coeffs_d_upper = _mm_loadh_pd(coeffs_d_upper, (double *)(coeff + row_offsets[1]));
 
@@ -70,11 +86,12 @@ static INLINE __m256i scanord_read_vector(const int16_t *__restrict coeff, const
   coeffs_rearr2_upper = _mm_shuffle_epi8(coeffs_rearr1_upper, invec_rearr_masks_upper[scan_mode]);
   coeffs_rearr2_lower = _mm_shuffle_epi8(coeffs_rearr1_lower, invec_rearr_masks_lower[scan_mode]);
 
-  // Why, oh why, is there no _mm256_setr_m128i intrinsic in the header that
-  // would do the exact same operation in the exact same way? :(
-  return _mm256_insertf128_si256(_mm256_castsi128_si256(coeffs_rearr2_upper),
+  // The Intel Intrinsics Guide talks about _mm256_setr_m128i but my headers
+  // lack such an instruction. What it does is essentially this anyway.
+  result_vecs[i] = _mm256_inserti128_si256(_mm256_castsi128_si256(coeffs_rearr2_upper),
                                  coeffs_rearr2_lower,
                                  1);
+  }
 }
 
 // If ints is completely zero, returns 16 in *first and -1 in *last
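
The rewritten comment above notes that the Intel Intrinsics Guide documents _mm256_setr_m128i even though the author's headers lack it. As a side note, on a toolchain that does ship that intrinsic (an assumption, not something this commit relies on), the cast-plus-insert composition could be written as the sketch below; the helper name is made up for illustration.

#include <immintrin.h>

// Illustrative alternative only (assumes the toolchain provides
// _mm256_setr_m128i): returns a 256-bit vector with lo128 in bits 0..127 and
// hi128 in bits 128..255, which is what the cast-plus-insert sequence does
// with coeffs_rearr2_upper as lo128 and coeffs_rearr2_lower as hi128.
static inline __m256i concat_epi128(__m128i lo128, __m128i hi128)
{
  return _mm256_setr_m128i(lo128, hi128);
}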
@@ -365,7 +365,7 @@ void kvz_encode_coeff_nxn_avx2(encoder_state_t * const state,
   __m256i coeffs_r;
   for (int32_t i = 0; i <= scan_cg_last; i++) {
     int32_t subpos = i * 16;
-    coeffs_r = scanord_read_vector(coeff, scan, scan_mode, subpos, width);
+    scanord_read_vector(&coeff, scan, scan_mode, subpos, width, &coeffs_r, 1);
     _mm256_store_si256((__m256i *)(coeff_reord + subpos), coeffs_r);
   }
 
@@ -28,6 +28,7 @@
 #include <immintrin.h>
 #include <stdlib.h>
 
+#include "avx2_common_functions.h"
 #include "cu.h"
 #include "encoder.h"
 #include "encoderstate.h"
@@ -66,23 +67,6 @@ static INLINE int32_t hsum32_16x16i(__m256i src)
   return hsum32_8x32i(c);
 }
 
-// If ints is completely zero, returns 16 in *first and -1 in *last
-static INLINE void get_first_last_nz_int16(__m256i ints, int32_t *first, int32_t *last)
-{
-  // Note that nonzero_bytes will always have both bytes set for a set word
-  // even if said word only had one of its bytes set, because we're doing 16
-  // bit wide comparisons. No big deal, just shift results to the right by one
-  // bit to have the results represent indexes of first set words, not bytes.
-  // Another note, it has to use right shift instead of division to preserve
-  // behavior on an all-zero vector (-1 / 2 == 0, but -1 >> 1 == -1)
-  const __m256i zero = _mm256_setzero_si256();
-
-  __m256i zeros = _mm256_cmpeq_epi16(ints, zero);
-  uint32_t nonzero_bytes = ~((uint32_t)_mm256_movemask_epi8(zeros));
-  *first = ( (int32_t)_tzcnt_u32(nonzero_bytes)) >> 1;
-  *last  = (31 - (int32_t)_lzcnt_u32(nonzero_bytes)) >> 1;
-}
-
 // Rearranges a 16x32b double vector into a format suitable for a stable SIMD
 // max algorithm:
 // (abcd|efgh) (ijkl|mnop) => (aceg|ikmo) (bdfh|jlnp)
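
This hunk drops get_first_last_nz_int16 from quant-avx2.c; the same comment line is visible as context in the shared header's hunks above, so the helper presumably already lives there and only the duplicate is removed. For reference, a scalar sketch of what it computes (illustrative only, not code from the commit), including the documented all-zero case of *first = 16 and *last = -1:

#include <stdint.h>

// Scalar equivalent of the dropped AVX2 helper (illustrative sketch):
// find the indexes of the first and last nonzero 16-bit word in a block
// of 16. For an all-zero input this yields *first = 16 and *last = -1,
// matching the comment on the vector version.
static void get_first_last_nz_int16_scalar(const int16_t v[16],
                                            int32_t *first, int32_t *last)
{
  *first = 16;
  *last  = -1;
  for (int32_t i = 0; i < 16; i++) {
    if (v[i] != 0) {
      if (*first == 16) {
        *first = i;
      }
      *last = i;
    }
  }
}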
@@ -311,31 +295,6 @@ void kvz_quant_flat_avx2(const encoder_state_t * const state, coeff_t * __restri
   const int32_t add = ((state->frame->slicetype == KVZ_SLICE_I) ? 171 : 85) << (q_bits - 9);
   const int32_t q_bits8 = q_bits - 8;
 
-  // For vectorized reordering of coef and q_coef
-  const __m128i low128_shuffle_masks[3] = {
-    _mm_setr_epi8(10,11, 4, 5, 12,13, 0, 1, 6, 7, 14,15, 8, 9, 2, 3),
-    _mm_setr_epi8( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,11, 12,13, 14,15),
-    _mm_setr_epi8( 4, 5, 6, 7, 0, 1, 2, 3, 12,13, 14,15, 8, 9, 10,11),
-  };
-
-  const __m128i blend_masks[3] = {
-    _mm_setr_epi16( 0, 0, 0, -1, 0, 0, -1, -1),
-    _mm_setr_epi16( 0, 0, 0, 0, 0, 0, 0, 0),
-    _mm_setr_epi16( 0, 0, -1, -1, 0, 0, -1, -1),
-  };
-
-  const __m128i invec_rearr_masks_upper[3] = {
-    _mm_setr_epi8( 0, 1, 8, 9, 2, 3, 6, 7, 10,11, 4, 5, 12,13, 14,15),
-    _mm_setr_epi8( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,11, 12,13, 14,15),
-    _mm_setr_epi8( 0, 1, 8, 9, 4, 5, 12,13, 2, 3, 10,11, 6, 7, 14,15),
-  };
-
-  const __m128i invec_rearr_masks_lower[3] = {
-    _mm_setr_epi8(12,13, 6, 7, 0, 1, 2, 3, 14,15, 4, 5, 8, 9, 10,11),
-    _mm_setr_epi8( 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10,11, 12,13, 14,15),
-    _mm_setr_epi8( 4, 5, 12,13, 0, 1, 8, 9, 6, 7, 14,15, 2, 3, 10,11),
-  };
-
   assert(quant_coeff[0] <= (1 << 15) - 1 && quant_coeff[0] >= -(1 << 15)); //Assuming flat values to fit int16_t
 
   uint32_t ac_sum = 0;
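
The low128_shuffle_masks table removed here also appears (as context) in the shared header's hunks above, so only the local duplicate goes away; presumably the same holds for the blend and rearrange masks. As a worked illustration of what one of these controls does, derived from the byte values rather than from code in the commit: applying low128_shuffle_masks[2] with _mm_shuffle_epi8 to eight 16-bit words permutes them into the order 2,3,0,1,6,7,4,5, i.e. it swaps the two word pairs inside each 64-bit half.

#include <stdint.h>
#include <stdio.h>
#include <immintrin.h>

// Worked example (illustrative only): low128_shuffle_masks[2] used as a
// _mm_shuffle_epi8 control over eight 16-bit words.
int main(void)
{
  const __m128i mask  = _mm_setr_epi8( 4, 5, 6, 7, 0, 1, 2, 3, 12,13, 14,15, 8, 9, 10,11);
  const __m128i words = _mm_setr_epi16(0, 1, 2, 3, 4, 5, 6, 7);

  int16_t out[8];
  _mm_storeu_si128((__m128i *)out, _mm_shuffle_epi8(words, mask));

  for (int i = 0; i < 8; i++) {
    printf("%d ", out[i]);  // prints: 2 3 0 1 6 7 4 5
  }
  printf("\n");
  return 0;
}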
@@ -383,86 +342,15 @@ void kvz_quant_flat_avx2(const encoder_state_t * const state, coeff_t * __restri
   if (!encoder->cfg.signhide_enable || ac_sum < 2)
     return;
 
-  /*
-   * Reorder coef and q_coef for sequential access
-   * Fun fact: Once upon a time, this loop looked like this:
-   * for (int32_t n = 0; n < width * height; n++) {
-   *   coef_reord[n] = coef[scan[n]];
-   *   q_coef_reord[n] = q_coef[scan[n]];
-   * }
-   */
   assert(VEC_WIDTH == SCAN_SET_SIZE);
   for (int32_t subpos = (width * height - 1) & (~(VEC_WIDTH - 1)); subpos >= 0; subpos -= VEC_WIDTH) {
-    const size_t row_offsets[4] = {
-      scan[subpos] + width * 0,
-      scan[subpos] + width * 1,
-      scan[subpos] + width * 2,
-      scan[subpos] + width * 3,
-    };
-
-    // NOTE: Upper means "higher in pixel order inside block", which implies
-    // lower addresses (note the difference: HIGH and LOW vs UPPER and LOWER),
-    // so upper 128b vector actually becomes the lower part of a 256-bit coeff
-    // vector and lower vector the higher part!
-    __m128d coefs_d_upper = _mm_setzero_pd();
-    __m128d coefs_d_lower = _mm_setzero_pd();
-    __m128d q_coefs_d_upper = _mm_setzero_pd();
-    __m128d q_coefs_d_lower = _mm_setzero_pd();
-
-    __m128i coefs_upper;
-    __m128i coefs_lower;
-    __m128i q_coefs_upper;
-    __m128i q_coefs_lower;
-
-    __m128i coefs_rearr1_upper;
-    __m128i coefs_rearr1_lower;
-    __m128i q_coefs_rearr1_upper;
-    __m128i q_coefs_rearr1_lower;
-
-    __m128i coefs_rearr2_upper;
-    __m128i coefs_rearr2_lower;
-    __m128i q_coefs_rearr2_upper;
-    __m128i q_coefs_rearr2_lower;
-
-    coefs_d_upper = _mm_loadl_pd(coefs_d_upper, (double *)(coef + row_offsets[0]));
-    coefs_d_upper = _mm_loadh_pd(coefs_d_upper, (double *)(coef + row_offsets[1]));
-    q_coefs_d_upper = _mm_loadl_pd(q_coefs_d_upper, (double *)(q_coef + row_offsets[0]));
-    q_coefs_d_upper = _mm_loadh_pd(q_coefs_d_upper, (double *)(q_coef + row_offsets[1]));
-
-    coefs_d_lower = _mm_loadl_pd(coefs_d_lower, (double *)(coef + row_offsets[2]));
-    coefs_d_lower = _mm_loadh_pd(coefs_d_lower, (double *)(coef + row_offsets[3]));
-    q_coefs_d_lower = _mm_loadl_pd(q_coefs_d_lower, (double *)(q_coef + row_offsets[2]));
-    q_coefs_d_lower = _mm_loadh_pd(q_coefs_d_lower, (double *)(q_coef + row_offsets[3]));
-
-    coefs_upper = _mm_castpd_si128(coefs_d_upper);
-    coefs_lower = _mm_castpd_si128(coefs_d_lower);
-    q_coefs_upper = _mm_castpd_si128(q_coefs_d_upper);
-    q_coefs_lower = _mm_castpd_si128(q_coefs_d_lower);
-
-    coefs_lower = _mm_shuffle_epi8(coefs_lower, low128_shuffle_masks[scan_idx]);
-    q_coefs_lower = _mm_shuffle_epi8(q_coefs_lower, low128_shuffle_masks[scan_idx]);
-
-    coefs_rearr1_upper = _mm_blendv_epi8(coefs_upper, coefs_lower, blend_masks[scan_idx]);
-    coefs_rearr1_lower = _mm_blendv_epi8(coefs_lower, coefs_upper, blend_masks[scan_idx]);
-    q_coefs_rearr1_upper = _mm_blendv_epi8(q_coefs_upper, q_coefs_lower, blend_masks[scan_idx]);
-    q_coefs_rearr1_lower = _mm_blendv_epi8(q_coefs_lower, q_coefs_upper, blend_masks[scan_idx]);
-
-    coefs_rearr2_upper = _mm_shuffle_epi8(coefs_rearr1_upper, invec_rearr_masks_upper[scan_idx]);
-    coefs_rearr2_lower = _mm_shuffle_epi8(coefs_rearr1_lower, invec_rearr_masks_lower[scan_idx]);
-    q_coefs_rearr2_upper = _mm_shuffle_epi8(q_coefs_rearr1_upper, invec_rearr_masks_upper[scan_idx]);
-    q_coefs_rearr2_lower = _mm_shuffle_epi8(q_coefs_rearr1_lower, invec_rearr_masks_lower[scan_idx]);
-
-    // Why, oh why, is there no _mm256_setr_m128i intrinsic in the header that
-    // would do the exact same operation in the exact same way? :(
-    __m256i v_coef = _mm256_insertf128_si256(_mm256_castsi128_si256(coefs_rearr2_upper),
-                                             coefs_rearr2_lower,
-                                             1);
-
-    __m256i q_coefs = _mm256_insertf128_si256(_mm256_castsi128_si256(q_coefs_rearr2_upper),
-                                              q_coefs_rearr2_lower,
-                                              1);
-
-    // Reordering done
-
+    const int16_t *coeffs[2] = {coef, q_coef};
+    __m256i result_coeffs[2];
+    __m256i v_coef, q_coefs;
+
+    scanord_read_vector(coeffs, scan, scan_idx, subpos, width, result_coeffs, 2);
+    v_coef = result_coeffs[0];
+    q_coefs = result_coeffs[1];
+
     __m256i v_level = _mm256_abs_epi16(v_coef);
     __m256i low_a = _mm256_unpacklo_epi16(v_level, _mm256_set1_epi16(0));
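
One way to sanity-check a refactor like this (a hypothetical test sketch, not part of the commit) is to compare the helper's output for a single coefficient group against the scalar loop quoted in the header comment. The function below assumes the shared header that declares scanord_read_vector (avx2_common_functions.h, per the new #include) has been included.

#include <assert.h>
#include <stdint.h>
#include <string.h>
#include <immintrin.h>

// Hypothetical check: the reordered group produced by scanord_read_vector
// should match reord[n] = coeff[scan[subpos + n]], per the comment at the
// top of the helper.
static void check_scanord_read_vector(const int16_t *coeff, const uint32_t *scan,
                                      int8_t scan_mode, int32_t subpos, int32_t width)
{
  int16_t reference[16];
  for (int32_t n = 0; n < 16; n++) {
    reference[n] = coeff[scan[subpos + n]];
  }

  const int16_t *bufs[1] = { coeff };
  __m256i vec;
  scanord_read_vector(bufs, scan, scan_mode, subpos, width, &vec, 1);

  int16_t from_vec[16];
  _mm256_storeu_si256((__m256i *)from_vec, vec);
  assert(memcmp(reference, from_vec, sizeof(reference)) == 0);
}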