From 17babfffa481a438182cb423363009b7ce4f1445 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Reima=20Hyv=C3=B6nen?= Date: Mon, 25 Jun 2018 17:06:16 +0300 Subject: [PATCH] 25.6 working optimation, ~50% faster than original --- src/inter.c | 37 +- src/strategies/avx2/picture-avx2.c | 402 ++++++ src/strategies/generic/picture-generic.c | 80 +- src/strategies/strategies-picture.c | 2 +- src/strategies/strategies-picture.h | 10 +- src/strategies/x86_asm/x86inc.asm | 1456 ++++++++++++++++++++++ tests/bipred_generic_tests.c | 218 ++++ tests/tests_main.c | 3 + 8 files changed, 2146 insertions(+), 62 deletions(-) create mode 100644 src/strategies/x86_asm/x86inc.asm create mode 100644 tests/bipred_generic_tests.c diff --git a/src/inter.c b/src/inter.c index 33c6ff56..9b336bcb 100644 --- a/src/inter.c +++ b/src/inter.c @@ -464,46 +464,21 @@ void kvz_inter_recon_bipred(const encoder_state_t * const state, hi_prec_buf_t* high_precision_rec1 = 0; if (hi_prec_chroma_rec0) high_precision_rec0 = kvz_hi_prec_buf_t_alloc(LCU_WIDTH*LCU_WIDTH); if (hi_prec_chroma_rec1) high_precision_rec1 = kvz_hi_prec_buf_t_alloc(LCU_WIDTH*LCU_WIDTH); + + //Reconstruct both predictors inter_recon_unipred(state, ref1, xpos, ypos, width, height, mv_param[0], lcu, high_precision_rec0); if (!hi_prec_luma_rec0){ - memcpy(temp_lcu_y, lcu->rec.y, sizeof(kvz_pixel) * 64 * 64); + memcpy(temp_lcu_y, lcu->rec.y, sizeof(kvz_pixel) * 64 * 64); // copy to temp_lcu_y } if (!hi_prec_chroma_rec0){ - memcpy(temp_lcu_u, lcu->rec.u, sizeof(kvz_pixel) * 32 * 32); - memcpy(temp_lcu_v, lcu->rec.v, sizeof(kvz_pixel) * 32 * 32); + memcpy(temp_lcu_u, lcu->rec.u, sizeof(kvz_pixel) * 32 * 32); // copy to temp_lcu_u + memcpy(temp_lcu_v, lcu->rec.v, sizeof(kvz_pixel) * 32 * 32); // copy to temp_lcu_v } inter_recon_unipred(state, ref2, xpos, ypos, width, height, mv_param[1], lcu, high_precision_rec1); - kvz_inter_recon_bipred_test(hi_prec_luma_rec0, hi_prec_luma_rec1, hi_prec_chroma_rec0, hi_prec_chroma_rec1, height, width, ypos, xpos, high_precision_rec0, high_precision_rec1, lcu); - - /* // After reconstruction, merge the predictors by taking an average of each pixel - for (temp_y = 0; temp_y < height; ++temp_y) { - int y_in_lcu = ((ypos + temp_y) & ((LCU_WIDTH)-1)); - for (temp_x = 0; temp_x < width; ++temp_x) { - int x_in_lcu = ((xpos + temp_x) & ((LCU_WIDTH)-1)); - int16_t sample0_y = (hi_prec_luma_rec0 ? high_precision_rec0->y[y_in_lcu * LCU_WIDTH + x_in_lcu] : (temp_lcu_y[y_in_lcu * LCU_WIDTH + x_in_lcu] << (14 - KVZ_BIT_DEPTH))); - int16_t sample1_y = (hi_prec_luma_rec1 ? high_precision_rec1->y[y_in_lcu * LCU_WIDTH + x_in_lcu] : (lcu->rec.y[y_in_lcu * LCU_WIDTH + x_in_lcu] << (14 - KVZ_BIT_DEPTH))); - lcu->rec.y[y_in_lcu * LCU_WIDTH + x_in_lcu] = (kvz_pixel)kvz_fast_clip_32bit_to_pixel((sample0_y + sample1_y + offset) >> shift); - } - - } - - for (temp_y = 0; temp_y < height >> 1; ++temp_y) { - int y_in_lcu = (((ypos >> 1) + temp_y) & (LCU_WIDTH_C - 1)); - for (temp_x = 0; temp_x < width >> 1; ++temp_x) { - int x_in_lcu = (((xpos >> 1) + temp_x) & (LCU_WIDTH_C - 1)); - int16_t sample0_u = (hi_prec_chroma_rec0 ? high_precision_rec0->u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] : (temp_lcu_u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] << (14 - KVZ_BIT_DEPTH))); - int16_t sample1_u = (hi_prec_chroma_rec1 ? 
high_precision_rec1->u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] : (lcu->rec.u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] << (14 - KVZ_BIT_DEPTH))); - lcu->rec.u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] = (kvz_pixel)kvz_fast_clip_32bit_to_pixel((sample0_u + sample1_u + offset) >> shift); - - int16_t sample0_v = (hi_prec_chroma_rec0 ? high_precision_rec0->v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] : (temp_lcu_v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] << (14 - KVZ_BIT_DEPTH))); - int16_t sample1_v = (hi_prec_chroma_rec1 ? high_precision_rec1->v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] : (lcu->rec.v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] << (14 - KVZ_BIT_DEPTH))); - lcu->rec.v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] = (kvz_pixel)kvz_fast_clip_32bit_to_pixel((sample0_v + sample1_v + offset) >> shift); - } - } - */ + kvz_inter_recon_bipred_generic(hi_prec_luma_rec0, hi_prec_luma_rec1, hi_prec_chroma_rec0, hi_prec_chroma_rec1, height, width, ypos, xpos, high_precision_rec0, high_precision_rec1, lcu, temp_lcu_y, temp_lcu_u, temp_lcu_v); if (high_precision_rec0 != 0) kvz_hi_prec_buf_t_free(high_precision_rec0); if (high_precision_rec1 != 0) kvz_hi_prec_buf_t_free(high_precision_rec1); diff --git a/src/strategies/avx2/picture-avx2.c b/src/strategies/avx2/picture-avx2.c index d64d324d..53363ad3 100644 --- a/src/strategies/avx2/picture-avx2.c +++ b/src/strategies/avx2/picture-avx2.c @@ -25,6 +25,9 @@ #if COMPILE_INTEL_AVX2 #include +#include +#include +#include #include #include "kvazaar.h" @@ -714,6 +717,403 @@ static unsigned pixels_calc_ssd_avx2(const kvz_pixel *const ref, const kvz_pixel } } + +static void inter_recon_bipred_avx2(const int hi_prec_luma_rec0, + const int hi_prec_luma_rec1, + const int hi_prec_chroma_rec0, + const int hi_prec_chroma_rec1, + const int height, + const int width, + const int ypos, + const int xpos, + const hi_prec_buf_t*high_precision_rec0, + const hi_prec_buf_t*high_precision_rec1, + lcu_t* lcu, + kvz_pixel temp_lcu_y[LCU_WIDTH*LCU_WIDTH], + kvz_pixel temp_lcu_u[LCU_WIDTH_C*LCU_WIDTH_C], + kvz_pixel temp_lcu_v[LCU_WIDTH_C*LCU_WIDTH_C]) { + + int y_in_lcu; + int x_in_lcu; + + int shift = 15 - KVZ_BIT_DEPTH; + int offset = 1 << (shift-1); + int shift_left = 14 - KVZ_BIT_DEPTH; + + __m256i offset_epi32 = _mm256_set1_epi32(offset); + __m256i temp_epi32; + __m256i temp_epi16; + + __m256i temp_epi8; + __m256i temp_zeros_256 = _mm256_setzero_si256(); + __m256i temp_y_epi32, temp_u_epi32, temp_v_epi32; + + __m256i temp_epi32_u = _mm256_setzero_si256(); + __m256i temp_epi32_v = _mm256_setzero_si256(); + + __m256i sample0_epi32; + __m256i sample1_epi32; + + __m256i temp_epi16_u = _mm256_setzero_si256(); + __m256i temp_epi16_v = _mm256_setzero_si256(); + + __m256i final_epi8_256 = _mm256_setzero_si256(); + + __m128i offset_4 = _mm_set1_epi32(offset); + __m128i sample_epi32; + __m128i sample0_y_epi32, sample1_y_epi32, sample0_u_epi32, sample1_u_epi32, sample0_v_epi32, sample1_v_epi32; + __m128i temp_zeros_128 = _mm_setzero_si128(); + + __m256i idx = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0); + + __m128i final_epi8_128; + + + switch (width) + { + + case 4: + + for (int temp_y = 0; temp_y < height; ++temp_y) { + + y_in_lcu = ((ypos + temp_y) & ((LCU_WIDTH)-1)); + x_in_lcu = ((xpos) & ((LCU_WIDTH)-1)); + + + + sample0_y_epi32 = hi_prec_luma_rec0 ? _mm_cvtepu16_epi32(_mm_loadl_epi64((__m128i*)&(high_precision_rec0->y[y_in_lcu * LCU_WIDTH + x_in_lcu]))) : + _mm_slli_epi32(_mm_cvtepu8_epi32((_mm_loadl_epi64((__m128i*) &(temp_lcu_y[y_in_lcu * LCU_WIDTH + x_in_lcu])))), shift_left); + + sample1_y_epi32 = hi_prec_luma_rec1 ? 
_mm_cvtepu16_epi32(_mm_loadl_epi64((__m128i*)&(high_precision_rec1->y[y_in_lcu * LCU_WIDTH + x_in_lcu]))) : + _mm_slli_epi32(_mm_cvtepu8_epi32((_mm_loadl_epi64((__m128i*) &(lcu->rec.y[y_in_lcu * LCU_WIDTH + x_in_lcu])))), shift_left); + + + // (sample1 + sample2 + offset)>>shift + sample_epi32 = _mm_add_epi32(_mm_add_epi32(sample0_y_epi32, sample1_y_epi32), offset_4); + sample_epi32 = _mm_srai_epi32(sample_epi32, shift); + + final_epi8_128 = _mm_packus_epi16(_mm_packus_epi32(sample_epi32, temp_zeros_128), temp_zeros_128); + + int8_t*temp_int_y = (int8_t*)&final_epi8_128; + + for (int i = 0; i < 4; i++) { + lcu->rec.y[y_in_lcu * LCU_WIDTH + x_in_lcu + i] = temp_int_y[i]; + } + + + if (temp_y < height >> 1) { + y_in_lcu = (((ypos >> 1) + temp_y) & (LCU_WIDTH_C - 1)); + x_in_lcu = (((xpos >> 1)) & (LCU_WIDTH_C - 1)); + + sample0_u_epi32 = hi_prec_chroma_rec0 ? _mm_cvtepu16_epi32(_mm_loadl_epi64((__m128i*)&(high_precision_rec0->u[y_in_lcu * LCU_WIDTH_C + x_in_lcu]))) : + _mm_slli_epi32(_mm_cvtepu8_epi32((_mm_loadl_epi64((__m128i*) &(temp_lcu_u[y_in_lcu * LCU_WIDTH_C + x_in_lcu])))), shift_left); + + sample1_u_epi32 = hi_prec_chroma_rec1 ? _mm_cvtepu16_epi32(_mm_loadl_epi64((__m128i*)&(high_precision_rec1->u[y_in_lcu * LCU_WIDTH_C + x_in_lcu]))) : + _mm_slli_epi32(_mm_cvtepu8_epi32((_mm_loadl_epi64((__m128i*) &(lcu->rec.u[y_in_lcu * LCU_WIDTH_C + x_in_lcu])))), shift_left); + + // (sample1 + sample2 + offset)>>shift + sample_epi32 = _mm_add_epi32(_mm_add_epi32(sample0_u_epi32, sample1_u_epi32), offset_4); + sample_epi32 = _mm_srai_epi32(sample_epi32, shift); + + __m128i temp_u = _mm_packus_epi16(_mm_packus_epi32(sample_epi32, temp_zeros_128), temp_zeros_128); + int8_t*temp_int_u = (int8_t*)&temp_u; + + sample0_v_epi32 = hi_prec_chroma_rec0 ? _mm_cvtepu16_epi32(_mm_loadl_epi64((__m128i*)&(high_precision_rec0->v[y_in_lcu * LCU_WIDTH_C + x_in_lcu]))) : + _mm_slli_epi32(_mm_cvtepu8_epi32((_mm_loadl_epi64((__m128i*) &(temp_lcu_v[y_in_lcu * LCU_WIDTH_C + x_in_lcu])))), shift_left); + + sample1_v_epi32 = hi_prec_chroma_rec1 ? _mm_cvtepu16_epi32(_mm_loadl_epi64((__m128i*)&(high_precision_rec1->v[y_in_lcu * LCU_WIDTH_C + x_in_lcu]))) : + _mm_slli_epi32(_mm_cvtepu8_epi32((_mm_loadl_epi64((__m128i*) &(lcu->rec.v[y_in_lcu * LCU_WIDTH_C + x_in_lcu])))), shift_left); + + // (sample1 + sample2 + offset)>>shift + sample_epi32 = _mm_add_epi32(_mm_add_epi32(sample0_v_epi32, sample1_v_epi32), offset_4); + sample_epi32 = _mm_srai_epi32(sample_epi32, shift); + + __m128i temp_v = _mm_packus_epi16(_mm_packus_epi32(sample_epi32, temp_zeros_128), temp_zeros_128); + int8_t*temp_int_v = (int8_t*)&temp_v; + + for (int i = 0; i < 2; i++) { + lcu->rec.u[y_in_lcu * LCU_WIDTH_C + x_in_lcu + i] = temp_int_u[i]; + lcu->rec.v[y_in_lcu * LCU_WIDTH_C + x_in_lcu + i] = temp_int_v[i]; + + } + } + + + } + break; + + default: + + int start_point = 0; + int start_point_uv = 0; + + for (int temp_y = 0; temp_y < height; temp_y += 1) { + temp_epi32 = _mm256_setzero_si256(); + temp_epi16 = _mm256_setzero_si256(); + int temp = 0; + int temp_uv = 0; + + + for (int temp_x = 0; temp_x < width; temp_x += 8) { + + + y_in_lcu = ((ypos + temp_y) & ((LCU_WIDTH)-1)); + x_in_lcu = ((xpos + temp_x) & ((LCU_WIDTH)-1)); + + // Load total of 8 elements from memory to vector and convert all to 32-bit + sample0_epi32 = hi_prec_luma_rec0 ? 
(_mm256_cvtepu16_epi32(_mm_load_si128((__m128i*) &(high_precision_rec0->y[y_in_lcu * LCU_WIDTH + x_in_lcu])))) : + _mm256_slli_epi32(_mm256_cvtepu8_epi32((_mm_loadl_epi64((__m128i*) &(temp_lcu_y[y_in_lcu * LCU_WIDTH + x_in_lcu])))), shift_left); + + sample1_epi32 = hi_prec_luma_rec1 ? (_mm256_cvtepu16_epi32(_mm_load_si128((__m128i*) &(high_precision_rec1->y[y_in_lcu * LCU_WIDTH + x_in_lcu])))) : + _mm256_slli_epi32(_mm256_cvtepu8_epi32((_mm_loadl_epi64((__m128i*) &(lcu->rec.y[y_in_lcu * LCU_WIDTH + x_in_lcu])))), shift_left); + + // (sample1 + sample2 + offset)>>shift + temp_y_epi32 = _mm256_add_epi32(sample0_epi32, sample1_epi32); + temp_y_epi32 = _mm256_add_epi32(temp_y_epi32, offset_epi32); + temp_y_epi32 = _mm256_srai_epi32(temp_y_epi32, shift); + + switch (width) + { + case 8: + + // Pack the bits from 32-bit to 8-bit + temp_epi8 = _mm256_packus_epi16(_mm256_packus_epi32(temp_y_epi32, temp_zeros_256), temp_zeros_256); + temp_epi8 = _mm256_permutevar8x32_epi32(temp_epi8, idx); + final_epi8_128 = _mm_loadu_si128((__m128i*)&temp_epi8); + + // Store 64-bits from vector to memory + _mm_storel_epi64((__m128i*)&(lcu->rec.y[(y_in_lcu)* LCU_WIDTH + x_in_lcu]), final_epi8_128); + + break; + + case 16: + + if (temp == 0) { + + // Store to temporary vector + temp_epi32 = temp_y_epi32; + temp++; + + // Save starting point to memory + start_point = (y_in_lcu)* LCU_WIDTH + x_in_lcu; + } + + else if (temp == 1) { + + // Pack the bits from 32-bit to 8-bit + temp_epi8 = _mm256_packus_epi16(_mm256_packus_epi32(temp_epi32, temp_y_epi32), temp_zeros_256); + + temp_epi8 = _mm256_permutevar8x32_epi32(temp_epi8, idx); + + // Fill 128 bit vector with packed data and store it to memory + __m128i final_epi8_16 = _mm_loadu_si128((__m128i*)&temp_epi8); + + _mm_storeu_si128((__m128i*)&(lcu->rec.y[start_point]), final_epi8_16); + + temp = 0; + } + + + break; + + default: + if (temp == 0) { + + temp_epi32 = temp_y_epi32; + temp++; + + start_point = y_in_lcu* LCU_WIDTH + x_in_lcu; + } + + else if (temp == 1) { + + // Convert packed 16-bit integers to packed 8-bit integers and store result to vector + temp_epi16 = _mm256_packus_epi32(temp_epi32, temp_y_epi32); + temp++; + } + + else if (temp == 2) { + temp_epi32 = temp_y_epi32; + temp++; + } + + else { + + // Convert packed 32-bit integers to packed 8-bit integers and store result to vector + temp_epi8 = _mm256_packus_epi16(temp_epi16, _mm256_packus_epi32(temp_epi32, temp_y_epi32)); + + // Arrange the vector to right order before inserting it + final_epi8_256 = _mm256_permutevar8x32_epi32(temp_epi8, idx); + + + // Store 256-bits of integer data into memory + _mm256_storeu_si256((__m256i*)&(lcu->rec.y[start_point]), final_epi8_256); + temp = 0; + } + } + + if (temp_x < width >> 1 && temp_y < height >> 1) { + y_in_lcu = (((ypos >> 1) + temp_y) & (LCU_WIDTH_C - 1)); + x_in_lcu = (((xpos >> 1) + temp_x) & (LCU_WIDTH_C - 1)); + + sample0_epi32 = hi_prec_chroma_rec0 ? (_mm256_cvtepu16_epi32(_mm_load_si128((__m128i*) &(high_precision_rec0->u[y_in_lcu * LCU_WIDTH_C + x_in_lcu])))) : + _mm256_slli_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*) &(temp_lcu_u[y_in_lcu * LCU_WIDTH_C + x_in_lcu]))), shift_left); + + sample1_epi32 = hi_prec_chroma_rec1 ? 
(_mm256_cvtepu16_epi32(_mm_load_si128((__m128i*) &(high_precision_rec1->u[y_in_lcu * LCU_WIDTH_C + x_in_lcu])))) : + _mm256_slli_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*) &(lcu->rec.u[y_in_lcu * LCU_WIDTH_C + x_in_lcu]))), shift_left); + + // (sample1 + sample2 + offset)>>shift + temp_u_epi32 = _mm256_add_epi32(sample0_epi32, sample1_epi32); + temp_u_epi32 = _mm256_add_epi32(temp_u_epi32, offset_epi32); + temp_u_epi32 = _mm256_srai_epi16(temp_u_epi32, shift); + + sample0_epi32 = hi_prec_chroma_rec0 ? (_mm256_cvtepu16_epi32(_mm_load_si128((__m128i*) &(high_precision_rec0->v[y_in_lcu * LCU_WIDTH_C + x_in_lcu])))) : + _mm256_slli_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*) &(temp_lcu_v[y_in_lcu * LCU_WIDTH_C + x_in_lcu]))), shift_left); + + sample1_epi32 = hi_prec_chroma_rec1 ? (_mm256_cvtepu16_epi32(_mm_load_si128((__m128i*) &(high_precision_rec1->v[y_in_lcu * LCU_WIDTH_C + x_in_lcu])))) : + _mm256_slli_epi32(_mm256_cvtepu8_epi32(_mm_loadl_epi64((__m128i*) &(lcu->rec.v[y_in_lcu * LCU_WIDTH_C + x_in_lcu]))), shift_left); + + // (sample1 + sample2 + offset)>>shift + temp_v_epi32 = _mm256_add_epi32(sample0_epi32, sample1_epi32); + temp_v_epi32 = _mm256_add_epi32(temp_v_epi32, offset_epi32); + temp_v_epi32 = _mm256_srai_epi32(temp_v_epi32, shift); + + + switch (width) { + + case 8: + + __m256i temp_epi8u = _mm256_packus_epi16(_mm256_packus_epi32(temp_u_epi32, temp_zeros_256), temp_zeros_256); + + int8_t *temp_int_8_u = (int8_t*)&temp_epi8u; + + __m256i temp_epi8v = _mm256_packus_epi16(_mm256_packus_epi32(temp_v_epi32, temp_zeros_256), temp_zeros_256); + + int8_t *temp_int_8_v = (int8_t*)&temp_epi8v; + + + + for (int i = 0; i < 4; i++) { + lcu->rec.u[(y_in_lcu)* LCU_WIDTH_C + x_in_lcu + i] = temp_int_8_u[i]; + lcu->rec.v[(y_in_lcu)* LCU_WIDTH_C + x_in_lcu + i] = temp_int_8_v[i]; + } + + break; + + case 16: + + temp_epi8 = _mm256_packus_epi16(_mm256_packus_epi32(temp_u_epi32, temp_zeros_256), temp_zeros_256); + + temp_epi8 = _mm256_permutevar8x32_epi32(temp_epi8, idx); + final_epi8_128 = _mm_loadu_si128((__m128i*)&temp_epi8); + + // Store 64-bit integer into memory + _mm_storel_epi64((__m128i*)&(lcu->rec.u[(y_in_lcu)* LCU_WIDTH_C + x_in_lcu]), final_epi8_128); + + temp_epi8 = _mm256_packus_epi16(_mm256_packus_epi32(temp_v_epi32, temp_zeros_256), temp_zeros_256); + + temp_epi8 = _mm256_permutevar8x32_epi32(temp_epi8, idx); + final_epi8_128 = _mm_loadu_si128((__m128i*)&temp_epi8); + + // Store 64-bit integer into memory + _mm_storel_epi64((__m128i*)&(lcu->rec.v[(y_in_lcu)* LCU_WIDTH_C + x_in_lcu]), final_epi8_128); + + break; + + case 32: + + if (temp_uv == 0) { + + // Store to temporary vector + temp_epi32_u = temp_u_epi32; + temp_epi32_v = temp_v_epi32; + + // Save starting point to memory + start_point_uv = (y_in_lcu)* LCU_WIDTH_C + x_in_lcu; + + temp_uv++; + } + + else{ + + // Pack the bits from 32-bit to 8-bit + __m256i temp_epi8_u = _mm256_packus_epi16(_mm256_packus_epi32(temp_epi32_u, temp_u_epi32), temp_zeros_256); + __m256i temp_epi8_v = _mm256_packus_epi16(_mm256_packus_epi32(temp_epi32_v, temp_v_epi32), temp_zeros_256); + + temp_epi8_u = _mm256_permutevar8x32_epi32(temp_epi8_u, idx); + temp_epi8_v = _mm256_permutevar8x32_epi32(temp_epi8_v, idx); + + // Fill 128 bit vector with packed data and store it to memory + __m128i final_epi8_u = _mm_loadu_si128((__m128i*)&temp_epi8_u); + _mm_storeu_si128((__m128i*)&(lcu->rec.u[start_point_uv]), final_epi8_u); + + // Fill 128 bit vector with packed data and store it to memory + __m128i final_epi8_v = 
_mm_loadu_si128((__m128i*)&temp_epi8_v); + _mm_storeu_si128((__m128i*)&(lcu->rec.v[start_point_uv]), final_epi8_v); + + temp_uv = 0; + } + break; + + default: + if (temp_uv == 0) { + + // Store to temporary vector + temp_epi32_u = temp_u_epi32; + temp_epi32_v = temp_v_epi32; + + // Save starting point to memory + start_point_uv = (y_in_lcu)* LCU_WIDTH_C + x_in_lcu; + + temp_uv++; + } + + else if (temp_uv == 1) { + + // Convert packed 16-bit integers to packed 8-bit integers and store result to vector + temp_epi16_u = _mm256_packus_epi32(temp_epi32_u, temp_u_epi32); + temp_epi16_v = _mm256_packus_epi32(temp_epi32_v, temp_v_epi32); + temp_uv++; + } + + else if (temp_uv == 2) { + + temp_epi32_u = temp_u_epi32; + temp_epi32_v = temp_v_epi32; + temp_uv++; + } + + else { + // Pack 32 bit to 8 bit + __m256i temp_epi8_u = _mm256_packus_epi16(temp_epi16_u, _mm256_packus_epi32(temp_epi32_u, temp_u_epi32)); + __m256i temp_epi8_v = _mm256_packus_epi16(temp_epi16_v, _mm256_packus_epi32(temp_epi32_v, temp_v_epi32)); + + // Arrange the vector to right order before inserting it + final_epi8_256 = _mm256_permutevar8x32_epi32(temp_epi8_u, idx); + + // Store 256-bits of integer data into memory + _mm256_storeu_si256((__m256i*)&(lcu->rec.u[start_point_uv]), final_epi8_256); + + // Arrange the vector to right order before inserting it + final_epi8_256 = _mm256_permutevar8x32_epi32(temp_epi8_v, idx); + + // Store 256-bits of integer data into memory + _mm256_storeu_si256((__m256i*)&(lcu->rec.v[start_point_uv]), final_epi8_256); + + temp_uv = 0; + } + + + } + } + + + } + } + + } +} + #endif //COMPILE_INTEL_AVX2 @@ -746,6 +1146,8 @@ int kvz_strategy_register_picture_avx2(void* opaque, uint8_t bitdepth) success &= kvz_strategyselector_register(opaque, "satd_any_size_quad", "avx2", 40, &satd_any_size_quad_avx2); success &= kvz_strategyselector_register(opaque, "pixels_calc_ssd", "avx2", 40, &pixels_calc_ssd_avx2); + success &= kvz_strategyselector_register(opaque, "inter_recon_bipred", "avx2", 40, &inter_recon_bipred_avx2); + } #endif return success; diff --git a/src/strategies/generic/picture-generic.c b/src/strategies/generic/picture-generic.c index 993ab2bb..0c58b4ef 100644 --- a/src/strategies/generic/picture-generic.c +++ b/src/strategies/generic/picture-generic.c @@ -539,53 +539,79 @@ static void inter_recon_bipred_generic(const int hi_prec_luma_rec0, const int hi_prec_luma_rec1, const int hi_prec_chroma_rec0, const int hi_prec_chroma_rec1, - int height, - int width, - int ypos, - int xpos, + int32_t height, + int32_t width, + int32_t ypos, + int32_t xpos, const hi_prec_buf_t*high_precision_rec0, const hi_prec_buf_t*high_precision_rec1, - lcu_t* lcu) { + lcu_t* lcu, + kvz_pixel temp_lcu_y[LCU_WIDTH*LCU_WIDTH], + kvz_pixel temp_lcu_u[LCU_WIDTH_C*LCU_WIDTH_C], + kvz_pixel temp_lcu_v[LCU_WIDTH_C*LCU_WIDTH_C]) { - kvz_pixel temp_lcu_y[LCU_WIDTH*LCU_WIDTH]; - kvz_pixel temp_lcu_u[LCU_WIDTH_C*LCU_WIDTH_C]; - kvz_pixel temp_lcu_v[LCU_WIDTH_C*LCU_WIDTH_C]; - - int temp_x, temp_y; int shift = 15 - KVZ_BIT_DEPTH; int offset = 1 << (shift - 1); - int y_in_lcu1; - int y_in_lcu2; + + int y_in_lcu; + int x_in_lcu; //After reconstruction, merge the predictors by taking an average of each pixel - for (temp_y = 0; temp_y < height; ++temp_y) { - y_in_lcu1 = ((ypos + temp_y) & ((LCU_WIDTH)-1)); + for (int temp_y = 0; temp_y < height; ++temp_y) { - for (temp_x = 0; temp_x < width; ++temp_x) { - int x_in_lcu1 = ((xpos + temp_x) & ((LCU_WIDTH)-1)); + for (int temp_x = 0; temp_x < width; ++temp_x) { + y_in_lcu = ((ypos + temp_y) & 
((LCU_WIDTH)-1)); + x_in_lcu = ((xpos + temp_x) & ((LCU_WIDTH)-1)); - int16_t sample0_y = (hi_prec_luma_rec0 ? high_precision_rec0->y[y_in_lcu1 * LCU_WIDTH + x_in_lcu1] : (temp_lcu_y[y_in_lcu1 * LCU_WIDTH + x_in_lcu1] << (14 - KVZ_BIT_DEPTH))); - int16_t sample1_y = (hi_prec_luma_rec1 ? high_precision_rec1->y[y_in_lcu1 * LCU_WIDTH + x_in_lcu1] : (lcu->rec.y[y_in_lcu1 * LCU_WIDTH + x_in_lcu1] << (14 - KVZ_BIT_DEPTH))); - lcu->rec.y[y_in_lcu1 * LCU_WIDTH + x_in_lcu1] = (kvz_pixel)kvz_fast_clip_32bit_to_pixel((sample0_y + sample1_y + offset) >> shift); + int16_t sample0_y = (hi_prec_luma_rec0 ? high_precision_rec0->y[y_in_lcu * LCU_WIDTH + x_in_lcu] : (temp_lcu_y[y_in_lcu * LCU_WIDTH + x_in_lcu] << (14 - KVZ_BIT_DEPTH))); + int16_t sample1_y = (hi_prec_luma_rec1 ? high_precision_rec1->y[y_in_lcu * LCU_WIDTH + x_in_lcu] : (lcu->rec.y[y_in_lcu * LCU_WIDTH + x_in_lcu] << (14 - KVZ_BIT_DEPTH))); + + lcu->rec.y[y_in_lcu * LCU_WIDTH + x_in_lcu] = (kvz_pixel)kvz_fast_clip_32bit_to_pixel((sample0_y + sample1_y + offset) >> shift); if (temp_x < width >> 1 && temp_y < height >> 1) { - y_in_lcu2 = (((ypos >> 1) + temp_y) & (LCU_WIDTH_C - 1)); - int x_in_lcu2 = (((xpos >> 1) + temp_x) & (LCU_WIDTH_C - 1)); + y_in_lcu = (((ypos >> 1) + temp_y) & (LCU_WIDTH_C - 1)); + x_in_lcu = (((xpos >> 1) + temp_x) & (LCU_WIDTH_C - 1)); - int16_t sample0_u = (hi_prec_chroma_rec0 ? high_precision_rec0->u[y_in_lcu2 * LCU_WIDTH_C + x_in_lcu2] : (temp_lcu_u[y_in_lcu2 * LCU_WIDTH_C + x_in_lcu2] << (14 - KVZ_BIT_DEPTH))); - int16_t sample1_u = (hi_prec_chroma_rec1 ? high_precision_rec1->u[y_in_lcu2 * LCU_WIDTH_C + x_in_lcu2] : (lcu->rec.u[y_in_lcu2 * LCU_WIDTH_C + x_in_lcu2] << (14 - KVZ_BIT_DEPTH))); - lcu->rec.u[y_in_lcu2 * LCU_WIDTH_C + x_in_lcu2] = (kvz_pixel)kvz_fast_clip_32bit_to_pixel((sample0_u + sample1_u + offset) >> shift); + int16_t sample0_u = (hi_prec_chroma_rec0 ? high_precision_rec0->u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] : (temp_lcu_u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] << (14 - KVZ_BIT_DEPTH))); + int16_t sample1_u = (hi_prec_chroma_rec1 ? high_precision_rec1->u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] : (lcu->rec.u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] << (14 - KVZ_BIT_DEPTH))); + lcu->rec.u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] = (kvz_pixel)kvz_fast_clip_32bit_to_pixel((sample0_u + sample1_u + offset) >> shift); - int16_t sample0_v = (hi_prec_chroma_rec0 ? high_precision_rec0->v[y_in_lcu2 * LCU_WIDTH_C + x_in_lcu2] : (temp_lcu_v[y_in_lcu2 * LCU_WIDTH_C + x_in_lcu2] << (14 - KVZ_BIT_DEPTH))); - int16_t sample1_v = (hi_prec_chroma_rec1 ? high_precision_rec1->v[y_in_lcu2 * LCU_WIDTH_C + x_in_lcu2] : (lcu->rec.v[y_in_lcu2 * LCU_WIDTH_C + x_in_lcu2] << (14 - KVZ_BIT_DEPTH))); - lcu->rec.v[y_in_lcu2 * LCU_WIDTH_C + x_in_lcu2] = (kvz_pixel)kvz_fast_clip_32bit_to_pixel((sample0_v + sample1_v + offset) >> shift); + int16_t sample0_v = (hi_prec_chroma_rec0 ? high_precision_rec0->v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] : (temp_lcu_v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] << (14 - KVZ_BIT_DEPTH))); + int16_t sample1_v = (hi_prec_chroma_rec1 ? 
high_precision_rec1->v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] : (lcu->rec.v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] << (14 - KVZ_BIT_DEPTH))); + lcu->rec.v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] = (kvz_pixel)kvz_fast_clip_32bit_to_pixel((sample0_v + sample1_v + offset) >> shift); } } + } + /* + for (int temp_y = 0; temp_y < height; ++temp_y) { + int y_in_lcu = ((ypos + temp_y) & ((LCU_WIDTH)-1)); + for (int temp_x = 0; temp_x < width; ++temp_x) { + int x_in_lcu = ((xpos + temp_x) & ((LCU_WIDTH)-1)); + int16_t sample0_y = (hi_prec_luma_rec0 ? high_precision_rec0->y[y_in_lcu * LCU_WIDTH + x_in_lcu] : (temp_lcu_y[y_in_lcu * LCU_WIDTH + x_in_lcu] << (14 - KVZ_BIT_DEPTH))); + int16_t sample1_y = (hi_prec_luma_rec1 ? high_precision_rec1->y[y_in_lcu * LCU_WIDTH + x_in_lcu] : (lcu->rec.y[y_in_lcu * LCU_WIDTH + x_in_lcu] << (14 - KVZ_BIT_DEPTH))); + lcu->rec.y[y_in_lcu * LCU_WIDTH + x_in_lcu] = (kvz_pixel)kvz_fast_clip_32bit_to_pixel((sample0_y + sample1_y + offset) >> shift); + } } + for (int temp_y = 0; temp_y < height >> 1; ++temp_y) { + int y_in_lcu = (((ypos >> 1) + temp_y) & (LCU_WIDTH_C - 1)); + for (int temp_x = 0; temp_x < width >> 1; ++temp_x) { + int x_in_lcu = (((xpos >> 1) + temp_x) & (LCU_WIDTH_C - 1)); + int16_t sample0_u = (hi_prec_chroma_rec0 ? high_precision_rec0->u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] : (temp_lcu_u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] << (14 - KVZ_BIT_DEPTH))); + int16_t sample1_u = (hi_prec_chroma_rec1 ? high_precision_rec1->u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] : (lcu->rec.u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] << (14 - KVZ_BIT_DEPTH))); + lcu->rec.u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] = (kvz_pixel)kvz_fast_clip_32bit_to_pixel((sample0_u + sample1_u + offset) >> shift); + + int16_t sample0_v = (hi_prec_chroma_rec0 ? high_precision_rec0->v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] : (temp_lcu_v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] << (14 - KVZ_BIT_DEPTH))); + int16_t sample1_v = (hi_prec_chroma_rec1 ? 
high_precision_rec1->v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] : (lcu->rec.v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] << (14 - KVZ_BIT_DEPTH))); + lcu->rec.v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] = (kvz_pixel)kvz_fast_clip_32bit_to_pixel((sample0_v + sample1_v + offset) >> shift); + + + + } + }*/ } diff --git a/src/strategies/strategies-picture.c b/src/strategies/strategies-picture.c index 5e0f960a..c16d9f5e 100644 --- a/src/strategies/strategies-picture.c +++ b/src/strategies/strategies-picture.c @@ -61,7 +61,7 @@ cost_pixel_any_size_multi_func * kvz_satd_any_size_quad = 0; pixels_calc_ssd_func * kvz_pixels_calc_ssd = 0; -inter_recon_bipred_func * kvz_inter_recon_bipred_test = 0; +inter_recon_bipred_func * kvz_inter_recon_bipred_generic = 0; int kvz_strategy_register_picture(void* opaque, uint8_t bitdepth) { diff --git a/src/strategies/strategies-picture.h b/src/strategies/strategies-picture.h index 8461acfb..b1ff805a 100644 --- a/src/strategies/strategies-picture.h +++ b/src/strategies/strategies-picture.h @@ -124,7 +124,10 @@ typedef void (inter_recon_bipred_func)(const int hi_prec_luma_rec0, int xpos, const hi_prec_buf_t*high_precision_rec0, const hi_prec_buf_t*high_precision_rec1, - lcu_t* lcu); + lcu_t* lcu, + kvz_pixel temp_lcu_y[LCU_WIDTH*LCU_WIDTH], + kvz_pixel temp_lcu_u[LCU_WIDTH_C*LCU_WIDTH_C], + kvz_pixel temp_lcu_v[LCU_WIDTH_C*LCU_WIDTH_C]); @@ -160,7 +163,7 @@ extern cost_pixel_any_size_multi_func *kvz_satd_any_size_quad; extern pixels_calc_ssd_func *kvz_pixels_calc_ssd; -extern inter_recon_bipred_func * kvz_inter_recon_bipred_test; +extern inter_recon_bipred_func * kvz_inter_recon_bipred_generic; int kvz_strategy_register_picture(void* opaque, uint8_t bitdepth); cost_pixel_nxn_func * kvz_pixels_get_satd_func(unsigned n); @@ -192,7 +195,8 @@ cost_pixel_nxn_multi_func * kvz_pixels_get_sad_dual_func(unsigned n); {"satd_32x32_dual", (void**) &kvz_satd_32x32_dual}, \ {"satd_64x64_dual", (void**) &kvz_satd_64x64_dual}, \ {"satd_any_size_quad", (void**) &kvz_satd_any_size_quad}, \ - {"inter_recon_bipred", (void**) &kvz_inter_recon_bipred_test}, \ + {"pixels_calc_ssd", (void**) &kvz_pixels_calc_ssd}, \ + {"inter_recon_bipred", (void**) &kvz_inter_recon_bipred_generic}, \ diff --git a/src/strategies/x86_asm/x86inc.asm b/src/strategies/x86_asm/x86inc.asm new file mode 100644 index 00000000..b105e576 --- /dev/null +++ b/src/strategies/x86_asm/x86inc.asm @@ -0,0 +1,1456 @@ +;***************************************************************************** +;* x86inc.asm: x264asm abstraction layer +;***************************************************************************** +;* Copyright (C) 2005-2014 x264 project +;* +;* Authors: Loren Merritt +;* Anton Mitrofanov +;* Jason Garrett-Glaser +;* Henrik Gramner +;* +;* Permission to use, copy, modify, and/or distribute this software for any +;* purpose with or without fee is hereby granted, provided that the above +;* copyright notice and this permission notice appear in all copies. +;* +;* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +;* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +;* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +;* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +;* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +;* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +;* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+;***************************************************************************** + +; This is a header file for the x264ASM assembly language, which uses +; NASM/YASM syntax combined with a large number of macros to provide easy +; abstraction between different calling conventions (x86_32, win64, linux64). +; It also has various other useful features to simplify writing the kind of +; DSP functions that are most often used in x264. + +; Unlike the rest of x264, this file is available under an ISC license, as it +; has significant usefulness outside of x264 and we want it to be available +; to the largest audience possible. Of course, if you modify it for your own +; purposes to add a new feature, we strongly encourage contributing a patch +; as this feature might be useful for others as well. Send patches or ideas +; to x264-devel@videolan.org . + +%ifndef private_prefix + %define private_prefix kvz +%endif + +%ifndef public_prefix + %define public_prefix private_prefix +%endif + +%define WIN64 0 +%define UNIX64 0 +%if ARCH_X86_64 + %ifidn __OUTPUT_FORMAT__,win32 + %define WIN64 1 + %elifidn __OUTPUT_FORMAT__,win64 + %define WIN64 1 + %elifidn __OUTPUT_FORMAT__,x64 + %define WIN64 1 + %else + %define UNIX64 1 + %endif +%endif + +%ifdef PREFIX + %define mangle(x) _ %+ x +%else + %define mangle(x) x +%endif + +%macro SECTION_RODATA 0-1 16 + SECTION .rodata align=%1 +%endmacro + +%macro SECTION_TEXT 0-1 16 + SECTION .text align=%1 +%endmacro + +%if WIN64 + %define PIC +%elif ARCH_X86_64 == 0 +; x86_32 doesn't require PIC. +; Some distros prefer shared objects to be PIC, but nothing breaks if +; the code contains a few textrels, so we'll skip that complexity. + %undef PIC +%endif +%ifdef PIC + default rel +%endif + +%macro CPUNOP 1 + %ifdef __YASM_MAJOR__ + CPU %1 + %endif +%endmacro + +; Always use long nops (reduces 0x90 spam in disassembly on x86_32) +CPUNOP amdnop + +; Macros to eliminate most code duplication between x86_32 and x86_64: +; Currently this works only for leaf functions which load all their arguments +; into registers at the start, and make no other use of the stack. Luckily that +; covers most of x264's asm. + +; PROLOGUE: +; %1 = number of arguments. loads them from stack if needed. +; %2 = number of registers used. pushes callee-saved regs if needed. +; %3 = number of xmm registers used. pushes callee-saved xmm regs if needed. +; %4 = (optional) stack size to be allocated. If not aligned (x86-32 ICC 10.x, +; MSVC or YMM), the stack will be manually aligned (to 16 or 32 bytes), +; and an extra register will be allocated to hold the original stack +; pointer (to not invalidate r0m etc.). To prevent the use of an extra +; register as stack pointer, request a negative stack size. +; %4+/%5+ = list of names to define to registers +; PROLOGUE can also be invoked by adding the same options to cglobal + +; e.g. +; cglobal foo, 2,3,0, dst, src, tmp +; declares a function (foo), taking two args (dst and src) and one local variable (tmp) + +; TODO Some functions can use some args directly from the stack. If they're the +; last args then you can just not declare them, but if they're in the middle +; we need more flexible macro. + +; RET: +; Pops anything that was pushed by PROLOGUE, and returns. + +; REP_RET: +; Use this instead of RET if it's a branch target. 
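+
+; A minimal usage sketch of the conventions above (hypothetical function, shown
+; only as illustration, not part of the original x264 header): it assumes this
+; file has been %include'd and an INIT_* macro selected. It sums len dwords
+; (len assumed > 0), takes two arguments, uses three GPRs, no xmm registers and
+; no stack space, so the RET at the end has no epilogue work to do; the result
+; is returned in eax.
+;
+;   cglobal sum_u32, 2, 3, 0, src, len, tmp
+;       xor   tmpd, tmpd
+;   .loop:
+;       add   tmpd, [srcq]
+;       add   srcq, 4
+;       dec   lend
+;       jg    .loop
+;       mov   eax, tmpd
+;       RET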
+ +; registers: +; rN and rNq are the native-size register holding function argument N +; rNd, rNw, rNb are dword, word, and byte size +; rNh is the high 8 bits of the word size +; rNm is the original location of arg N (a register or on the stack), dword +; rNmp is native size + +%macro DECLARE_REG 2-3 + %define r%1q %2 + %define r%1d %2d + %define r%1w %2w + %define r%1b %2b + %define r%1h %2h + %if %0 == 2 + %define r%1m %2d + %define r%1mp %2 + %elif ARCH_X86_64 ; memory + %define r%1m [rstk + stack_offset + %3] + %define r%1mp qword r %+ %1 %+ m + %else + %define r%1m [rstk + stack_offset + %3] + %define r%1mp dword r %+ %1 %+ m + %endif + %define r%1 %2 +%endmacro + +%macro DECLARE_REG_SIZE 3 + %define r%1q r%1 + %define e%1q r%1 + %define r%1d e%1 + %define e%1d e%1 + %define r%1w %1 + %define e%1w %1 + %define r%1h %3 + %define e%1h %3 + %define r%1b %2 + %define e%1b %2 +%if ARCH_X86_64 == 0 + %define r%1 e%1 +%endif +%endmacro + +DECLARE_REG_SIZE ax, al, ah +DECLARE_REG_SIZE bx, bl, bh +DECLARE_REG_SIZE cx, cl, ch +DECLARE_REG_SIZE dx, dl, dh +DECLARE_REG_SIZE si, sil, null +DECLARE_REG_SIZE di, dil, null +DECLARE_REG_SIZE bp, bpl, null + +; t# defines for when per-arch register allocation is more complex than just function arguments + +%macro DECLARE_REG_TMP 1-* + %assign %%i 0 + %rep %0 + CAT_XDEFINE t, %%i, r%1 + %assign %%i %%i+1 + %rotate 1 + %endrep +%endmacro + +%macro DECLARE_REG_TMP_SIZE 0-* + %rep %0 + %define t%1q t%1 %+ q + %define t%1d t%1 %+ d + %define t%1w t%1 %+ w + %define t%1h t%1 %+ h + %define t%1b t%1 %+ b + %rotate 1 + %endrep +%endmacro + +DECLARE_REG_TMP_SIZE 0,1,2,3,4,5,6,7,8,9,10,11,12,13,14 + +%if ARCH_X86_64 + %define gprsize 8 +%else + %define gprsize 4 +%endif + +%macro PUSH 1 + push %1 + %ifidn rstk, rsp + %assign stack_offset stack_offset+gprsize + %endif +%endmacro + +%macro POP 1 + pop %1 + %ifidn rstk, rsp + %assign stack_offset stack_offset-gprsize + %endif +%endmacro + +%macro PUSH_IF_USED 1-* + %rep %0 + %if %1 < regs_used + PUSH r%1 + %endif + %rotate 1 + %endrep +%endmacro + +%macro POP_IF_USED 1-* + %rep %0 + %if %1 < regs_used + pop r%1 + %endif + %rotate 1 + %endrep +%endmacro + +%macro LOAD_IF_USED 1-* + %rep %0 + %if %1 < num_args + mov r%1, r %+ %1 %+ mp + %endif + %rotate 1 + %endrep +%endmacro + +%macro SUB 2 + sub %1, %2 + %ifidn %1, rstk + %assign stack_offset stack_offset+(%2) + %endif +%endmacro + +%macro ADD 2 + add %1, %2 + %ifidn %1, rstk + %assign stack_offset stack_offset-(%2) + %endif +%endmacro + +%macro movifnidn 2 + %ifnidn %1, %2 + mov %1, %2 + %endif +%endmacro + +%macro movsxdifnidn 2 + %ifnidn %1, %2 + movsxd %1, %2 + %endif +%endmacro + +%macro ASSERT 1 + %if (%1) == 0 + %error assert failed + %endif +%endmacro + +%macro DEFINE_ARGS 0-* + %ifdef n_arg_names + %assign %%i 0 + %rep n_arg_names + CAT_UNDEF arg_name %+ %%i, q + CAT_UNDEF arg_name %+ %%i, d + CAT_UNDEF arg_name %+ %%i, w + CAT_UNDEF arg_name %+ %%i, h + CAT_UNDEF arg_name %+ %%i, b + CAT_UNDEF arg_name %+ %%i, m + CAT_UNDEF arg_name %+ %%i, mp + CAT_UNDEF arg_name, %%i + %assign %%i %%i+1 + %endrep + %endif + + %xdefine %%stack_offset stack_offset + %undef stack_offset ; so that the current value of stack_offset doesn't get baked in by xdefine + %assign %%i 0 + %rep %0 + %xdefine %1q r %+ %%i %+ q + %xdefine %1d r %+ %%i %+ d + %xdefine %1w r %+ %%i %+ w + %xdefine %1h r %+ %%i %+ h + %xdefine %1b r %+ %%i %+ b + %xdefine %1m r %+ %%i %+ m + %xdefine %1mp r %+ %%i %+ mp + CAT_XDEFINE arg_name, %%i, %1 + %assign %%i %%i+1 + %rotate 1 + %endrep + %xdefine 
stack_offset %%stack_offset + %assign n_arg_names %0 +%endmacro + +%macro ALLOC_STACK 1-2 0 ; stack_size, n_xmm_regs (for win64 only) + %ifnum %1 + %if %1 != 0 + %assign %%stack_alignment ((mmsize + 15) & ~15) + %assign stack_size %1 + %if stack_size < 0 + %assign stack_size -stack_size + %endif + %assign stack_size_padded stack_size + %if WIN64 + %assign stack_size_padded stack_size_padded + 32 ; reserve 32 bytes for shadow space + %if mmsize != 8 + %assign xmm_regs_used %2 + %if xmm_regs_used > 8 + %assign stack_size_padded stack_size_padded + (xmm_regs_used-8)*16 + %endif + %endif + %endif + %if mmsize <= 16 && HAVE_ALIGNED_STACK + %assign stack_size_padded stack_size_padded + %%stack_alignment - gprsize - (stack_offset & (%%stack_alignment - 1)) + SUB rsp, stack_size_padded + %else + %assign %%reg_num (regs_used - 1) + %xdefine rstk r %+ %%reg_num + ; align stack, and save original stack location directly above + ; it, i.e. in [rsp+stack_size_padded], so we can restore the + ; stack in a single instruction (i.e. mov rsp, rstk or mov + ; rsp, [rsp+stack_size_padded]) + mov rstk, rsp + %if %1 < 0 ; need to store rsp on stack + sub rsp, gprsize+stack_size_padded + and rsp, ~(%%stack_alignment-1) + %xdefine rstkm [rsp+stack_size_padded] + mov rstkm, rstk + %else ; can keep rsp in rstk during whole function + sub rsp, stack_size_padded + and rsp, ~(%%stack_alignment-1) + %xdefine rstkm rstk + %endif + %endif + WIN64_PUSH_XMM + %endif + %endif +%endmacro + +%macro SETUP_STACK_POINTER 1 + %ifnum %1 + %if %1 != 0 && (HAVE_ALIGNED_STACK == 0 || mmsize == 32) + %if %1 > 0 + %assign regs_used (regs_used + 1) + %elif ARCH_X86_64 && regs_used == num_args && num_args <= 4 + UNIX64 * 2 + %warning "Stack pointer will overwrite register argument" + %endif + %endif + %endif +%endmacro + +%macro DEFINE_ARGS_INTERNAL 3+ + %ifnum %2 + DEFINE_ARGS %3 + %elif %1 == 4 + DEFINE_ARGS %2 + %elif %1 > 4 + DEFINE_ARGS %2, %3 + %endif +%endmacro + +%if WIN64 ; Windows x64 ;================================================= + +DECLARE_REG 0, rcx +DECLARE_REG 1, rdx +DECLARE_REG 2, R8 +DECLARE_REG 3, R9 +DECLARE_REG 4, R10, 40 +DECLARE_REG 5, R11, 48 +DECLARE_REG 6, rax, 56 +DECLARE_REG 7, rdi, 64 +DECLARE_REG 8, rsi, 72 +DECLARE_REG 9, rbx, 80 +DECLARE_REG 10, rbp, 88 +DECLARE_REG 11, R12, 96 +DECLARE_REG 12, R13, 104 +DECLARE_REG 13, R14, 112 +DECLARE_REG 14, R15, 120 + +%macro PROLOGUE 2-5+ 0 ; #args, #regs, #xmm_regs, [stack_size,] arg_names... + %assign num_args %1 + %assign regs_used %2 + ASSERT regs_used >= num_args + SETUP_STACK_POINTER %4 + ASSERT regs_used <= 15 + PUSH_IF_USED 7, 8, 9, 10, 11, 12, 13, 14 + ALLOC_STACK %4, %3 + %if mmsize != 8 && stack_size == 0 + WIN64_SPILL_XMM %3 + %endif + LOAD_IF_USED 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 + DEFINE_ARGS_INTERNAL %0, %4, %5 +%endmacro + +%macro WIN64_PUSH_XMM 0 + ; Use the shadow space to store XMM6 and XMM7, the rest needs stack space allocated. 
+ %if xmm_regs_used > 6 + movaps [rstk + stack_offset + 8], xmm6 + %endif + %if xmm_regs_used > 7 + movaps [rstk + stack_offset + 24], xmm7 + %endif + %if xmm_regs_used > 8 + %assign %%i 8 + %rep xmm_regs_used-8 + movaps [rsp + (%%i-8)*16 + stack_size + 32], xmm %+ %%i + %assign %%i %%i+1 + %endrep + %endif +%endmacro + +%macro WIN64_SPILL_XMM 1 + %assign xmm_regs_used %1 + ASSERT xmm_regs_used <= 16 + %if xmm_regs_used > 8 + %assign stack_size_padded (xmm_regs_used-8)*16 + (~stack_offset&8) + 32 + SUB rsp, stack_size_padded + %endif + WIN64_PUSH_XMM +%endmacro + +%macro WIN64_RESTORE_XMM_INTERNAL 1 + %assign %%pad_size 0 + %if xmm_regs_used > 8 + %assign %%i xmm_regs_used + %rep xmm_regs_used-8 + %assign %%i %%i-1 + movaps xmm %+ %%i, [%1 + (%%i-8)*16 + stack_size + 32] + %endrep + %endif + %if stack_size_padded > 0 + %if stack_size > 0 && (mmsize == 32 || HAVE_ALIGNED_STACK == 0) + mov rsp, rstkm + %else + add %1, stack_size_padded + %assign %%pad_size stack_size_padded + %endif + %endif + %if xmm_regs_used > 7 + movaps xmm7, [%1 + stack_offset - %%pad_size + 24] + %endif + %if xmm_regs_used > 6 + movaps xmm6, [%1 + stack_offset - %%pad_size + 8] + %endif +%endmacro + +%macro WIN64_RESTORE_XMM 1 + WIN64_RESTORE_XMM_INTERNAL %1 + %assign stack_offset (stack_offset-stack_size_padded) + %assign xmm_regs_used 0 +%endmacro + +%define has_epilogue regs_used > 7 || xmm_regs_used > 6 || mmsize == 32 || stack_size > 0 + +%macro RET 0 + WIN64_RESTORE_XMM_INTERNAL rsp + POP_IF_USED 14, 13, 12, 11, 10, 9, 8, 7 +%if mmsize == 32 + vzeroupper +%endif + AUTO_REP_RET +%endmacro + +%elif ARCH_X86_64 ; *nix x64 ;============================================= + +DECLARE_REG 0, rdi +DECLARE_REG 1, rsi +DECLARE_REG 2, rdx +DECLARE_REG 3, rcx +DECLARE_REG 4, R8 +DECLARE_REG 5, R9 +DECLARE_REG 6, rax, 8 +DECLARE_REG 7, R10, 16 +DECLARE_REG 8, R11, 24 +DECLARE_REG 9, rbx, 32 +DECLARE_REG 10, rbp, 40 +DECLARE_REG 11, R12, 48 +DECLARE_REG 12, R13, 56 +DECLARE_REG 13, R14, 64 +DECLARE_REG 14, R15, 72 + +%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names... + %assign num_args %1 + %assign regs_used %2 + ASSERT regs_used >= num_args + SETUP_STACK_POINTER %4 + ASSERT regs_used <= 15 + PUSH_IF_USED 9, 10, 11, 12, 13, 14 + ALLOC_STACK %4 + LOAD_IF_USED 6, 7, 8, 9, 10, 11, 12, 13, 14 + DEFINE_ARGS_INTERNAL %0, %4, %5 +%endmacro + +%define has_epilogue regs_used > 9 || mmsize == 32 || stack_size > 0 + +%macro RET 0 +%if stack_size_padded > 0 +%if mmsize == 32 || HAVE_ALIGNED_STACK == 0 + mov rsp, rstkm +%else + add rsp, stack_size_padded +%endif +%endif + POP_IF_USED 14, 13, 12, 11, 10, 9 +%if mmsize == 32 + vzeroupper +%endif + AUTO_REP_RET +%endmacro + +%else ; X86_32 ;============================================================== + +DECLARE_REG 0, eax, 4 +DECLARE_REG 1, ecx, 8 +DECLARE_REG 2, edx, 12 +DECLARE_REG 3, ebx, 16 +DECLARE_REG 4, esi, 20 +DECLARE_REG 5, edi, 24 +DECLARE_REG 6, ebp, 28 +%define rsp esp + +%macro DECLARE_ARG 1-* + %rep %0 + %define r%1m [rstk + stack_offset + 4*%1 + 4] + %define r%1mp dword r%1m + %rotate 1 + %endrep +%endmacro + +DECLARE_ARG 7, 8, 9, 10, 11, 12, 13, 14 + +%macro PROLOGUE 2-5+ ; #args, #regs, #xmm_regs, [stack_size,] arg_names... 
+ %assign num_args %1 + %assign regs_used %2 + ASSERT regs_used >= num_args + %if num_args > 7 + %assign num_args 7 + %endif + %if regs_used > 7 + %assign regs_used 7 + %endif + SETUP_STACK_POINTER %4 + ASSERT regs_used <= 7 + PUSH_IF_USED 3, 4, 5, 6 + ALLOC_STACK %4 + LOAD_IF_USED 0, 1, 2, 3, 4, 5, 6 + DEFINE_ARGS_INTERNAL %0, %4, %5 +%endmacro + +%define has_epilogue regs_used > 3 || mmsize == 32 || stack_size > 0 + +%macro RET 0 +%if stack_size_padded > 0 +%if mmsize == 32 || HAVE_ALIGNED_STACK == 0 + mov rsp, rstkm +%else + add rsp, stack_size_padded +%endif +%endif + POP_IF_USED 6, 5, 4, 3 +%if mmsize == 32 + vzeroupper +%endif + AUTO_REP_RET +%endmacro + +%endif ;====================================================================== + +%if WIN64 == 0 +%macro WIN64_SPILL_XMM 1 +%endmacro +%macro WIN64_RESTORE_XMM 1 +%endmacro +%macro WIN64_PUSH_XMM 0 +%endmacro +%endif + +; On AMD cpus <=K10, an ordinary ret is slow if it immediately follows either +; a branch or a branch target. So switch to a 2-byte form of ret in that case. +; We can automatically detect "follows a branch", but not a branch target. +; (SSSE3 is a sufficient condition to know that your cpu doesn't have this problem.) +%macro REP_RET 0 + %if has_epilogue + RET + %else + rep ret + %endif +%endmacro + +%define last_branch_adr $$ +%macro AUTO_REP_RET 0 + %ifndef cpuflags + times ((last_branch_adr-$)>>31)+1 rep ; times 1 iff $ != last_branch_adr. + %elif notcpuflag(ssse3) + times ((last_branch_adr-$)>>31)+1 rep + %endif + ret +%endmacro + +%macro BRANCH_INSTR 0-* + %rep %0 + %macro %1 1-2 %1 + %2 %1 + %%branch_instr: + %xdefine last_branch_adr %%branch_instr + %endmacro + %rotate 1 + %endrep +%endmacro + +BRANCH_INSTR jz, je, jnz, jne, jl, jle, jnl, jnle, jg, jge, jng, jnge, ja, jae, jna, jnae, jb, jbe, jnb, jnbe, jc, jnc, js, jns, jo, jno, jp, jnp + +%macro TAIL_CALL 2 ; callee, is_nonadjacent + %if has_epilogue + call %1 + RET + %elif %2 + jmp %1 + %endif +%endmacro + +;============================================================================= +; arch-independent part +;============================================================================= + +%assign function_align 16 + +; Begin a function. +; Applies any symbol mangling needed for C linkage, and sets up a define such that +; subsequent uses of the function name automatically refer to the mangled version. +; Appends cpuflags to the function name if cpuflags has been specified. +; The "" empty default parameter is a workaround for nasm, which fails if SUFFIX +; is empty and we call cglobal_internal with just %1 %+ SUFFIX (without %2). 
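+
+; Illustrative (hypothetical) example of the naming scheme the macros below
+; implement: with private_prefix set to kvz above, the sequence
+;
+;   INIT_YMM avx2
+;   cglobal sad_8x8, 4, 4, 5
+;
+; emits the function under the mangled, cpuflag-suffixed symbol kvz_sad_8x8_avx2
+; (hidden on ELF), and the call macro defined further down appends the same
+; suffix, so "call sad_8x8" in another avx2-flagged function reaches that symbol.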
+%macro cglobal 1-2+ "" ; name, [PROLOGUE args] + cglobal_internal 1, %1 %+ SUFFIX, %2 +%endmacro +%macro cvisible 1-2+ "" ; name, [PROLOGUE args] + cglobal_internal 0, %1 %+ SUFFIX, %2 +%endmacro +%macro cglobal_internal 2-3+ + %if %1 + %xdefine %%FUNCTION_PREFIX private_prefix + %xdefine %%VISIBILITY hidden + %else + %xdefine %%FUNCTION_PREFIX public_prefix + %xdefine %%VISIBILITY + %endif + %ifndef cglobaled_%2 + %xdefine %2 mangle(%%FUNCTION_PREFIX %+ _ %+ %2) + %xdefine %2.skip_prologue %2 %+ .skip_prologue + CAT_XDEFINE cglobaled_, %2, 1 + %endif + %xdefine current_function %2 + %ifidn __OUTPUT_FORMAT__,elf + global %2:function %%VISIBILITY + %else + global %2 + %endif + align function_align + %2: + RESET_MM_PERMUTATION ; needed for x86-64, also makes disassembly somewhat nicer + %xdefine rstk rsp ; copy of the original stack pointer, used when greater alignment than the known stack alignment is required + %assign stack_offset 0 ; stack pointer offset relative to the return address + %assign stack_size 0 ; amount of stack space that can be freely used inside a function + %assign stack_size_padded 0 ; total amount of allocated stack space, including space for callee-saved xmm registers on WIN64 and alignment padding + %assign xmm_regs_used 0 ; number of XMM registers requested, used for dealing with callee-saved registers on WIN64 + %ifnidn %3, "" + PROLOGUE %3 + %endif +%endmacro + +%macro cextern 1 + %xdefine %1 mangle(private_prefix %+ _ %+ %1) + CAT_XDEFINE cglobaled_, %1, 1 + extern %1 +%endmacro + +; like cextern, but without the prefix +%macro cextern_naked 1 + %xdefine %1 mangle(%1) + CAT_XDEFINE cglobaled_, %1, 1 + extern %1 +%endmacro + +%macro const 1-2+ + %xdefine %1 mangle(private_prefix %+ _ %+ %1) + %ifidn __OUTPUT_FORMAT__,elf + global %1:data hidden + %else + global %1 + %endif + %1: %2 +%endmacro + +; This is needed for ELF, otherwise the GNU linker assumes the stack is +; executable by default. +%ifidn __OUTPUT_FORMAT__,elf +SECTION .note.GNU-stack noalloc noexec nowrite progbits +%endif + +; cpuflags + +%assign cpuflags_mmx (1<<0) +%assign cpuflags_mmx2 (1<<1) | cpuflags_mmx +%assign cpuflags_3dnow (1<<2) | cpuflags_mmx +%assign cpuflags_3dnowext (1<<3) | cpuflags_3dnow +%assign cpuflags_sse (1<<4) | cpuflags_mmx2 +%assign cpuflags_sse2 (1<<5) | cpuflags_sse +%assign cpuflags_sse2slow (1<<6) | cpuflags_sse2 +%assign cpuflags_sse3 (1<<7) | cpuflags_sse2 +%assign cpuflags_ssse3 (1<<8) | cpuflags_sse3 +%assign cpuflags_sse4 (1<<9) | cpuflags_ssse3 +%assign cpuflags_sse42 (1<<10)| cpuflags_sse4 +%assign cpuflags_avx (1<<11)| cpuflags_sse42 +%assign cpuflags_xop (1<<12)| cpuflags_avx +%assign cpuflags_fma4 (1<<13)| cpuflags_avx +%assign cpuflags_avx2 (1<<14)| cpuflags_avx +%assign cpuflags_fma3 (1<<15)| cpuflags_avx + +%assign cpuflags_cache32 (1<<16) +%assign cpuflags_cache64 (1<<17) +%assign cpuflags_slowctz (1<<18) +%assign cpuflags_lzcnt (1<<19) +%assign cpuflags_aligned (1<<20) ; not a cpu feature, but a function variant +%assign cpuflags_atom (1<<21) +%assign cpuflags_bmi1 (1<<22)|cpuflags_lzcnt +%assign cpuflags_bmi2 (1<<23)|cpuflags_bmi1 + +%define cpuflag(x) ((cpuflags & (cpuflags_ %+ x)) == (cpuflags_ %+ x)) +%define notcpuflag(x) ((cpuflags & (cpuflags_ %+ x)) != (cpuflags_ %+ x)) + +; Takes up to 2 cpuflags from the above list. +; All subsequent functions (up to the next INIT_CPUFLAGS) is built for the specified cpu. +; You shouldn't need to invoke this macro directly, it's a subroutine for INIT_MMX &co. 
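+
+; Illustrative (hypothetical) example of the cumulative flag sets above: after
+; "INIT_XMM ssse3", cpuflag(sse2) and cpuflag(ssse3) both evaluate to 1 while
+; cpuflag(sse4) is 0, so shared code can branch at assembly time:
+;
+;   %if cpuflag(ssse3)
+;       pshufb m0, m2            ; SSSE3 shuffle fast path
+;   %else
+;       ; SSE2 fallback would go here
+;   %endif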
+%macro INIT_CPUFLAGS 0-2 + CPUNOP amdnop + %if %0 >= 1 + %xdefine cpuname %1 + %assign cpuflags cpuflags_%1 + %if %0 >= 2 + %xdefine cpuname %1_%2 + %assign cpuflags cpuflags | cpuflags_%2 + %endif + %xdefine SUFFIX _ %+ cpuname + %if cpuflag(avx) + %assign avx_enabled 1 + %endif + %if (mmsize == 16 && notcpuflag(sse2)) || (mmsize == 32 && notcpuflag(avx2)) + %define mova movaps + %define movu movups + %define movnta movntps + %endif + %if cpuflag(aligned) + %define movu mova + %elifidn %1, sse3 + %define movu lddqu + %endif + %if ARCH_X86_64 == 0 && notcpuflag(sse2) + CPUNOP basicnop + %endif + %else + %xdefine SUFFIX + %undef cpuname + %undef cpuflags + %endif +%endmacro + +; Merge mmx and sse* +; m# is a simd register of the currently selected size +; xm# is the corresponding xmm register if mmsize >= 16, otherwise the same as m# +; ym# is the corresponding ymm register if mmsize >= 32, otherwise the same as m# +; (All 3 remain in sync through SWAP.) + +%macro CAT_XDEFINE 3 + %xdefine %1%2 %3 +%endmacro + +%macro CAT_UNDEF 2 + %undef %1%2 +%endmacro + +%macro INIT_MMX 0-1+ + %assign avx_enabled 0 + %define RESET_MM_PERMUTATION INIT_MMX %1 + %define mmsize 8 + %define num_mmregs 8 + %define mova movq + %define movu movq + %define movh movd + %define movnta movntq + %assign %%i 0 + %rep 8 + CAT_XDEFINE m, %%i, mm %+ %%i + CAT_XDEFINE nmm, %%i, %%i + %assign %%i %%i+1 + %endrep + %rep 8 + CAT_UNDEF m, %%i + CAT_UNDEF nmm, %%i + %assign %%i %%i+1 + %endrep + INIT_CPUFLAGS %1 +%endmacro + +%macro INIT_XMM 0-1+ + %assign avx_enabled 0 + %define RESET_MM_PERMUTATION INIT_XMM %1 + %define mmsize 16 + %define num_mmregs 8 + %if ARCH_X86_64 + %define num_mmregs 16 + %endif + %define mova movdqa + %define movu movdqu + %define movh movq + %define movnta movntdq + %assign %%i 0 + %rep num_mmregs + CAT_XDEFINE m, %%i, xmm %+ %%i + CAT_XDEFINE nxmm, %%i, %%i + %assign %%i %%i+1 + %endrep + INIT_CPUFLAGS %1 +%endmacro + +%macro INIT_YMM 0-1+ + %assign avx_enabled 1 + %define RESET_MM_PERMUTATION INIT_YMM %1 + %define mmsize 32 + %define num_mmregs 8 + %if ARCH_X86_64 + %define num_mmregs 16 + %endif + %define mova movdqa + %define movu movdqu + %undef movh + %define movnta movntdq + %assign %%i 0 + %rep num_mmregs + CAT_XDEFINE m, %%i, ymm %+ %%i + CAT_XDEFINE nymm, %%i, %%i + %assign %%i %%i+1 + %endrep + INIT_CPUFLAGS %1 +%endmacro + +INIT_XMM + +%macro DECLARE_MMCAST 1 + %define mmmm%1 mm%1 + %define mmxmm%1 mm%1 + %define mmymm%1 mm%1 + %define xmmmm%1 mm%1 + %define xmmxmm%1 xmm%1 + %define xmmymm%1 xmm%1 + %define ymmmm%1 mm%1 + %define ymmxmm%1 xmm%1 + %define ymmymm%1 ymm%1 + %define xm%1 xmm %+ m%1 + %define ym%1 ymm %+ m%1 +%endmacro + +%assign i 0 +%rep 16 + DECLARE_MMCAST i +%assign i i+1 +%endrep + +; I often want to use macros that permute their arguments. e.g. there's no +; efficient way to implement butterfly or transpose or dct without swapping some +; arguments. +; +; I would like to not have to manually keep track of the permutations: +; If I insert a permutation in the middle of a function, it should automatically +; change everything that follows. For more complex macros I may also have multiple +; implementations, e.g. the SSE2 and SSSE3 versions may have different permutations. +; +; Hence these macros. Insert a PERMUTE or some SWAPs at the end of a macro that +; permutes its arguments. It's equivalent to exchanging the contents of the +; registers, except that this way you exchange the register names instead, so it +; doesn't cost any cycles. 
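+
+; Illustrative (hypothetical) example: after
+;
+;   INIT_XMM sse2
+;   mova  m0, [r0]
+;   mova  m1, [r1]
+;   SWAP  0, 1
+;
+; the names m0 and m1 are exchanged at assembly time, so any instruction written
+; in terms of m0 below the SWAP actually operates on xmm1 (and vice versa); no
+; register-to-register move is emitted.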
+ +%macro PERMUTE 2-* ; takes a list of pairs to swap +%rep %0/2 + %xdefine %%tmp%2 m%2 + %rotate 2 +%endrep +%rep %0/2 + %xdefine m%1 %%tmp%2 + CAT_XDEFINE n, m%1, %1 + %rotate 2 +%endrep +%endmacro + +%macro SWAP 2+ ; swaps a single chain (sometimes more concise than pairs) +%ifnum %1 ; SWAP 0, 1, ... + SWAP_INTERNAL_NUM %1, %2 +%else ; SWAP m0, m1, ... + SWAP_INTERNAL_NAME %1, %2 +%endif +%endmacro + +%macro SWAP_INTERNAL_NUM 2-* + %rep %0-1 + %xdefine %%tmp m%1 + %xdefine m%1 m%2 + %xdefine m%2 %%tmp + CAT_XDEFINE n, m%1, %1 + CAT_XDEFINE n, m%2, %2 + %rotate 1 + %endrep +%endmacro + +%macro SWAP_INTERNAL_NAME 2-* + %xdefine %%args n %+ %1 + %rep %0-1 + %xdefine %%args %%args, n %+ %2 + %rotate 1 + %endrep + SWAP_INTERNAL_NUM %%args +%endmacro + +; If SAVE_MM_PERMUTATION is placed at the end of a function, then any later +; calls to that function will automatically load the permutation, so values can +; be returned in mmregs. +%macro SAVE_MM_PERMUTATION 0-1 + %if %0 + %xdefine %%f %1_m + %else + %xdefine %%f current_function %+ _m + %endif + %assign %%i 0 + %rep num_mmregs + CAT_XDEFINE %%f, %%i, m %+ %%i + %assign %%i %%i+1 + %endrep +%endmacro + +%macro LOAD_MM_PERMUTATION 1 ; name to load from + %ifdef %1_m0 + %assign %%i 0 + %rep num_mmregs + CAT_XDEFINE m, %%i, %1_m %+ %%i + CAT_XDEFINE n, m %+ %%i, %%i + %assign %%i %%i+1 + %endrep + %endif +%endmacro + +; Append cpuflags to the callee's name iff the appended name is known and the plain name isn't +%macro call 1 + call_internal %1, %1 %+ SUFFIX +%endmacro +%macro call_internal 2 + %xdefine %%i %1 + %ifndef cglobaled_%1 + %ifdef cglobaled_%2 + %xdefine %%i %2 + %endif + %endif + call %%i + LOAD_MM_PERMUTATION %%i +%endmacro + +; Substitutions that reduce instruction size but are functionally equivalent +%macro add 2 + %ifnum %2 + %if %2==128 + sub %1, -128 + %else + add %1, %2 + %endif + %else + add %1, %2 + %endif +%endmacro + +%macro sub 2 + %ifnum %2 + %if %2==128 + add %1, -128 + %else + sub %1, %2 + %endif + %else + sub %1, %2 + %endif +%endmacro + +;============================================================================= +; AVX abstraction layer +;============================================================================= + +%assign i 0 +%rep 16 + %if i < 8 + CAT_XDEFINE sizeofmm, i, 8 + %endif + CAT_XDEFINE sizeofxmm, i, 16 + CAT_XDEFINE sizeofymm, i, 32 +%assign i i+1 +%endrep +%undef i + +%macro CHECK_AVX_INSTR_EMU 3-* + %xdefine %%opcode %1 + %xdefine %%dst %2 + %rep %0-2 + %ifidn %%dst, %3 + %error non-avx emulation of ``%%opcode'' is not supported + %endif + %rotate 1 + %endrep +%endmacro + +;%1 == instruction +;%2 == 1 if float, 0 if int +;%3 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise +;%4 == 1 if commutative (i.e. 
doesn't matter which src arg is which), 0 if not +;%5+: operands +%macro RUN_AVX_INSTR 5-8+ + %ifnum sizeof%6 + %assign %%sizeofreg sizeof%6 + %elifnum sizeof%5 + %assign %%sizeofreg sizeof%5 + %else + %assign %%sizeofreg mmsize + %endif + %assign %%emulate_avx 0 + %if avx_enabled && %%sizeofreg >= 16 + %xdefine %%instr v%1 + %else + %xdefine %%instr %1 + %if %0 >= 7+%3 + %assign %%emulate_avx 1 + %endif + %endif + + %if %%emulate_avx + %xdefine %%src1 %6 + %xdefine %%src2 %7 + %ifnidn %5, %6 + %if %0 >= 8 + CHECK_AVX_INSTR_EMU {%1 %5, %6, %7, %8}, %5, %7, %8 + %else + CHECK_AVX_INSTR_EMU {%1 %5, %6, %7}, %5, %7 + %endif + %if %4 && %3 == 0 + %ifnid %7 + ; 3-operand AVX instructions with a memory arg can only have it in src2, + ; whereas SSE emulation prefers to have it in src1 (i.e. the mov). + ; So, if the instruction is commutative with a memory arg, swap them. + %xdefine %%src1 %7 + %xdefine %%src2 %6 + %endif + %endif + %if %%sizeofreg == 8 + MOVQ %5, %%src1 + %elif %2 + MOVAPS %5, %%src1 + %else + MOVDQA %5, %%src1 + %endif + %endif + %if %0 >= 8 + %1 %5, %%src2, %8 + %else + %1 %5, %%src2 + %endif + %elif %0 >= 8 + %%instr %5, %6, %7, %8 + %elif %0 == 7 + %%instr %5, %6, %7 + %elif %0 == 6 + %%instr %5, %6 + %else + %%instr %5 + %endif +%endmacro + +;%1 == instruction +;%2 == 1 if float, 0 if int +;%3 == 1 if non-destructive or 4-operand (xmm, xmm, xmm, imm), 0 otherwise +;%4 == 1 if commutative (i.e. doesn't matter which src arg is which), 0 if not +%macro AVX_INSTR 1-4 0, 1, 0 + %macro %1 1-9 fnord, fnord, fnord, fnord, %1, %2, %3, %4 + %ifidn %2, fnord + RUN_AVX_INSTR %6, %7, %8, %9, %1 + %elifidn %3, fnord + RUN_AVX_INSTR %6, %7, %8, %9, %1, %2 + %elifidn %4, fnord + RUN_AVX_INSTR %6, %7, %8, %9, %1, %2, %3 + %elifidn %5, fnord + RUN_AVX_INSTR %6, %7, %8, %9, %1, %2, %3, %4 + %else + RUN_AVX_INSTR %6, %7, %8, %9, %1, %2, %3, %4, %5 + %endif + %endmacro +%endmacro + +; Instructions with both VEX and non-VEX encodings +; Non-destructive instructions are written without parameters +AVX_INSTR addpd, 1, 0, 1 +AVX_INSTR addps, 1, 0, 1 +AVX_INSTR addsd, 1, 0, 1 +AVX_INSTR addss, 1, 0, 1 +AVX_INSTR addsubpd, 1, 0, 0 +AVX_INSTR addsubps, 1, 0, 0 +AVX_INSTR aesdec, 0, 0, 0 +AVX_INSTR aesdeclast, 0, 0, 0 +AVX_INSTR aesenc, 0, 0, 0 +AVX_INSTR aesenclast, 0, 0, 0 +AVX_INSTR aesimc +AVX_INSTR aeskeygenassist +AVX_INSTR andnpd, 1, 0, 0 +AVX_INSTR andnps, 1, 0, 0 +AVX_INSTR andpd, 1, 0, 1 +AVX_INSTR andps, 1, 0, 1 +AVX_INSTR blendpd, 1, 0, 0 +AVX_INSTR blendps, 1, 0, 0 +AVX_INSTR blendvpd, 1, 0, 0 +AVX_INSTR blendvps, 1, 0, 0 +AVX_INSTR cmppd, 1, 1, 0 +AVX_INSTR cmpps, 1, 1, 0 +AVX_INSTR cmpsd, 1, 1, 0 +AVX_INSTR cmpss, 1, 1, 0 +AVX_INSTR comisd +AVX_INSTR comiss +AVX_INSTR cvtdq2pd +AVX_INSTR cvtdq2ps +AVX_INSTR cvtpd2dq +AVX_INSTR cvtpd2ps +AVX_INSTR cvtps2dq +AVX_INSTR cvtps2pd +AVX_INSTR cvtsd2si +AVX_INSTR cvtsd2ss +AVX_INSTR cvtsi2sd +AVX_INSTR cvtsi2ss +AVX_INSTR cvtss2sd +AVX_INSTR cvtss2si +AVX_INSTR cvttpd2dq +AVX_INSTR cvttps2dq +AVX_INSTR cvttsd2si +AVX_INSTR cvttss2si +AVX_INSTR divpd, 1, 0, 0 +AVX_INSTR divps, 1, 0, 0 +AVX_INSTR divsd, 1, 0, 0 +AVX_INSTR divss, 1, 0, 0 +AVX_INSTR dppd, 1, 1, 0 +AVX_INSTR dpps, 1, 1, 0 +AVX_INSTR extractps +AVX_INSTR haddpd, 1, 0, 0 +AVX_INSTR haddps, 1, 0, 0 +AVX_INSTR hsubpd, 1, 0, 0 +AVX_INSTR hsubps, 1, 0, 0 +AVX_INSTR insertps, 1, 1, 0 +AVX_INSTR lddqu +AVX_INSTR ldmxcsr +AVX_INSTR maskmovdqu +AVX_INSTR maxpd, 1, 0, 1 +AVX_INSTR maxps, 1, 0, 1 +AVX_INSTR maxsd, 1, 0, 1 +AVX_INSTR maxss, 1, 0, 1 +AVX_INSTR minpd, 1, 0, 1 +AVX_INSTR minps, 1, 0, 
1 +AVX_INSTR minsd, 1, 0, 1 +AVX_INSTR minss, 1, 0, 1 +AVX_INSTR movapd +AVX_INSTR movaps +AVX_INSTR movd +AVX_INSTR movddup +AVX_INSTR movdqa +AVX_INSTR movdqu +AVX_INSTR movhlps, 1, 0, 0 +AVX_INSTR movhpd, 1, 0, 0 +AVX_INSTR movhps, 1, 0, 0 +AVX_INSTR movlhps, 1, 0, 0 +AVX_INSTR movlpd, 1, 0, 0 +AVX_INSTR movlps, 1, 0, 0 +AVX_INSTR movmskpd +AVX_INSTR movmskps +AVX_INSTR movntdq +AVX_INSTR movntdqa +AVX_INSTR movntpd +AVX_INSTR movntps +AVX_INSTR movq +AVX_INSTR movsd, 1, 0, 0 +AVX_INSTR movshdup +AVX_INSTR movsldup +AVX_INSTR movss, 1, 0, 0 +AVX_INSTR movupd +AVX_INSTR movups +AVX_INSTR mpsadbw, 0, 1, 0 +AVX_INSTR mulpd, 1, 0, 1 +AVX_INSTR mulps, 1, 0, 1 +AVX_INSTR mulsd, 1, 0, 1 +AVX_INSTR mulss, 1, 0, 1 +AVX_INSTR orpd, 1, 0, 1 +AVX_INSTR orps, 1, 0, 1 +AVX_INSTR pabsb +AVX_INSTR pabsd +AVX_INSTR pabsw +AVX_INSTR packsswb, 0, 0, 0 +AVX_INSTR packssdw, 0, 0, 0 +AVX_INSTR packuswb, 0, 0, 0 +AVX_INSTR packusdw, 0, 0, 0 +AVX_INSTR paddb, 0, 0, 1 +AVX_INSTR paddw, 0, 0, 1 +AVX_INSTR paddd, 0, 0, 1 +AVX_INSTR paddq, 0, 0, 1 +AVX_INSTR paddsb, 0, 0, 1 +AVX_INSTR paddsw, 0, 0, 1 +AVX_INSTR paddusb, 0, 0, 1 +AVX_INSTR paddusw, 0, 0, 1 +AVX_INSTR palignr, 0, 1, 0 +AVX_INSTR pand, 0, 0, 1 +AVX_INSTR pandn, 0, 0, 0 +AVX_INSTR pavgb, 0, 0, 1 +AVX_INSTR pavgw, 0, 0, 1 +AVX_INSTR pblendvb, 0, 0, 0 +AVX_INSTR pblendw, 0, 1, 0 +AVX_INSTR pclmulqdq, 0, 1, 0 +AVX_INSTR pcmpestri +AVX_INSTR pcmpestrm +AVX_INSTR pcmpistri +AVX_INSTR pcmpistrm +AVX_INSTR pcmpeqb, 0, 0, 1 +AVX_INSTR pcmpeqw, 0, 0, 1 +AVX_INSTR pcmpeqd, 0, 0, 1 +AVX_INSTR pcmpeqq, 0, 0, 1 +AVX_INSTR pcmpgtb, 0, 0, 0 +AVX_INSTR pcmpgtw, 0, 0, 0 +AVX_INSTR pcmpgtd, 0, 0, 0 +AVX_INSTR pcmpgtq, 0, 0, 0 +AVX_INSTR pextrb +AVX_INSTR pextrd +AVX_INSTR pextrq +AVX_INSTR pextrw +AVX_INSTR phaddw, 0, 0, 0 +AVX_INSTR phaddd, 0, 0, 0 +AVX_INSTR phaddsw, 0, 0, 0 +AVX_INSTR phminposuw +AVX_INSTR phsubw, 0, 0, 0 +AVX_INSTR phsubd, 0, 0, 0 +AVX_INSTR phsubsw, 0, 0, 0 +AVX_INSTR pinsrb, 0, 1, 0 +AVX_INSTR pinsrd, 0, 1, 0 +AVX_INSTR pinsrq, 0, 1, 0 +AVX_INSTR pinsrw, 0, 1, 0 +AVX_INSTR pmaddwd, 0, 0, 1 +AVX_INSTR pmaddubsw, 0, 0, 0 +AVX_INSTR pmaxsb, 0, 0, 1 +AVX_INSTR pmaxsw, 0, 0, 1 +AVX_INSTR pmaxsd, 0, 0, 1 +AVX_INSTR pmaxub, 0, 0, 1 +AVX_INSTR pmaxuw, 0, 0, 1 +AVX_INSTR pmaxud, 0, 0, 1 +AVX_INSTR pminsb, 0, 0, 1 +AVX_INSTR pminsw, 0, 0, 1 +AVX_INSTR pminsd, 0, 0, 1 +AVX_INSTR pminub, 0, 0, 1 +AVX_INSTR pminuw, 0, 0, 1 +AVX_INSTR pminud, 0, 0, 1 +AVX_INSTR pmovmskb +AVX_INSTR pmovsxbw +AVX_INSTR pmovsxbd +AVX_INSTR pmovsxbq +AVX_INSTR pmovsxwd +AVX_INSTR pmovsxwq +AVX_INSTR pmovsxdq +AVX_INSTR pmovzxbw +AVX_INSTR pmovzxbd +AVX_INSTR pmovzxbq +AVX_INSTR pmovzxwd +AVX_INSTR pmovzxwq +AVX_INSTR pmovzxdq +AVX_INSTR pmuldq, 0, 0, 1 +AVX_INSTR pmulhrsw, 0, 0, 1 +AVX_INSTR pmulhuw, 0, 0, 1 +AVX_INSTR pmulhw, 0, 0, 1 +AVX_INSTR pmullw, 0, 0, 1 +AVX_INSTR pmulld, 0, 0, 1 +AVX_INSTR pmuludq, 0, 0, 1 +AVX_INSTR por, 0, 0, 1 +AVX_INSTR psadbw, 0, 0, 1 +AVX_INSTR pshufb, 0, 0, 0 +AVX_INSTR pshufd +AVX_INSTR pshufhw +AVX_INSTR pshuflw +AVX_INSTR psignb, 0, 0, 0 +AVX_INSTR psignw, 0, 0, 0 +AVX_INSTR psignd, 0, 0, 0 +AVX_INSTR psllw, 0, 0, 0 +AVX_INSTR pslld, 0, 0, 0 +AVX_INSTR psllq, 0, 0, 0 +AVX_INSTR pslldq, 0, 0, 0 +AVX_INSTR psraw, 0, 0, 0 +AVX_INSTR psrad, 0, 0, 0 +AVX_INSTR psrlw, 0, 0, 0 +AVX_INSTR psrld, 0, 0, 0 +AVX_INSTR psrlq, 0, 0, 0 +AVX_INSTR psrldq, 0, 0, 0 +AVX_INSTR psubb, 0, 0, 0 +AVX_INSTR psubw, 0, 0, 0 +AVX_INSTR psubd, 0, 0, 0 +AVX_INSTR psubq, 0, 0, 0 +AVX_INSTR psubsb, 0, 0, 0 +AVX_INSTR psubsw, 0, 0, 0 +AVX_INSTR psubusb, 0, 0, 0 +AVX_INSTR 
psubusw, 0, 0, 0 +AVX_INSTR ptest +AVX_INSTR punpckhbw, 0, 0, 0 +AVX_INSTR punpckhwd, 0, 0, 0 +AVX_INSTR punpckhdq, 0, 0, 0 +AVX_INSTR punpckhqdq, 0, 0, 0 +AVX_INSTR punpcklbw, 0, 0, 0 +AVX_INSTR punpcklwd, 0, 0, 0 +AVX_INSTR punpckldq, 0, 0, 0 +AVX_INSTR punpcklqdq, 0, 0, 0 +AVX_INSTR pxor, 0, 0, 1 +AVX_INSTR rcpps, 1, 0, 0 +AVX_INSTR rcpss, 1, 0, 0 +AVX_INSTR roundpd +AVX_INSTR roundps +AVX_INSTR roundsd +AVX_INSTR roundss +AVX_INSTR rsqrtps, 1, 0, 0 +AVX_INSTR rsqrtss, 1, 0, 0 +AVX_INSTR shufpd, 1, 1, 0 +AVX_INSTR shufps, 1, 1, 0 +AVX_INSTR sqrtpd, 1, 0, 0 +AVX_INSTR sqrtps, 1, 0, 0 +AVX_INSTR sqrtsd, 1, 0, 0 +AVX_INSTR sqrtss, 1, 0, 0 +AVX_INSTR stmxcsr +AVX_INSTR subpd, 1, 0, 0 +AVX_INSTR subps, 1, 0, 0 +AVX_INSTR subsd, 1, 0, 0 +AVX_INSTR subss, 1, 0, 0 +AVX_INSTR ucomisd +AVX_INSTR ucomiss +AVX_INSTR unpckhpd, 1, 0, 0 +AVX_INSTR unpckhps, 1, 0, 0 +AVX_INSTR unpcklpd, 1, 0, 0 +AVX_INSTR unpcklps, 1, 0, 0 +AVX_INSTR xorpd, 1, 0, 1 +AVX_INSTR xorps, 1, 0, 1 + +; 3DNow instructions, for sharing code between AVX, SSE and 3DN +AVX_INSTR pfadd, 1, 0, 1 +AVX_INSTR pfsub, 1, 0, 0 +AVX_INSTR pfmul, 1, 0, 1 + +; base-4 constants for shuffles +%assign i 0 +%rep 256 + %assign j ((i>>6)&3)*1000 + ((i>>4)&3)*100 + ((i>>2)&3)*10 + (i&3) + %if j < 10 + CAT_XDEFINE q000, j, i + %elif j < 100 + CAT_XDEFINE q00, j, i + %elif j < 1000 + CAT_XDEFINE q0, j, i + %else + CAT_XDEFINE q, j, i + %endif +%assign i i+1 +%endrep +%undef i +%undef j + +%macro FMA_INSTR 3 + %macro %1 4-7 %1, %2, %3 + %if cpuflag(xop) + v%5 %1, %2, %3, %4 + %else + %6 %1, %2, %3 + %7 %1, %4 + %endif + %endmacro +%endmacro + +FMA_INSTR pmacsdd, pmulld, paddd +FMA_INSTR pmacsww, pmullw, paddw +FMA_INSTR pmadcswd, pmaddwd, paddd + +; convert FMA4 to FMA3 if possible +%macro FMA4_INSTR 4 + %macro %1 4-8 %1, %2, %3, %4 + %if cpuflag(fma4) + v%5 %1, %2, %3, %4 + %elifidn %1, %2 + v%6 %1, %4, %3 ; %1 = %1 * %3 + %4 + %elifidn %1, %3 + v%7 %1, %2, %4 ; %1 = %2 * %1 + %4 + %elifidn %1, %4 + v%8 %1, %2, %3 ; %1 = %2 * %3 + %1 + %else + %error fma3 emulation of ``%5 %1, %2, %3, %4'' is not supported + %endif + %endmacro +%endmacro + +FMA4_INSTR fmaddpd, fmadd132pd, fmadd213pd, fmadd231pd +FMA4_INSTR fmaddps, fmadd132ps, fmadd213ps, fmadd231ps +FMA4_INSTR fmaddsd, fmadd132sd, fmadd213sd, fmadd231sd +FMA4_INSTR fmaddss, fmadd132ss, fmadd213ss, fmadd231ss + +FMA4_INSTR fmaddsubpd, fmaddsub132pd, fmaddsub213pd, fmaddsub231pd +FMA4_INSTR fmaddsubps, fmaddsub132ps, fmaddsub213ps, fmaddsub231ps +FMA4_INSTR fmsubaddpd, fmsubadd132pd, fmsubadd213pd, fmsubadd231pd +FMA4_INSTR fmsubaddps, fmsubadd132ps, fmsubadd213ps, fmsubadd231ps + +FMA4_INSTR fmsubpd, fmsub132pd, fmsub213pd, fmsub231pd +FMA4_INSTR fmsubps, fmsub132ps, fmsub213ps, fmsub231ps +FMA4_INSTR fmsubsd, fmsub132sd, fmsub213sd, fmsub231sd +FMA4_INSTR fmsubss, fmsub132ss, fmsub213ss, fmsub231ss + +FMA4_INSTR fnmaddpd, fnmadd132pd, fnmadd213pd, fnmadd231pd +FMA4_INSTR fnmaddps, fnmadd132ps, fnmadd213ps, fnmadd231ps +FMA4_INSTR fnmaddsd, fnmadd132sd, fnmadd213sd, fnmadd231sd +FMA4_INSTR fnmaddss, fnmadd132ss, fnmadd213ss, fnmadd231ss + +FMA4_INSTR fnmsubpd, fnmsub132pd, fnmsub213pd, fnmsub231pd +FMA4_INSTR fnmsubps, fnmsub132ps, fnmsub213ps, fnmsub231ps +FMA4_INSTR fnmsubsd, fnmsub132sd, fnmsub213sd, fnmsub231sd +FMA4_INSTR fnmsubss, fnmsub132ss, fnmsub213ss, fnmsub231ss + +; workaround: vpbroadcastq is broken in x86_32 due to a yasm bug +%if ARCH_X86_64 == 0 +%macro vpbroadcastq 2 +%if sizeof%1 == 16 + movddup %1, %2 +%else + vbroadcastsd %1, %2 +%endif +%endmacro +%endif diff --git 
a/tests/bipred_generic_tests.c b/tests/bipred_generic_tests.c
new file mode 100644
index 00000000..189b34af
--- /dev/null
+++ b/tests/bipred_generic_tests.c
@@ -0,0 +1,218 @@
+/*****************************************************************************
+* This file is part of Kvazaar HEVC encoder.
+*
+* Copyright (C) 2017 Tampere University of Technology and others (see
+* COPYING file).
+*
+* Kvazaar is free software: you can redistribute it and/or modify
+* it under the terms of the GNU Lesser General Public License version 2.1 as
+* published by the Free Software Foundation.
+*
+* Kvazaar is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU General Public License
+* along with Kvazaar. If not, see <http://www.gnu.org/licenses/>.
+****************************************************************************/
+
+#include "greatest/greatest.h"
+
+#include "test_strategies.h"
+#include "strategies/generic/picture-generic.h"
+#include <string.h>
+#include <stdlib.h>
+
+
+static lcu_t expected_test_result;
+static lcu_t result;
+
+static lcu_t lcu1;
+
+int temp1, temp2, temp3, temp4;
+
+int16_t mv_param[2][2] = { { 7,7 },{ 7,7 } };
+int width = 4;
+int height = 4;
+int xpos = 0;
+int ypos = 0;
+
+kvz_pixel temp_lcu_y[LCU_WIDTH*LCU_WIDTH];
+kvz_pixel temp_lcu_u[LCU_WIDTH_C*LCU_WIDTH_C];
+kvz_pixel temp_lcu_v[LCU_WIDTH_C*LCU_WIDTH_C];
+
+int hi_prec_luma_rec0;
+int hi_prec_luma_rec1;
+int hi_prec_chroma_rec0;
+int hi_prec_chroma_rec1;
+
+hi_prec_buf_t* high_precision_rec0 = 0;
+hi_prec_buf_t* high_precision_rec1 = 0;
+
+int temp_x, temp_y;
+
+
+
+static void setup()
+{
+
+  memset(lcu1.rec.y, 0, sizeof(kvz_pixel) * 64 * 64);
+  memset(lcu1.rec.u, 0, sizeof(kvz_pixel) * 32 * 32);
+  memset(lcu1.rec.v, 0, sizeof(kvz_pixel) * 32 * 32);
+
+  for (int i = 0; i < LCU_WIDTH*LCU_WIDTH; i++) {
+    temp_lcu_y[i] = rand() % 256;
+    lcu1.rec.y[i] = rand() % 256;
+  }
+
+  for (int i = 0; i < LCU_WIDTH_C*LCU_WIDTH_C; i++) {
+    temp_lcu_u[i] = rand() % 256;
+    temp_lcu_v[i] = rand() % 256;
+    lcu1.rec.v[i] = rand() % 256;
+    lcu1.rec.u[i] = rand() % 256;
+  }
+
+
+
+  memset(expected_test_result.rec.y, 0, sizeof(kvz_pixel) * 64 * 64);
+  memset(expected_test_result.rec.u, 0, sizeof(kvz_pixel) * 32 * 32);
+  memset(expected_test_result.rec.v, 0, sizeof(kvz_pixel) * 32 * 32);
+
+  memcpy(expected_test_result.rec.y, lcu1.rec.y, sizeof(kvz_pixel) * 64 * 64);
+  memcpy(expected_test_result.rec.u, lcu1.rec.u, sizeof(kvz_pixel) * 32 * 32);
+  memcpy(expected_test_result.rec.v, lcu1.rec.v, sizeof(kvz_pixel) * 32 * 32);
+
+  // Compute the expected result with the non-optimized reference code from picture-generic.c.
+
+
+  int shift = 15 - KVZ_BIT_DEPTH;
+  int offset = 1 << (shift - 1);
+
+  hi_prec_luma_rec0 = 0;   // mv_param[0][0] & 3 || mv_param[0][1] & 3;
+  hi_prec_luma_rec1 = 0;   // mv_param[1][0] & 3 || mv_param[1][1] & 3;
+
+  hi_prec_chroma_rec0 = 0; // mv_param[0][0] & 7 || mv_param[0][1] & 7;
+  hi_prec_chroma_rec1 = 0; // mv_param[1][0] & 7 || mv_param[1][1] & 7;
+
+  if (hi_prec_chroma_rec0) high_precision_rec0 = kvz_hi_prec_buf_t_alloc(LCU_WIDTH*LCU_WIDTH);
+  if (hi_prec_chroma_rec1) high_precision_rec1 = kvz_hi_prec_buf_t_alloc(LCU_WIDTH*LCU_WIDTH);
+
+
+
+
+  for (temp_y = 0; temp_y < height; ++temp_y) {
+    int y_in_lcu = ((ypos + temp_y) & ((LCU_WIDTH)-1));
+    for (temp_x = 0; temp_x < width; ++temp_x) {
+      int x_in_lcu = ((xpos + temp_x) & ((LCU_WIDTH)-1));
+      int16_t sample0_y = (hi_prec_luma_rec0 ? high_precision_rec0->y[y_in_lcu * LCU_WIDTH + x_in_lcu] : (temp_lcu_y[y_in_lcu * LCU_WIDTH + x_in_lcu] << (14 - KVZ_BIT_DEPTH)));
+      int16_t sample1_y = (hi_prec_luma_rec1 ? high_precision_rec1->y[y_in_lcu * LCU_WIDTH + x_in_lcu] : (expected_test_result.rec.y[y_in_lcu * LCU_WIDTH + x_in_lcu] << (14 - KVZ_BIT_DEPTH)));
+      expected_test_result.rec.y[y_in_lcu * LCU_WIDTH + x_in_lcu] = (kvz_pixel)kvz_fast_clip_32bit_to_pixel((sample0_y + sample1_y + offset) >> shift);
+    }
+
+  }
+  for (temp_y = 0; temp_y < height >> 1; ++temp_y) {
+    int y_in_lcu = (((ypos >> 1) + temp_y) & (LCU_WIDTH_C - 1));
+    for (temp_x = 0; temp_x < width >> 1; ++temp_x) {
+      int x_in_lcu = (((xpos >> 1) + temp_x) & (LCU_WIDTH_C - 1));
+      int16_t sample0_u = (hi_prec_chroma_rec0 ? high_precision_rec0->u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] : (temp_lcu_u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] << (14 - KVZ_BIT_DEPTH)));
+      int16_t sample1_u = (hi_prec_chroma_rec1 ? high_precision_rec1->u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] : (expected_test_result.rec.u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] << (14 - KVZ_BIT_DEPTH)));
+      expected_test_result.rec.u[y_in_lcu * LCU_WIDTH_C + x_in_lcu] = (kvz_pixel)kvz_fast_clip_32bit_to_pixel((sample0_u + sample1_u + offset) >> shift);
+
+      int16_t sample0_v = (hi_prec_chroma_rec0 ? high_precision_rec0->v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] : (temp_lcu_v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] << (14 - KVZ_BIT_DEPTH)));
+      int16_t sample1_v = (hi_prec_chroma_rec1 ? high_precision_rec1->v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] : (expected_test_result.rec.v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] << (14 - KVZ_BIT_DEPTH)));
+      expected_test_result.rec.v[y_in_lcu * LCU_WIDTH_C + x_in_lcu] = (kvz_pixel)kvz_fast_clip_32bit_to_pixel((sample0_v + sample1_v + offset) >> shift);
+
+
+
+    }
+  }
+}
+
+
+TEST test_inter_recon_bipred_generic()
+{
+
+  memset(result.rec.y, 0, sizeof(kvz_pixel) * 64 * 64);
+  memset(result.rec.u, 0, sizeof(kvz_pixel) * 32 * 32);
+  memset(result.rec.v, 0, sizeof(kvz_pixel) * 32 * 32);
+
+
+  memcpy(result.rec.y, lcu1.rec.y, sizeof(kvz_pixel) * 64 * 64);
+  memcpy(result.rec.u, lcu1.rec.u, sizeof(kvz_pixel) * 32 * 32);
+  memcpy(result.rec.v, lcu1.rec.v, sizeof(kvz_pixel) * 32 * 32);
+
+
+  for (temp_y = 0; temp_y < height; ++temp_y) {
+    int y_in_lcu = ((ypos + temp_y) & ((LCU_WIDTH)-1));
+    for (temp_x = 0; temp_x < width; ++temp_x) {
+      int x_in_lcu = ((xpos + temp_x) & ((LCU_WIDTH)-1));
+      printf("%d ", (expected_test_result.rec.y[y_in_lcu * LCU_WIDTH + x_in_lcu]));
+    }
+  }
+  printf("\n");
+  /*
+  for (temp_y = 0; temp_y < height; ++temp_y) {
+    int y_in_lcu = (((ypos >> 1) + temp_y) & (LCU_WIDTH_C - 1));
+    for (temp_x = 0; temp_x < width >> 1; ++temp_x) {
+      int x_in_lcu = ((xpos + temp_x) & ((LCU_WIDTH_C)-1));
+      printf("%d ", (expected_test_result.rec.u[y_in_lcu * LCU_WIDTH_C + x_in_lcu]));
+    }
+  }
+  printf("\n");*/
+
+
+  kvz_inter_recon_bipred_generic(hi_prec_luma_rec0, hi_prec_luma_rec1, hi_prec_chroma_rec0, hi_prec_chroma_rec1, height, width, ypos, xpos, high_precision_rec0, high_precision_rec1, &result, temp_lcu_y, temp_lcu_u, temp_lcu_v);
+
+  /*
+  for (temp_y = 0; temp_y < height; ++temp_y) {
+    int y_in_lcu = (((ypos >> 1) + temp_y) & (LCU_WIDTH_C - 1));
+    for (temp_x = 0; temp_x < width >> 1; ++temp_x) {
+      int x_in_lcu = ((xpos + temp_x) & ((LCU_WIDTH_C)-1));
+      printf("%d ", (result.rec.u[y_in_lcu * LCU_WIDTH_C + x_in_lcu]));
+    }
+  }
+  printf("\n");*/
+
+  for (temp_y = 0; temp_y < height; ++temp_y) {
+    int y_in_lcu = ((ypos + temp_y) & ((LCU_WIDTH)-1));
+    for (temp_x = 0; temp_x < width; ++temp_x) {
+      int x_in_lcu = ((xpos + temp_x) & ((LCU_WIDTH)-1));
+      printf("%d ", (result.rec.y[y_in_lcu * LCU_WIDTH + x_in_lcu]));
+    }
+  }
+  printf("\n");
+
+  for (temp_y = 0; temp_y < height; ++temp_y) {
+    int y_in_lcu = ((ypos + temp_y) & ((LCU_WIDTH)-1));
+    for (temp_x = 0; temp_x < width; ++temp_x) {
+      int x_in_lcu = ((xpos + temp_x) & ((LCU_WIDTH)-1));
+      ASSERT_EQ_FMT(expected_test_result.rec.y[y_in_lcu * LCU_WIDTH + x_in_lcu], result.rec.y[y_in_lcu * LCU_WIDTH + x_in_lcu], "%d");
+    }
+  }
+
+  for (temp_y = 0; temp_y < height >> 1; ++temp_y) {
+    int y_in_lcu = (((ypos >> 1) + temp_y) & (LCU_WIDTH_C - 1));
+    for (temp_x = 0; temp_x < width >> 1; ++temp_x) {
+      int x_in_lcu = (((xpos >> 1) + temp_x) & (LCU_WIDTH_C - 1));
+      ASSERT_EQ_FMT(expected_test_result.rec.u[y_in_lcu * LCU_WIDTH_C + x_in_lcu], result.rec.u[y_in_lcu * LCU_WIDTH_C + x_in_lcu], "%d");
+      ASSERT_EQ_FMT(expected_test_result.rec.v[y_in_lcu * LCU_WIDTH_C + x_in_lcu], result.rec.v[y_in_lcu * LCU_WIDTH_C + x_in_lcu], "%d");
+    }
+  }
+
+  PASS();
+}
+
+SUITE(bipred_generic_tests)
+{
+  setup();
+
+  for (volatile int i = 0; i < strategies.count; ++i) {
+    if (strcmp(strategies.strategies[i].type, "inter_recon_bipred") != 0) {
+      continue;
+    }
+
+    kvz_inter_recon_bipred_generic = strategies.strategies[i].fptr;
+    RUN_TEST(test_inter_recon_bipred_generic);
+  }
+}
diff --git a/tests/tests_main.c b/tests/tests_main.c
index 14dae936..1a948289 100644
--- a/tests/tests_main.c
+++ b/tests/tests_main.c
@@ -32,6
+32,7 @@ extern SUITE(dct_tests); extern SUITE(coeff_sum_tests); extern SUITE(mv_cand_tests); +extern SUITE(bipred_generic_tests); int main(int argc, char **argv) { @@ -57,5 +58,7 @@ int main(int argc, char **argv) RUN_SUITE(mv_cand_tests); + RUN_SUITE(bipred_generic_tests); + GREATEST_MAIN_END(); }
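For reference, the per-pixel operation exercised by the new test, and vectorized by the AVX2 kernel, is a rounded average computed at 14-bit intermediate precision. The standalone sketch below is illustrative only and not part of the patch; it assumes an 8-bit build (KVZ_BIT_DEPTH == 8) and substitutes a local clip helper for kvz_fast_clip_32bit_to_pixel.

#include <stdint.h>
#include <stdio.h>

#define BIT_DEPTH 8  /* stand-in for KVZ_BIT_DEPTH in an 8-bit build */

/* stand-in for kvz_fast_clip_32bit_to_pixel */
static uint8_t clip_to_pixel(int32_t v)
{
  if (v < 0) return 0;
  if (v > 255) return 255;
  return (uint8_t)v;
}

/* Blend two low-precision (8-bit) predictors the way the reference loops do:
 * promote both to 14-bit, add a rounding offset, then shift back down.
 * The hi-prec path passes 14-bit samples directly instead of shifting here. */
static uint8_t bipred_blend(uint8_t rec0, uint8_t rec1)
{
  const int shift = 15 - BIT_DEPTH;      /* 7 for 8-bit content */
  const int offset = 1 << (shift - 1);   /* 64, rounds to nearest */
  int32_t sample0 = rec0 << (14 - BIT_DEPTH);
  int32_t sample1 = rec1 << (14 - BIT_DEPTH);
  return clip_to_pixel((sample0 + sample1 + offset) >> shift);
}

int main(void)
{
  printf("%d\n", bipred_blend(100, 103)); /* prints 102: (100 + 103) / 2 rounded up */
  return 0;
}

Because the scalar reference and the AVX2 strategy differ only in how many of these blends are performed per instruction, the suite can run each registered inter_recon_bipred strategy against the same expected block computed by this formula.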