audioconvert: avoid even more precision loss in S32 to F32 conversion

There's really no point in doing that s25_32 intermediate step;
to be honest, I don't have a clue why the original implementation
did that ¯\_(ツ)_/¯.

Both `S25_SCALE` and `S32_SCALE` are powers of two,
and thus are both exactly representable as floats,
and the reciprocal of a power of two is also exactly representable,
so it's not like that rescaling results in precision loss.

This additionally avoids right-shift, and thus is even faster.

As `test_lossless_s32_lossless_subset` shows,
if the integer is in the form of s25+shift,
the maximal absolute error became even lower,
but not zero, because F32->S32 still goes through S25 intermediate.
I think we could theoretically do better,
but then the clamping becomes pretty finicky,
so I don't feel like touching that here.
This commit is contained in:
Roman Lebedev 2024-06-14 06:05:18 +03:00
parent c517865864
commit f4c89b1b40
No known key found for this signature in database
GPG Key ID: 083C3EBB4A1689E0
4 changed files with 26 additions and 37 deletions

View File

@ -316,7 +316,7 @@ conv_s32_to_f32d_4s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
float *d0 = dst[0], *d1 = dst[1], *d2 = dst[2], *d3 = dst[3];
uint32_t n, unrolled;
__m256i in[4];
__m256 out[4], factor = _mm256_set1_ps(1.0f / S25_SCALE);
__m256 out[4], factor = _mm256_set1_ps(1.0f / S32_SCALE);
__m256i mask1 = _mm256_setr_epi32(0*n_channels, 1*n_channels, 2*n_channels, 3*n_channels,
4*n_channels, 5*n_channels, 6*n_channels, 7*n_channels);
@ -334,11 +334,6 @@ conv_s32_to_f32d_4s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
in[2] = _mm256_i32gather_epi32((int*)&s[2], mask1, 4);
in[3] = _mm256_i32gather_epi32((int*)&s[3], mask1, 4);
in[0] = _mm256_srai_epi32(in[0], 7);
in[1] = _mm256_srai_epi32(in[1], 7);
in[2] = _mm256_srai_epi32(in[2], 7);
in[3] = _mm256_srai_epi32(in[3], 7);
out[0] = _mm256_cvtepi32_ps(in[0]);
out[1] = _mm256_cvtepi32_ps(in[1]);
out[2] = _mm256_cvtepi32_ps(in[2]);
@ -357,11 +352,11 @@ conv_s32_to_f32d_4s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
s += 8*n_channels;
}
for(; n < n_samples; n++) {
__m128 out[4], factor = _mm_set1_ps(1.0f / S25_SCALE);
out[0] = _mm_cvtsi32_ss(factor, s[0] >> 7);
out[1] = _mm_cvtsi32_ss(factor, s[1] >> 7);
out[2] = _mm_cvtsi32_ss(factor, s[2] >> 7);
out[3] = _mm_cvtsi32_ss(factor, s[3] >> 7);
__m128 out[4], factor = _mm_set1_ps(1.0f / S32_SCALE);
out[0] = _mm_cvtsi32_ss(factor, s[0]);
out[1] = _mm_cvtsi32_ss(factor, s[1]);
out[2] = _mm_cvtsi32_ss(factor, s[2]);
out[3] = _mm_cvtsi32_ss(factor, s[3]);
out[0] = _mm_mul_ss(out[0], factor);
out[1] = _mm_mul_ss(out[1], factor);
out[2] = _mm_mul_ss(out[2], factor);
@ -382,7 +377,7 @@ conv_s32_to_f32d_2s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
float *d0 = dst[0], *d1 = dst[1];
uint32_t n, unrolled;
__m256i in[4];
__m256 out[4], factor = _mm256_set1_ps(1.0f / S25_SCALE);
__m256 out[4], factor = _mm256_set1_ps(1.0f / S32_SCALE);
__m256i mask1 = _mm256_setr_epi32(0*n_channels, 1*n_channels, 2*n_channels, 3*n_channels,
4*n_channels, 5*n_channels, 6*n_channels, 7*n_channels);
@ -396,9 +391,6 @@ conv_s32_to_f32d_2s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
in[0] = _mm256_i32gather_epi32((int*)&s[0], mask1, 4);
in[1] = _mm256_i32gather_epi32((int*)&s[1], mask1, 4);
in[0] = _mm256_srai_epi32(in[0], 7);
in[1] = _mm256_srai_epi32(in[1], 7);
out[0] = _mm256_cvtepi32_ps(in[0]);
out[1] = _mm256_cvtepi32_ps(in[1]);
@ -411,9 +403,9 @@ conv_s32_to_f32d_2s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
s += 8*n_channels;
}
for(; n < n_samples; n++) {
__m128 out[2], factor = _mm_set1_ps(1.0f / S25_SCALE);
out[0] = _mm_cvtsi32_ss(factor, s[0] >> 7);
out[1] = _mm_cvtsi32_ss(factor, s[1] >> 7);
__m128 out[2], factor = _mm_set1_ps(1.0f / S32_SCALE);
out[0] = _mm_cvtsi32_ss(factor, s[0]);
out[1] = _mm_cvtsi32_ss(factor, s[1]);
out[0] = _mm_mul_ss(out[0], factor);
out[1] = _mm_mul_ss(out[1], factor);
_mm_store_ss(&d0[n], out[0]);
@ -430,7 +422,7 @@ conv_s32_to_f32d_1s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
float *d0 = dst[0];
uint32_t n, unrolled;
__m256i in[2];
__m256 out[2], factor = _mm256_set1_ps(1.0f / S25_SCALE);
__m256 out[2], factor = _mm256_set1_ps(1.0f / S32_SCALE);
__m256i mask1 = _mm256_setr_epi32(0*n_channels, 1*n_channels, 2*n_channels, 3*n_channels,
4*n_channels, 5*n_channels, 6*n_channels, 7*n_channels);
@ -443,9 +435,6 @@ conv_s32_to_f32d_1s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
in[0] = _mm256_i32gather_epi32(&s[0*n_channels], mask1, 4);
in[1] = _mm256_i32gather_epi32(&s[8*n_channels], mask1, 4);
in[0] = _mm256_srai_epi32(in[0], 7);
in[1] = _mm256_srai_epi32(in[1], 7);
out[0] = _mm256_cvtepi32_ps(in[0]);
out[1] = _mm256_cvtepi32_ps(in[1]);
@ -458,8 +447,8 @@ conv_s32_to_f32d_1s_avx2(void *data, void * SPA_RESTRICT dst[], const void * SPA
s += 16*n_channels;
}
for(; n < n_samples; n++) {
__m128 out, factor = _mm_set1_ps(1.0f / S25_SCALE);
out = _mm_cvtsi32_ss(factor, s[0] >> 7);
__m128 out, factor = _mm_set1_ps(1.0f / S32_SCALE);
out = _mm_cvtsi32_ss(factor, s[0]);
out = _mm_mul_ss(out, factor);
_mm_store_ss(&d0[n], out);
s += n_channels;

View File

@ -335,7 +335,7 @@ conv_s32_to_f32d_1s_sse2(void *data, void * SPA_RESTRICT dst[], const void * SPA
float *d0 = dst[0];
uint32_t n, unrolled;
__m128i in;
__m128 out, factor = _mm_set1_ps(1.0f / S25_SCALE);
__m128 out, factor = _mm_set1_ps(1.0f / S32_SCALE);
if (SPA_IS_ALIGNED(d0, 16))
unrolled = n_samples & ~3;
@ -347,14 +347,13 @@ conv_s32_to_f32d_1s_sse2(void *data, void * SPA_RESTRICT dst[], const void * SPA
s[1*n_channels],
s[2*n_channels],
s[3*n_channels]);
in = _mm_srai_epi32(in, 7);
out = _mm_cvtepi32_ps(in);
out = _mm_mul_ps(out, factor);
_mm_store_ps(&d0[n], out);
s += 4*n_channels;
}
for(; n < n_samples; n++) {
out = _mm_cvtsi32_ss(factor, s[0]>>7);
out = _mm_cvtsi32_ss(factor, s[0]);
out = _mm_mul_ss(out, factor);
_mm_store_ss(&d0[n], out);
s += n_channels;

View File

@ -119,9 +119,10 @@
#define S32_TO_S24_32(v) (((int32_t)(v)) >> 8)
#define S32_MIN (S24_MIN * 256)
#define S32_MAX (S24_MAX * 256)
#define S32_TO_F32(v) ITOF(int32_t, S32_TO_S25_32(v), S25_SCALE, 0.0f)
#define S32_MIN -2147483648
#define S32_MAX 2147483647
#define S32_SCALE 2147483648.0f
#define S32_TO_F32(v) ITOF(int32_t, v, S32_SCALE, 0.0f)
#define S32S_TO_F32(v) S32_TO_F32(bswap_32(v))
#define F32_TO_S32_D(v,d) S25_32_TO_S32(F32_TO_S25_32_D(v,d))
#define F32_TO_S32(v) F32_TO_S32_D(v, 0.0f)

View File

@ -336,7 +336,7 @@ static void test_s32_f32(void)
0xFFFFFF00, 0x0200, 0xFFFFFE00
};
static const float out[] = { 0.e+00f, 9.99999940395355224609375e-01f, -1.e+00f,
static const float out[] = { 0.e+00f, 1.e+00f, -1.e+00f,
9.9999988079071044921875e-01f, -9.9999988079071044921875e-01f, 5.e-01f,
-5.e-01f, 5.9604644775390625e-08f, -5.9604644775390625e-08f,
1.1920928955078125e-07f, -1.1920928955078125e-07f,
@ -672,15 +672,15 @@ static void test_lossless_s32(void)
{
int64_t i;
int32_t max_abs_err = -1;
const int32_t expected_max_abs_err = 127;
fprintf(stderr, "test %s:\n", __func__);
for (i = S32_MIN; i < S32_MAX; i += (expected_max_abs_err >> 1)) {
for (i = S32_MIN; i < S32_MAX; i += 63) {
float v = S32_TO_F32(i);
int32_t t = F32_TO_S32(v);
max_abs_err = SPA_MAX(max_abs_err, SPA_ABS(i - t));
spa_assert_se(SPA_ABS(i - t) <= 126);
// NOTE: 126 is the maximal absolute error given step=1;
// for wider steps it may (erroneously) be lower,
// because we may not check some integer that would bump it.
}
spa_assert_se(max_abs_err == expected_max_abs_err);
}
static void test_lossless_s32_lossless_subset(void)
@ -700,7 +700,7 @@ static void test_lossless_s32_lossless_subset(void)
}
}
spa_assert_se(!all_lossless);
spa_assert_se(max_abs_err == 127);
spa_assert_se(max_abs_err == 64);
}
static void test_lossless_u32(void)