Merge tag 'pull-misc-20240503' of https://gitlab.com/rth7680/qemu into staging

util/bufferiszero:
  - Remove sse4.1 and avx512 variants
  - Reorganize for early test for acceleration
  - Remove useless prefetches
  - Optimize sse2, avx2 and integer variants
  - Add simd acceleration for aarch64
  - Add bufferiszero-bench

# -----BEGIN PGP SIGNATURE-----
#
# iQFRBAABCgA7FiEEekgeeIaLTbaoWgXAZN846K9+IV8FAmY0/qMdHHJpY2hhcmQu
# aGVuZGVyc29uQGxpbmFyby5vcmcACgkQZN846K9+IV+ULQf/T2JSdvG6/EjDCf4N
# cnSGiUV2MIeByw8tkrc/fWCNdlulHhk9gbg9l+f2muwK8H/k2BdynbrQnt1Ymmtk
# xzM6+PNOcByaovSAkvNweZVbrQX36Yih9S7f3n+xcxfVuvvYhKSLHXLkeqO96LMd
# rN+WRpxhReaU3n8/FO7o3S26SRpk7X9kRfShaT7U7ytHGjGsXUvMKIRs30hbsJTB
# yjed0a0u54FoSlN6AEqjWdgzaWP8nT65+8Yxe3dzB9hx09UiolZo60eHqYy7Mkno
# N6aMOB6gUUbCiKZ3Qk+1zEX97vl26NH3zt5tIIJTWDoIkC3f9qbg1x5hwWLQ3rra
# rM8h8w==
# =DnZO
# -----END PGP SIGNATURE-----
# gpg: Signature made Fri 03 May 2024 08:11:31 AM PDT
# gpg:                using RSA key 7A481E78868B4DB6A85A05C064DF38E8AF7E215F
# gpg:                issuer "richard.henderson@linaro.org"
# gpg: Good signature from "Richard Henderson <richard.henderson@linaro.org>" [ultimate]

* tag 'pull-misc-20240503' of https://gitlab.com/rth7680/qemu:
  tests/bench: Add bufferiszero-bench
  util/bufferiszero: Add simd acceleration for aarch64
  util/bufferiszero: Simplify test_buffer_is_zero_next_accel
  util/bufferiszero: Introduce biz_accel_fn typedef
  util/bufferiszero: Improve scalar variant
  util/bufferiszero: Optimize SSE2 and AVX2 variants
  util/bufferiszero: Remove useless prefetches
  util/bufferiszero: Reorganize for early test for acceleration
  util/bufferiszero: Remove AVX512 variant
  util/bufferiszero: Remove SSE4.1 variant

Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Richard Henderson 2024-05-03 08:13:51 -07:00
commit 909aff7eaf
4 changed files with 324 additions and 221 deletions

include/qemu/cutils.h

@@ -187,9 +187,39 @@ char *freq_to_str(uint64_t freq_hz);
 /* used to print char* safely */
 #define STR_OR_NULL(str) ((str) ? (str) : "null")
 
-bool buffer_is_zero(const void *buf, size_t len);
+/*
+ * Check if a buffer is all zeroes.
+ */
+
+bool buffer_is_zero_ool(const void *vbuf, size_t len);
+bool buffer_is_zero_ge256(const void *vbuf, size_t len);
 bool test_buffer_is_zero_next_accel(void);
 
+static inline bool buffer_is_zero_sample3(const char *buf, size_t len)
+{
+    /*
+     * For any reasonably sized buffer, these three samples come from
+     * three different cachelines.  In qemu-img usage, we find that
+     * each byte eliminates more than half of all buffer testing.
+     * It is therefore critical to performance that the byte tests
+     * short-circuit, so that we do not pull in additional cache lines.
+     * Do not "optimize" this to !(a | b | c).
+     */
+    return !buf[0] && !buf[len - 1] && !buf[len / 2];
+}
+
+#ifdef __OPTIMIZE__
+static inline bool buffer_is_zero(const void *buf, size_t len)
+{
+    return (__builtin_constant_p(len) && len >= 256
+            ? buffer_is_zero_sample3(buf, len) &&
+              buffer_is_zero_ge256(buf, len)
+            : buffer_is_zero_ool(buf, len));
+}
+#else
+#define buffer_is_zero  buffer_is_zero_ool
+#endif
+
 /*
  * Implementation of ULEB128 (http://en.wikipedia.org/wiki/LEB128)
  * Input is limited to 14-bit numbers
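
A caller-side sketch of the new dispatch (illustrative only, not part of
the patch; the cluster helpers and MY_CLUSTER_SIZE below are hypothetical).
When len is a compile-time constant of at least 256, the inline
buffer_is_zero() above reduces to the three-byte sample plus one
out-of-line call to buffer_is_zero_ge256(); a runtime length compiles to
a single call to buffer_is_zero_ool().

#include "qemu/osdep.h"
#include "qemu/cutils.h"

#define MY_CLUSTER_SIZE 65536   /* hypothetical constant */

static bool cluster_is_all_zero(const void *cluster)
{
    /* Constant len >= 256: sample3 short-circuit, then ge256 helper. */
    return buffer_is_zero(cluster, MY_CLUSTER_SIZE);
}

static bool request_is_all_zero(const void *buf, size_t len)
{
    /* Runtime len: one buffer_is_zero_ool() call. */
    return buffer_is_zero(buf, len);
}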

tests/bench/bufferiszero-bench.c (new file)

@@ -0,0 +1,47 @@
+/*
+ * QEMU buffer_is_zero speed benchmark
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2 or
+ * (at your option) any later version.  See the COPYING file in the
+ * top-level directory.
+ */
+#include "qemu/osdep.h"
+#include "qemu/cutils.h"
+#include "qemu/units.h"
+
+static void test(const void *opaque)
+{
+    size_t max = 64 * KiB;
+    void *buf = g_malloc0(max);
+    int accel_index = 0;
+
+    do {
+        if (accel_index != 0) {
+            g_test_message("%s", "");  /* gnu_printf Werror for simple "" */
+        }
+        for (size_t len = 1 * KiB; len <= max; len *= 4) {
+            double total = 0.0;
+
+            g_test_timer_start();
+            do {
+                buffer_is_zero_ge256(buf, len);
+                total += len;
+            } while (g_test_timer_elapsed() < 0.5);
+
+            total /= MiB;
+            g_test_message("buffer_is_zero #%d: %2zuKB %8.0f MB/sec",
+                           accel_index, len / (size_t)KiB,
+                           total / g_test_timer_last());
+        }
+        accel_index++;
+    } while (test_buffer_is_zero_next_accel());
+
+    g_free(buf);
+}
+
+int main(int argc, char **argv)
+{
+    g_test_init(&argc, &argv, NULL);
+    g_test_add_data_func("/cutils/bufferiszero/speed", NULL, test);
+    return g_test_run();
+}
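
The do/while over test_buffer_is_zero_next_accel() above is the protocol
for walking every accelerator available on the host. A minimal
correctness sketch using the same protocol (illustrative only, not part
of the patch; offset 37 is arbitrary, chosen so the three-sample
prefilter at bytes 0, len-1 and len/2 stays zero and the accelerator
itself must find the dirty byte):

#include "qemu/osdep.h"
#include "qemu/cutils.h"

static void check_all_accels(void)
{
    size_t len = 4096;
    char *buf = g_malloc0(len);

    do {
        g_assert(buffer_is_zero_ool(buf, len));
        buf[37] = 1;                /* not covered by sample3 */
        g_assert(!buffer_is_zero_ool(buf, len));
        buf[37] = 0;
    } while (test_buffer_is_zero_next_accel());

    g_free(buf);
}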

tests/bench/meson.build

@@ -21,6 +21,7 @@ benchs = {}
 
 if have_block
   benchs += {
+    'bufferiszero-bench': [],
     'benchmark-crypto-hash': [crypto],
     'benchmark-crypto-hmac': [crypto],
     'benchmark-crypto-cipher': [crypto],

util/bufferiszero.c

@@ -26,265 +26,290 @@
 #include "qemu/bswap.h"
 #include "host/cpuinfo.h"
 
-static bool
-buffer_zero_int(const void *buf, size_t len)
+typedef bool (*biz_accel_fn)(const void *, size_t);
+
+static bool buffer_is_zero_int_lt256(const void *buf, size_t len)
 {
-    if (unlikely(len < 8)) {
-        /* For a very small buffer, simply accumulate all the bytes.  */
-        const unsigned char *p = buf;
-        const unsigned char *e = buf + len;
-        unsigned char t = 0;
+    uint64_t t;
+    const uint64_t *p, *e;
 
-        do {
-            t |= *p++;
-        } while (p < e);
-
-        return t == 0;
-    } else {
-        /* Otherwise, use the unaligned memory access functions to
-           handle the beginning and end of the buffer, with a couple
-           of loops handling the middle aligned section.  */
-        uint64_t t = ldq_he_p(buf);
-        const uint64_t *p = (uint64_t *)(((uintptr_t)buf + 8) & -8);
-        const uint64_t *e = (uint64_t *)(((uintptr_t)buf + len) & -8);
-
-        for (; p + 8 <= e; p += 8) {
-            __builtin_prefetch(p + 8);
-            if (t) {
-                return false;
-            }
-            t = p[0] | p[1] | p[2] | p[3] | p[4] | p[5] | p[6] | p[7];
-        }
-        while (p < e) {
-            t |= *p++;
-        }
-        t |= ldq_he_p(buf + len - 8);
-
-        return t == 0;
+    /*
+     * Use unaligned memory access functions to handle
+     * the beginning and end of the buffer.
+     */
+    if (unlikely(len <= 8)) {
+        return (ldl_he_p(buf) | ldl_he_p(buf + len - 4)) == 0;
     }
+
+    t = ldq_he_p(buf) | ldq_he_p(buf + len - 8);
+    p = QEMU_ALIGN_PTR_DOWN(buf + 8, 8);
+    e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 8);
+
+    /* Read 0 to 31 aligned words from the middle.  */
+    while (p < e) {
+        t |= *p++;
+    }
+
+    return t == 0;
 }
 
-#if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT) || defined(__SSE2__)
+static bool buffer_is_zero_int_ge256(const void *buf, size_t len)
+{
+    /*
+     * Use unaligned memory access functions to handle
+     * the beginning and end of the buffer.
+     */
+    uint64_t t = ldq_he_p(buf) | ldq_he_p(buf + len - 8);
+    const uint64_t *p = QEMU_ALIGN_PTR_DOWN(buf + 8, 8);
+    const uint64_t *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 8);
+
+    /* Collect a partial block at the tail end.  */
+    t |= e[-7] | e[-6] | e[-5] | e[-4] | e[-3] | e[-2] | e[-1];
+
+    /*
+     * Loop over 64 byte blocks.
+     * With the head and tail removed, e - p >= 30,
+     * so the loop must iterate at least 3 times.
+     */
+    do {
+        if (t) {
+            return false;
+        }
+        t = p[0] | p[1] | p[2] | p[3] | p[4] | p[5] | p[6] | p[7];
+        p += 8;
+    } while (p < e - 7);
+
+    return t == 0;
+}
+
+#if defined(CONFIG_AVX2_OPT) || defined(__SSE2__)
 #include <immintrin.h>
 
-/* Note that each of these vectorized functions require len >= 64.  */
+/* Helper for preventing the compiler from reassociating
+   chains of binary vector operations.  */
+#define SSE_REASSOC_BARRIER(vec0, vec1) asm("" : "+x"(vec0), "+x"(vec1))
+
+/* Note that these vectorized functions may assume len >= 256.  */
 
 static bool __attribute__((target("sse2")))
 buffer_zero_sse2(const void *buf, size_t len)
 {
-    __m128i t = _mm_loadu_si128(buf);
-    __m128i *p = (__m128i *)(((uintptr_t)buf + 5 * 16) & -16);
-    __m128i *e = (__m128i *)(((uintptr_t)buf + len) & -16);
-    __m128i zero = _mm_setzero_si128();
+    /* Unaligned loads at head/tail.  */
+    __m128i v = *(__m128i_u *)(buf);
+    __m128i w = *(__m128i_u *)(buf + len - 16);
+    /* Align head/tail to 16-byte boundaries.  */
+    const __m128i *p = QEMU_ALIGN_PTR_DOWN(buf + 16, 16);
+    const __m128i *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 16);
+    __m128i zero = { 0 };
 
-    /* Loop over 16-byte aligned blocks of 64.  */
-    while (likely(p <= e)) {
-        __builtin_prefetch(p);
-        t = _mm_cmpeq_epi8(t, zero);
-        if (unlikely(_mm_movemask_epi8(t) != 0xFFFF)) {
+    /* Collect a partial block at tail end.  */
+    v |= e[-1]; w |= e[-2];
+    SSE_REASSOC_BARRIER(v, w);
+    v |= e[-3]; w |= e[-4];
+    SSE_REASSOC_BARRIER(v, w);
+    v |= e[-5]; w |= e[-6];
+    SSE_REASSOC_BARRIER(v, w);
+    v |= e[-7]; v |= w;
+
+    /*
+     * Loop over complete 128-byte blocks.
+     * With the head and tail removed, e - p >= 14, so the loop
+     * must iterate at least once.
+     */
+    do {
+        v = _mm_cmpeq_epi8(v, zero);
+        if (unlikely(_mm_movemask_epi8(v) != 0xFFFF)) {
             return false;
         }
-        t = p[-4] | p[-3] | p[-2] | p[-1];
-        p += 4;
-    }
+        v = p[0]; w = p[1];
+        SSE_REASSOC_BARRIER(v, w);
+        v |= p[2]; w |= p[3];
+        SSE_REASSOC_BARRIER(v, w);
+        v |= p[4]; w |= p[5];
+        SSE_REASSOC_BARRIER(v, w);
+        v |= p[6]; w |= p[7];
+        SSE_REASSOC_BARRIER(v, w);
+        v |= w;
+        p += 8;
+    } while (p < e - 7);
 
-    /* Finish the aligned tail.  */
-    t |= e[-3];
-    t |= e[-2];
-    t |= e[-1];
-
-    /* Finish the unaligned tail.  */
-    t |= _mm_loadu_si128(buf + len - 16);
-
-    return _mm_movemask_epi8(_mm_cmpeq_epi8(t, zero)) == 0xFFFF;
+    return _mm_movemask_epi8(_mm_cmpeq_epi8(v, zero)) == 0xFFFF;
 }
 
 #ifdef CONFIG_AVX2_OPT
-static bool __attribute__((target("sse4")))
-buffer_zero_sse4(const void *buf, size_t len)
-{
-    __m128i t = _mm_loadu_si128(buf);
-    __m128i *p = (__m128i *)(((uintptr_t)buf + 5 * 16) & -16);
-    __m128i *e = (__m128i *)(((uintptr_t)buf + len) & -16);
-
-    /* Loop over 16-byte aligned blocks of 64.  */
-    while (likely(p <= e)) {
-        __builtin_prefetch(p);
-        if (unlikely(!_mm_testz_si128(t, t))) {
-            return false;
-        }
-        t = p[-4] | p[-3] | p[-2] | p[-1];
-        p += 4;
-    }
-
-    /* Finish the aligned tail.  */
-    t |= e[-3];
-    t |= e[-2];
-    t |= e[-1];
-
-    /* Finish the unaligned tail.  */
-    t |= _mm_loadu_si128(buf + len - 16);
-
-    return _mm_testz_si128(t, t);
-}
-
 static bool __attribute__((target("avx2")))
 buffer_zero_avx2(const void *buf, size_t len)
 {
-    /* Begin with an unaligned head of 32 bytes.  */
-    __m256i t = _mm256_loadu_si256(buf);
-    __m256i *p = (__m256i *)(((uintptr_t)buf + 5 * 32) & -32);
-    __m256i *e = (__m256i *)(((uintptr_t)buf + len) & -32);
+    /* Unaligned loads at head/tail.  */
+    __m256i v = *(__m256i_u *)(buf);
+    __m256i w = *(__m256i_u *)(buf + len - 32);
+    /* Align head/tail to 32-byte boundaries.  */
+    const __m256i *p = QEMU_ALIGN_PTR_DOWN(buf + 32, 32);
+    const __m256i *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 32);
+    __m256i zero = { 0 };
 
-    /* Loop over 32-byte aligned blocks of 128.  */
-    while (p <= e) {
-        __builtin_prefetch(p);
-        if (unlikely(!_mm256_testz_si256(t, t))) {
+    /* Collect a partial block at tail end.  */
+    v |= e[-1]; w |= e[-2];
+    SSE_REASSOC_BARRIER(v, w);
+    v |= e[-3]; w |= e[-4];
+    SSE_REASSOC_BARRIER(v, w);
+    v |= e[-5]; w |= e[-6];
+    SSE_REASSOC_BARRIER(v, w);
+    v |= e[-7]; v |= w;
+
+    /* Loop over complete 256-byte blocks.  */
+    for (; p < e - 7; p += 8) {
+        /* PTEST is not profitable here.  */
+        v = _mm256_cmpeq_epi8(v, zero);
+        if (unlikely(_mm256_movemask_epi8(v) != 0xFFFFFFFF)) {
             return false;
         }
-        t = p[-4] | p[-3] | p[-2] | p[-1];
-        p += 4;
-    } ;
+        v = p[0]; w = p[1];
+        SSE_REASSOC_BARRIER(v, w);
+        v |= p[2]; w |= p[3];
+        SSE_REASSOC_BARRIER(v, w);
+        v |= p[4]; w |= p[5];
+        SSE_REASSOC_BARRIER(v, w);
+        v |= p[6]; w |= p[7];
+        SSE_REASSOC_BARRIER(v, w);
+        v |= w;
+    }
 
-    /* Finish the last block of 128 unaligned.  */
-    t |= _mm256_loadu_si256(buf + len - 4 * 32);
-    t |= _mm256_loadu_si256(buf + len - 3 * 32);
-    t |= _mm256_loadu_si256(buf + len - 2 * 32);
-    t |= _mm256_loadu_si256(buf + len - 1 * 32);
-
-    return _mm256_testz_si256(t, t);
+    return _mm256_movemask_epi8(_mm256_cmpeq_epi8(v, zero)) == 0xFFFFFFFF;
 }
 #endif /* CONFIG_AVX2_OPT */
 
-#ifdef CONFIG_AVX512F_OPT
-static bool __attribute__((target("avx512f")))
-buffer_zero_avx512(const void *buf, size_t len)
-{
-    /* Begin with an unaligned head of 64 bytes.  */
-    __m512i t = _mm512_loadu_si512(buf);
-    __m512i *p = (__m512i *)(((uintptr_t)buf + 5 * 64) & -64);
-    __m512i *e = (__m512i *)(((uintptr_t)buf + len) & -64);
-
-    /* Loop over 64-byte aligned blocks of 256.  */
-    while (p <= e) {
-        __builtin_prefetch(p);
-        if (unlikely(_mm512_test_epi64_mask(t, t))) {
-            return false;
-        }
-        t = p[-4] | p[-3] | p[-2] | p[-1];
-        p += 4;
-    }
-
-    t |= _mm512_loadu_si512(buf + len - 4 * 64);
-    t |= _mm512_loadu_si512(buf + len - 3 * 64);
-    t |= _mm512_loadu_si512(buf + len - 2 * 64);
-    t |= _mm512_loadu_si512(buf + len - 1 * 64);
-
-    return !_mm512_test_epi64_mask(t, t);
-}
-#endif /* CONFIG_AVX512F_OPT */
-
-/*
- * Make sure that these variables are appropriately initialized when
- * SSE2 is enabled on the compiler command-line, but the compiler is
- * too old to support CONFIG_AVX2_OPT.
- */
-#if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT)
-# define INIT_USED     0
-# define INIT_LENGTH   0
-# define INIT_ACCEL    buffer_zero_int
-#else
-# ifndef __SSE2__
-#  error "ISA selection confusion"
-# endif
-# define INIT_USED     CPUINFO_SSE2
-# define INIT_LENGTH   64
-# define INIT_ACCEL    buffer_zero_sse2
-#endif
-
-static unsigned used_accel = INIT_USED;
-static unsigned length_to_accel = INIT_LENGTH;
-static bool (*buffer_accel)(const void *, size_t) = INIT_ACCEL;
-
-static unsigned __attribute__((noinline))
-select_accel_cpuinfo(unsigned info)
-{
-    /* Array is sorted in order of algorithm preference. */
-    static const struct {
-        unsigned bit;
-        unsigned len;
-        bool (*fn)(const void *, size_t);
-    } all[] = {
-#ifdef CONFIG_AVX512F_OPT
-        { CPUINFO_AVX512F, 256, buffer_zero_avx512 },
-#endif
+static biz_accel_fn const accel_table[] = {
+    buffer_is_zero_int_ge256,
+    buffer_zero_sse2,
 #ifdef CONFIG_AVX2_OPT
-        { CPUINFO_AVX2,    128, buffer_zero_avx2 },
-        { CPUINFO_SSE4,     64, buffer_zero_sse4 },
+    buffer_zero_avx2,
 #endif
-        { CPUINFO_SSE2,     64, buffer_zero_sse2 },
-        { CPUINFO_ALWAYS,    0, buffer_zero_int },
-    };
+};
 
-    for (unsigned i = 0; i < ARRAY_SIZE(all); ++i) {
-        if (info & all[i].bit) {
-            length_to_accel = all[i].len;
-            buffer_accel = all[i].fn;
-            return all[i].bit;
-        }
+static unsigned best_accel(void)
+{
+    unsigned info = cpuinfo_init();
+
+#ifdef CONFIG_AVX2_OPT
+    if (info & CPUINFO_AVX2) {
+        return 2;
     }
-    return 0;
-}
-
-#if defined(CONFIG_AVX512F_OPT) || defined(CONFIG_AVX2_OPT)
-static void __attribute__((constructor)) init_accel(void)
-{
-    used_accel = select_accel_cpuinfo(cpuinfo_init());
-}
 #endif /* CONFIG_AVX2_OPT */
+    return info & CPUINFO_SSE2 ? 1 : 0;
+}
 
-bool test_buffer_is_zero_next_accel(void)
-{
-    /*
-     * Accumulate the accelerators that we've already tested, and
-     * remove them from the set to test this round.  We'll get back
-     * a zero from select_accel_cpuinfo when there are no more.
-     */
-    unsigned used = select_accel_cpuinfo(cpuinfo & ~used_accel);
-    used_accel |= used;
-    return used;
-}
-
-static bool select_accel_fn(const void *buf, size_t len)
-{
-    if (likely(len >= length_to_accel)) {
-        return buffer_accel(buf, len);
-    }
-    return buffer_zero_int(buf, len);
-}
-
-#else
-#define select_accel_fn  buffer_zero_int
-bool test_buffer_is_zero_next_accel(void)
-{
-    return false;
-}
-#endif
+#elif defined(__aarch64__) && defined(__ARM_NEON)
+#include <arm_neon.h>
 
 /*
- * Checks if a buffer is all zeroes
+ * Helper for preventing the compiler from reassociating
+ * chains of binary vector operations.
  */
-bool buffer_is_zero(const void *buf, size_t len)
+#define REASSOC_BARRIER(vec0, vec1) asm("" : "+w"(vec0), "+w"(vec1))
+
+static bool buffer_is_zero_simd(const void *buf, size_t len)
 {
+    uint32x4_t t0, t1, t2, t3;
+
+    /* Align head/tail to 16-byte boundaries.  */
+    const uint32x4_t *p = QEMU_ALIGN_PTR_DOWN(buf + 16, 16);
+    const uint32x4_t *e = QEMU_ALIGN_PTR_DOWN(buf + len - 1, 16);
+
+    /* Unaligned loads at head/tail.  */
+    t0 = vld1q_u32(buf) | vld1q_u32(buf + len - 16);
+
+    /* Collect a partial block at tail end.  */
+    t1 = e[-7] | e[-6];
+    t2 = e[-5] | e[-4];
+    t3 = e[-3] | e[-2];
+    t0 |= e[-1];
+    REASSOC_BARRIER(t0, t1);
+    REASSOC_BARRIER(t2, t3);
+    t0 |= t1;
+    t2 |= t3;
+    REASSOC_BARRIER(t0, t2);
+    t0 |= t2;
+
+    /*
+     * Loop over complete 128-byte blocks.
+     * With the head and tail removed, e - p >= 14, so the loop
+     * must iterate at least once.
+     */
+    do {
+        /*
+         * Reduce via UMAXV.  Whatever the actual result,
+         * it will only be zero if all input bytes are zero.
+         */
+        if (unlikely(vmaxvq_u32(t0) != 0)) {
+            return false;
+        }
+
+        t0 = p[0] | p[1];
+        t1 = p[2] | p[3];
+        t2 = p[4] | p[5];
+        t3 = p[6] | p[7];
+        REASSOC_BARRIER(t0, t1);
+        REASSOC_BARRIER(t2, t3);
+        t0 |= t1;
+        t2 |= t3;
+        REASSOC_BARRIER(t0, t2);
+        t0 |= t2;
+        p += 8;
+    } while (p < e - 7);
+
+    return vmaxvq_u32(t0) == 0;
+}
+
+#define best_accel() 1
+static biz_accel_fn const accel_table[] = {
+    buffer_is_zero_int_ge256,
+    buffer_is_zero_simd,
+};
+#else
+#define best_accel() 0
+static biz_accel_fn const accel_table[1] = {
+    buffer_is_zero_int_ge256
+};
+#endif
+
+static biz_accel_fn buffer_is_zero_accel;
+static unsigned accel_index;
+
+bool buffer_is_zero_ool(const void *buf, size_t len)
+{
     if (unlikely(len == 0)) {
         return true;
     }
+    if (!buffer_is_zero_sample3(buf, len)) {
+        return false;
+    }
 
-    /* Fetch the beginning of the buffer while we select the accelerator.  */
-    __builtin_prefetch(buf);
+    /* All bytes are covered for any len <= 3.  */
+    if (unlikely(len <= 3)) {
+        return true;
+    }
 
-    /* Use an optimized zero check if possible.  Note that this also
-       includes a check for an unrolled loop over 64-bit integers.  */
-    return select_accel_fn(buf, len);
+    if (likely(len >= 256)) {
+        return buffer_is_zero_accel(buf, len);
+    }
+    return buffer_is_zero_int_lt256(buf, len);
+}
+
+bool buffer_is_zero_ge256(const void *buf, size_t len)
+{
+    return buffer_is_zero_accel(buf, len);
+}
+
+bool test_buffer_is_zero_next_accel(void)
+{
+    if (accel_index != 0) {
+        buffer_is_zero_accel = accel_table[--accel_index];
+        return true;
+    }
+    return false;
+}
+
+static void __attribute__((constructor)) init_accel(void)
+{
+    accel_index = best_accel();
+    buffer_is_zero_accel = accel_table[accel_index];
 }