
meson: Detect atomic128 support with optimization

There is an edge condition prior to GCC 13 for which optimization
is required to generate 16-byte atomic sequences.  Detect this.

Reviewed-by: Peter Maydell <peter.maydell@linaro.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
Author: Richard Henderson <richard.henderson@linaro.org>
Date:   2022-11-05 11:34:58 +00:00
commit e61f1efeb7 (parent 35c653c402)
2 changed files with 59 additions and 22 deletions
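For background (this paragraph and the sketch below are editorial, not part of the commit): the edge condition is GCC bug 107389, in which __builtin_assume_aligned alone is not enough to get 16-byte atomics inlined unless optimization is enabled. A minimal translation unit such as the following shows the distinction; the function names are illustrative, and the described behavior applies to affected hosts (e.g. s390x with GCC < 13) when linking without -latomic.

    /* Sketch only; function names are illustrative.  On an affected host,
     * building at -O0 leaves load16() calling an out-of-line libatomic
     * routine, while load16_opt() gets the inline 16-byte sequence. */
    #include <stdint.h>

    unsigned __int128 load16(void *pv)
    {
        unsigned __int128 *p = __builtin_assume_aligned(pv, 16);
        return __atomic_load_n(p, __ATOMIC_RELAXED);
    }

    /* Per-function optimization is the workaround the patch applies. */
    __attribute__((optimize("O1")))
    unsigned __int128 load16_opt(void *pv)
    {
        unsigned __int128 *p = __builtin_assume_aligned(pv, 16);
        return __atomic_load_n(p, __ATOMIC_RELAXED);
    }

    int main(void)
    {
        unsigned __int128 buf[2] __attribute__((aligned(16))) = { 0 };
        return (int)(load16(buf) + load16_opt(buf));
    }

At -O0 on an affected host the first function drags in an unresolved libatomic call and the link fails, while the optimize("O1") copy links. That link-time difference is exactly what the new meson probes below encode.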

accel/tcg/ldst_atomicity.c.inc

@@ -16,6 +16,23 @@
 #endif
 #define HAVE_al8_fast  (ATOMIC_REG_SIZE >= 8)
 
+/*
+ * If __alignof(unsigned __int128) < 16, GCC may refuse to inline atomics
+ * that are supported by the host, e.g. s390x.  We can force the pointer to
+ * have our known alignment with __builtin_assume_aligned, however prior to
+ * GCC 13 that was only reliable with optimization enabled.  See
+ *   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389
+ */
+#if defined(CONFIG_ATOMIC128_OPT)
+# if !defined(__OPTIMIZE__)
+#  define ATTRIBUTE_ATOMIC128_OPT  __attribute__((optimize("O1")))
+# endif
+# define CONFIG_ATOMIC128
+#endif
+#ifndef ATTRIBUTE_ATOMIC128_OPT
+# define ATTRIBUTE_ATOMIC128_OPT
+#endif
+
 #if defined(CONFIG_ATOMIC128)
 # define HAVE_al16_fast    true
 #else
@@ -152,7 +169,8 @@ static inline uint64_t load_atomic8(void *pv)
  *
  * Atomically load 16 aligned bytes from @pv.
  */
-static inline Int128 load_atomic16(void *pv)
+static inline Int128 ATTRIBUTE_ATOMIC128_OPT
+load_atomic16(void *pv)
 {
 #ifdef CONFIG_ATOMIC128
     __uint128_t *p = __builtin_assume_aligned(pv, 16);
@@ -356,7 +374,8 @@ static uint64_t load_atom_extract_al16_or_exit(CPUArchState *env, uintptr_t ra,
  * cross an 16-byte boundary then the access must be 16-byte atomic,
  * otherwise the access must be 8-byte atomic.
  */
-static inline uint64_t load_atom_extract_al16_or_al8(void *pv, int s)
+static inline uint64_t ATTRIBUTE_ATOMIC128_OPT
+load_atom_extract_al16_or_al8(void *pv, int s)
 {
 #if defined(CONFIG_ATOMIC128)
     uintptr_t pi = (uintptr_t)pv;
@@ -692,7 +711,8 @@ static inline void store_atomic8(void *pv, uint64_t val)
  *
  * Atomically store 16 aligned bytes to @pv.
  */
-static inline void store_atomic16(void *pv, Int128Alias val)
+static inline void ATTRIBUTE_ATOMIC128_OPT
+store_atomic16(void *pv, Int128Alias val)
 {
 #if defined(CONFIG_ATOMIC128)
     __uint128_t *pu = __builtin_assume_aligned(pv, 16);
@@ -790,7 +810,8 @@ static void store_atom_insert_al8(uint64_t *p, uint64_t val, uint64_t msk)
  *
  * Atomically store @val to @p masked by @msk.
  */
-static void store_atom_insert_al16(Int128 *ps, Int128Alias val, Int128Alias msk)
+static void ATTRIBUTE_ATOMIC128_OPT
+store_atom_insert_al16(Int128 *ps, Int128Alias val, Int128Alias msk)
 {
 #if defined(CONFIG_ATOMIC128)
     __uint128_t *pu, old, new;
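To see how the macro block added in the first hunk behaves across build modes, the same preprocessor logic can be compiled standalone (a sketch: in QEMU, CONFIG_ATOMIC128_OPT comes from meson via config-host.h rather than a -D flag, and load_atomic16_demo is a stand-in for the real helpers):

    /* Compile with -DCONFIG_ATOMIC128_OPT, with and without optimization,
     * to see how the attribute collapses. */
    #if defined(CONFIG_ATOMIC128_OPT)
    # if !defined(__OPTIMIZE__)
    #  define ATTRIBUTE_ATOMIC128_OPT  __attribute__((optimize("O1")))
    # endif
    # define CONFIG_ATOMIC128
    #endif
    #ifndef ATTRIBUTE_ATOMIC128_OPT
    # define ATTRIBUTE_ATOMIC128_OPT
    #endif

    /* Mirrors how the patch decorates each 16-byte helper: empty when the
     * TU is already optimized (or when inlining never needed help), and
     * __attribute__((optimize("O1"))) only in the CONFIG_ATOMIC128_OPT
     * plus -O0 case. */
    static inline unsigned __int128 ATTRIBUTE_ATOMIC128_OPT
    load_atomic16_demo(void *pv)
    {
    #if defined(CONFIG_ATOMIC128)
        unsigned __int128 *p = __builtin_assume_aligned(pv, 16);
        return __atomic_load_n(p, __ATOMIC_RELAXED);
    #else
        return 0;  /* fast path compiled out */
    #endif
    }

    int main(void)
    {
        unsigned __int128 buf[1] __attribute__((aligned(16))) = { 0 };
        return (int)load_atomic16_demo(buf);
    }

Note the second effect of the block: when only the optimized probe passed, it defines CONFIG_ATOMIC128 locally, so all the existing #if defined(CONFIG_ATOMIC128) fast paths light up without further changes.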

meson.build

@@ -2259,23 +2259,21 @@ config_host_data.set('HAVE_BROKEN_SIZE_MAX', not cc.compiles('''
         return printf("%zu", SIZE_MAX);
     }''', args: ['-Werror']))
 
-# See if 64-bit atomic operations are supported.
-# Note that without __atomic builtins, we can only
-# assume atomic loads/stores max at pointer size.
-config_host_data.set('CONFIG_ATOMIC64', cc.links('''
+atomic_test = '''
   #include <stdint.h>
   int main(void)
   {
-    uint64_t x = 0, y = 0;
+    @0@ x = 0, y = 0;
     y = __atomic_load_n(&x, __ATOMIC_RELAXED);
     __atomic_store_n(&x, y, __ATOMIC_RELAXED);
     __atomic_compare_exchange_n(&x, &y, x, 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
     __atomic_exchange_n(&x, y, __ATOMIC_RELAXED);
     __atomic_fetch_add(&x, y, __ATOMIC_RELAXED);
     return 0;
-  }'''))
+  }'''
+
+# See if 64-bit atomic operations are supported.
+# Note that without __atomic builtins, we can only
+# assume atomic loads/stores max at pointer size.
+config_host_data.set('CONFIG_ATOMIC64', cc.links(atomic_test.format('uint64_t')))
 
 has_int128 = cc.links('''
   __int128_t a;
@@ -2293,21 +2291,39 @@ if has_int128
   # "do we have 128-bit atomics which are handled inline and specifically not
   # via libatomic".  The reason we can't use libatomic is documented in the
   # comment starting "GCC is a house divided" in include/qemu/atomic128.h.
-  has_atomic128 = cc.links(atomic_test.format('unsigned __int128'))
+  # We only care about these operations on 16-byte aligned pointers, so
+  # force 16-byte alignment of the pointer, which may be greater than
+  # __alignof(unsigned __int128) for the host.
+  atomic_test_128 = '''
+    int main(int ac, char **av) {
+      unsigned __int128 *p = __builtin_assume_aligned(av[ac - 1], sizeof(16));
+      p[1] = __atomic_load_n(&p[0], __ATOMIC_RELAXED);
+      __atomic_store_n(&p[2], p[3], __ATOMIC_RELAXED);
+      __atomic_compare_exchange_n(&p[4], &p[5], p[6], 0, __ATOMIC_RELAXED, __ATOMIC_RELAXED);
+      return 0;
+    }'''
+  has_atomic128 = cc.links(atomic_test_128)
   config_host_data.set('CONFIG_ATOMIC128', has_atomic128)
   if not has_atomic128
-    has_cmpxchg128 = cc.links('''
-      int main(void)
-      {
-        unsigned __int128 x = 0, y = 0;
-        __sync_val_compare_and_swap_16(&x, y, x);
-        return 0;
-      }
-    ''')
-    config_host_data.set('CONFIG_CMPXCHG128', has_cmpxchg128)
+    # Even with __builtin_assume_aligned, the above test may have failed
+    # without optimization enabled.  Try again with optimizations locally
+    # enabled for the function.  See
+    #   https://gcc.gnu.org/bugzilla/show_bug.cgi?id=107389
+    has_atomic128_opt = cc.links('__attribute__((optimize("O1")))' + atomic_test_128)
+    config_host_data.set('CONFIG_ATOMIC128_OPT', has_atomic128_opt)
+    if not has_atomic128_opt
+      config_host_data.set('CONFIG_CMPXCHG128', cc.links('''
+        int main(void)
+        {
+          unsigned __int128 x = 0, y = 0;
+          __sync_val_compare_and_swap_16(&x, y, x);
+          return 0;
+        }
+      '''))
+    endif
   endif
 endif
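To reproduce the probes by hand, the test program can be extracted to a file (a sketch: probe16.c and USE_OPT_ATTR are illustrative names, and the alignment is written as 16 here where the in-tree probe spells it sizeof(16)). If the plain build links, meson sets CONFIG_ATOMIC128; if only the attributed build links, it sets CONFIG_ATOMIC128_OPT, which the header block in the first file then turns back into CONFIG_ATOMIC128 plus the per-function attribute.

    /* probe16.c -- hand-run equivalent of the two cc.links() checks above.
     * "cc probe16.c" corresponds to the plain probe; adding -DUSE_OPT_ATTR
     * corresponds to the optimize("O1") variant that feeds
     * CONFIG_ATOMIC128_OPT.  USE_OPT_ATTR is not something meson defines. */
    #ifdef USE_OPT_ATTR
    __attribute__((optimize("O1")))
    #endif
    int main(int ac, char **av)
    {
        unsigned __int128 *p = __builtin_assume_aligned(av[ac - 1], 16);
        p[1] = __atomic_load_n(&p[0], __ATOMIC_RELAXED);
        __atomic_store_n(&p[2], p[3], __ATOMIC_RELAXED);
        __atomic_compare_exchange_n(&p[4], &p[5], p[6], 0,
                                    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
        return 0;
    }

If both probes fail to link, the configuration falls back to testing __sync_val_compare_and_swap_16 alone, i.e. CONFIG_CMPXCHG128.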