AK: Specialize Atomic<Integral> for clang compatibility

While Clang claims to implement GCC's atomics libcall API, a small
incompatibility caused our builds to fail on Clang.

Clang requires the operands to its fixed-size functions to be
integer types, while GCC will take any type with the same size and
alignment as the various integer primitives. This was problematic, as
atomic `enum class`es would not compile.

Furthermore, Clang does not like if only one operand pointer is marked
volatile. Because it only affects the standalone atomic functions, that
will be fixed in a later commit.

As an added benefit, the code is more type-safe, as it won't let us
perform arithmetic on non-integer types. Types with overloaded
arithmetic operators won't cause unexpected behavior anymore.

The constructors for the various atomic types can now be used in
constant expressions.
This commit is contained in:
Daniel Bertalan 2021-06-19 16:28:42 +02:00 committed by Ali Mohammad Pur
parent 985adcca38
commit d6138df490

View file

@ -6,6 +6,7 @@
#pragma once
#include <AK/Concepts.h>
#include <AK/Platform.h>
#include <AK/Types.h>
@ -143,7 +144,76 @@ public:
Atomic(const Atomic&) = delete;
Atomic(Atomic&&) = delete;
Atomic(T val) noexcept
constexpr Atomic(T val) noexcept
: m_value(val)
{
}
// Returns a raw pointer to the underlying storage. The volatile qualifier
// keeps the compiler from caching reads of the pointee in a register.
volatile T* ptr() noexcept
{
return &m_value;
}
// Atomically replaces the stored value with `desired` under the given
// memory order and returns the previous value.
T exchange(T desired, MemoryOrder order = DefaultMemoryOrder) volatile noexcept
{
// We use this hack to prevent unnecessary initialization, even if T has a default constructor.
// __atomic_exchange writes the old value into this raw, suitably aligned storage.
// NOTE: Will need to investigate if it pessimizes the generated assembly.
alignas(T) u8 buffer[sizeof(T)];
T* ret = reinterpret_cast<T*>(buffer);
__atomic_exchange(&m_value, &desired, ret, order);
return *ret;
}
// Atomically compares the stored value against `expected`; on match, stores
// `desired` and returns true. On mismatch, loads the current value into
// `expected` and returns false.
[[nodiscard]] bool compare_exchange_strong(T& expected, T desired, MemoryOrder order = DefaultMemoryOrder) volatile noexcept
{
// The failure memory order of __atomic_compare_exchange may not be
// release or acq_rel, so split those into an explicit
// release-on-success / acquire-on-failure pair.
if (order == memory_order_acq_rel || order == memory_order_release)
return __atomic_compare_exchange(&m_value, &expected, &desired, false, memory_order_release, memory_order_acquire);
else
return __atomic_compare_exchange(&m_value, &expected, &desired, false, order, order);
}
// Implicit conversion to T: performs an atomic load with the default memory order.
ALWAYS_INLINE operator T() const volatile noexcept
{
return load();
}
// Atomically reads the stored value under the given memory order.
ALWAYS_INLINE T load(MemoryOrder order = DefaultMemoryOrder) const volatile noexcept
{
// Same trick as in exchange(): avoid default-constructing a T only to
// overwrite it; __atomic_load writes the result into raw aligned storage.
alignas(T) u8 buffer[sizeof(T)];
T* ret = reinterpret_cast<T*>(buffer);
__atomic_load(&m_value, ret, order);
return *ret;
}
// Assignment performs an atomic store with the default memory order and,
// like std::atomic, returns the assigned value rather than a reference to *this.
ALWAYS_INLINE T operator=(T desired) volatile noexcept
{
store(desired);
return desired;
}
// Atomically writes `desired` under the given memory order.
ALWAYS_INLINE void store(T desired, MemoryOrder order = DefaultMemoryOrder) volatile noexcept
{
__atomic_store(&m_value, &desired, order);
}
// True if atomic operations on this object are implemented without a lock
// for this size/alignment on the target architecture.
ALWAYS_INLINE bool is_lock_free() const volatile noexcept
{
return __atomic_is_lock_free(sizeof(m_value), &m_value);
}
};
template<Integral T, MemoryOrder DefaultMemoryOrder>
class Atomic<T, DefaultMemoryOrder> {
T m_value { 0 };
public:
Atomic() noexcept = default;
Atomic& operator=(const Atomic&) volatile = delete;
Atomic& operator=(Atomic&&) volatile = delete;
Atomic(const Atomic&) = delete;
Atomic(Atomic&&) = delete;
// constexpr so atomic integers can be initialized in constant expressions
// (see the commit message above this diff).
constexpr Atomic(T val) noexcept
: m_value(val)
{
}
@ -274,7 +344,7 @@ public:
Atomic(const Atomic&) = delete;
Atomic(Atomic&&) = delete;
Atomic(T* val) noexcept
constexpr Atomic(T* val) noexcept
: m_value(val)
{
}
@ -363,7 +433,6 @@ public:
return __atomic_is_lock_free(sizeof(m_value), &m_value);
}
};
}
using AK::Atomic;