linux/tools/arch/arm64/include/asm/barrier.h
Daniel Borkmann 6b7a21140f tools: add smp_* barrier variants to include infrastructure
Add definitions for smp_rmb(), smp_wmb(), and smp_mb() to the tools
include infrastructure: this patch adds implementations for x86-64 and
arm64, and keeps the existing fallback for other archs that do not have
them implemented at this point. The x86-64 smp_mb() uses a lock + add
combination on an address below the red zone.
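
For reference, a minimal sketch of the x86-64 variant described above
(the exact -132 offset is illustrative; the point is an address past
the 128-byte red zone):

	/* Sketch: smp_mb() as a locked add of zero below the red zone. */
	#define smp_mb()	asm volatile("lock; addl $0,-132(%%rsp)" ::: "memory", "cc")

The locked add of zero is a full barrier but leaves the target memory
unchanged; targeting an address below the red zone keeps it away from
live stack data that the x86-64 SysV ABI lets leaf functions keep there.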

This is on top of 09d62154f6 ("tools, perf: add and use optimized
ring_buffer_{read_head, write_tail} helpers"), which didn't touch the
smp_* barrier implementations. Magnus recently (and rightfully) reported
that on x86-64 these still wrongly fall back to sfence, lfence and
mfence, so fix that for applications under tools that use them, to
avoid such ugly surprises. With the arch implementations in place, the
main header under tools (include/asm/barrier.h) no longer selects the
fallback implementation.
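
As a sketch of how that selection works (the guard layout shown here is
an assumption, modeled on the usual #ifndef pattern in the generic
tools/include/asm/barrier.h header):

	/* Generic fallback, used only if the arch header left it undefined. */
	#ifndef smp_mb
	# define smp_mb()	mb()
	#endif

Once the arch header defines smp_mb() itself, as this patch does for
x86-64 and arm64, the mb()-based fallback (mfence on x86-64) is no
longer selected.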

Reported-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
2019-04-11 14:45:50 -07:00

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _TOOLS_LINUX_ASM_AARCH64_BARRIER_H
#define _TOOLS_LINUX_ASM_AARCH64_BARRIER_H

/*
 * From tools/perf/perf-sys.h, last modified in:
 * f428ebd184c82a7914b2aa7e9f868918aaf7ea78 perf tools: Fix AAAAARGH64 memory barriers
 *
 * XXX: arch/arm64/include/asm/barrier.h in the kernel sources use dsb, is this
 * a case like for arm32 where we do things differently in userspace?
 */

#define mb()		asm volatile("dmb ish" ::: "memory")
#define wmb()		asm volatile("dmb ishst" ::: "memory")
#define rmb()		asm volatile("dmb ishld" ::: "memory")

/*
 * Kernel uses dmb variants on arm64 for smp_*() barriers. Pretty much the same
 * implementation as above mb()/wmb()/rmb(), though for the latter kernel uses
 * dsb. In any case, should above mb()/wmb()/rmb() change, make sure the below
 * smp_*() don't.
 */
#define smp_mb()	asm volatile("dmb ish" ::: "memory")
#define smp_wmb()	asm volatile("dmb ishst" ::: "memory")
#define smp_rmb()	asm volatile("dmb ishld" ::: "memory")

/*
 * Release store: the store to *p cannot become visible before any
 * memory access that precedes it in program order (stlr semantics).
 */
#define smp_store_release(p, v)					\
do {								\
	union { typeof(*p) __val; char __c[1]; } __u =		\
		{ .__val = (v) };				\
								\
	switch (sizeof(*p)) {					\
	case 1:							\
		asm volatile ("stlrb %w1, %0"			\
				: "=Q" (*p)			\
				: "r" (*(__u8_alias_t *)__u.__c) \
				: "memory");			\
		break;						\
	case 2:							\
		asm volatile ("stlrh %w1, %0"			\
				: "=Q" (*p)			\
				: "r" (*(__u16_alias_t *)__u.__c) \
				: "memory");			\
		break;						\
	case 4:							\
		asm volatile ("stlr %w1, %0"			\
				: "=Q" (*p)			\
				: "r" (*(__u32_alias_t *)__u.__c) \
				: "memory");			\
		break;						\
	case 8:							\
		asm volatile ("stlr %1, %0"			\
				: "=Q" (*p)			\
				: "r" (*(__u64_alias_t *)__u.__c) \
				: "memory");			\
		break;						\
	default:						\
		/* Only to shut up gcc ... */			\
		mb();						\
		break;						\
	}							\
} while (0)

/*
 * Acquire load: no memory access after the load of *p can be reordered
 * before it (ldar semantics); the loaded value is returned.
 */
#define smp_load_acquire(p)					\
({								\
	union { typeof(*p) __val; char __c[1]; } __u =		\
		{ .__c = { 0 } };				\
								\
	switch (sizeof(*p)) {					\
	case 1:							\
		asm volatile ("ldarb %w0, %1"			\
			: "=r" (*(__u8_alias_t *)__u.__c)	\
			: "Q" (*p) : "memory");			\
		break;						\
	case 2:							\
		asm volatile ("ldarh %w0, %1"			\
			: "=r" (*(__u16_alias_t *)__u.__c)	\
			: "Q" (*p) : "memory");			\
		break;						\
	case 4:							\
		asm volatile ("ldar %w0, %1"			\
			: "=r" (*(__u32_alias_t *)__u.__c)	\
			: "Q" (*p) : "memory");			\
		break;						\
	case 8:							\
		asm volatile ("ldar %0, %1"			\
			: "=r" (*(__u64_alias_t *)__u.__c)	\
			: "Q" (*p) : "memory");			\
		break;						\
	default:						\
		/* Only to shut up gcc ... */			\
		mb();						\
		break;						\
	}							\
	__u.__val;						\
})

#endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */
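
Usage sketch (hypothetical struct and helper names, modeled on the
ring_buffer_{read_head, write_tail} helpers this header was added for):
a consumer pairs an acquire load of the producer-written head with a
release store of its own tail, which orders the data accesses between
the two:

	#include <linux/types.h>
	#include <asm/barrier.h>

	struct ring {			/* hypothetical ring descriptor */
		__u64 head;		/* written by producer */
		__u64 tail;		/* written by consumer */
	};

	static inline __u64 ring_read_head(struct ring *r)
	{
		/* Acquire: data the producer stored before publishing
		 * head is guaranteed visible after this load. */
		return smp_load_acquire(&r->head);
	}

	static inline void ring_write_tail(struct ring *r, __u64 tail)
	{
		/* Release: all reads of the consumed data complete
		 * before the slot is handed back to the producer. */
		smp_store_release(&r->tail, tail);
	}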