linux/arch/alpha/include/asm/spinlock.h
Peter Zijlstra 726328d92a locking/spinlock, arch: Update and fix spin_unlock_wait() implementations
This patch updates/fixes all spin_unlock_wait() implementations.

The update is in the semantics: where spin_unlock_wait() previously
provided only a control dependency, we now upgrade it to a full
load-acquire to match the store-release from the spin_unlock() we waited
on. This ensures that when spin_unlock_wait() returns, we are guaranteed
to observe the full critical section we waited on.

This fixes a number of spin_unlock_wait() users that (not
unreasonably) rely on this.
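
For a simple test-and-set style lock, such as the Alpha one below, the
change boils down to replacing a plain spin on the lock word (a control
dependency only) with smp_cond_load_acquire(), which pairs with the
store-release in arch_spin_unlock(). A rough before/after sketch,
illustrative only and not the exact diff for any one architecture:

	/* before: only a control dependency on the lock word */
	static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
	{
		while (READ_ONCE(lock->lock))
			cpu_relax();
	}

	/* after: a full load-acquire, ordering us after the critical
	 * section we waited on */
	static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
	{
		smp_cond_load_acquire(&lock->lock, !VAL);
	}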

I also fixed a number of ticket-lock versions to wait only on the
current lock holder, instead of waiting for a full unlock, as this is
sufficient.

Furthermore, again for ticket locks, I added an smp_rmb() between the
initial ticket load and the spin loop testing the current value, because
I could not convince myself the address dependency is sufficient,
especially if the loads are of different sizes.

I'm more than happy to remove this smp_rmb() again if people are
certain the address dependency does indeed work as expected.
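
Concretely, for a ticket lock the waiter samples the owner ticket once
and then only has to wait for that owner to move on (or observe the lock
already free); the smp_rmb() sits between that initial sample and the
spin loop. A sketch of the resulting shape, assuming a hypothetical
{ owner, next } ticket layout rather than any particular architecture's:

	static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
	{
		u16 owner = READ_ONCE(lock->owner);	/* holder when we started waiting */

		smp_rmb();	/* order the sample against the loads in the loop below */

		for (;;) {
			arch_spinlock_t val = READ_ONCE(*lock);

			/* already unlocked, or the holder we sampled has since released */
			if (val.owner == val.next || val.owner != owner)
				break;

			cpu_relax();
		}

		/* upgrade the final control dependency to acquire semantics */
		smp_acquire__after_ctrl_dep();
	}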

Note: PPC32 will be fixed independently

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: chris@zankel.net
Cc: cmetcalf@mellanox.com
Cc: davem@davemloft.net
Cc: dhowells@redhat.com
Cc: james.hogan@imgtec.com
Cc: jejb@parisc-linux.org
Cc: linux@armlinux.org.uk
Cc: mpe@ellerman.id.au
Cc: ralf@linux-mips.org
Cc: realmz6@gmail.com
Cc: rkuo@codeaurora.org
Cc: rth@twiddle.net
Cc: schwidefsky@de.ibm.com
Cc: tony.luck@intel.com
Cc: vgupta@synopsys.com
Cc: ysato@users.sourceforge.jp
Cc: linux-kernel@vger.kernel.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
2016-06-14 11:55:15 +02:00

#ifndef _ALPHA_SPINLOCK_H
#define _ALPHA_SPINLOCK_H

#include <linux/kernel.h>
#include <asm/current.h>
#include <asm/barrier.h>
#include <asm/processor.h>

/*
 * Simple spin lock operations.  There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * We make no fairness assumptions. They have a cost.
 */
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
#define arch_spin_is_locked(x)	((x)->lock != 0)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
	/* Full load-acquire: pairs with the mb()+store in arch_spin_unlock(). */
	smp_cond_load_acquire(&lock->lock, !VAL);
}

static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
{
	return lock.lock == 0;
}

static inline void arch_spin_unlock(arch_spinlock_t * lock)
{
	mb();
	lock->lock = 0;
}

static inline void arch_spin_lock(arch_spinlock_t * lock)
{
	long tmp;

	/* ldl_l/stl_c retry loop; the mb on the success path gives acquire ordering. */
	__asm__ __volatile__(
	"1:	ldl_l	%0,%1\n"
	"	bne	%0,2f\n"
	"	lda	%0,1\n"
	"	stl_c	%0,%1\n"
	"	beq	%0,2f\n"
	"	mb\n"
	".subsection 2\n"
	"2:	ldl	%0,%1\n"
	"	bne	%0,2b\n"
	"	br	1b\n"
	".previous"
	: "=&r" (tmp), "=m" (lock->lock)
	: "m"(lock->lock) : "memory");
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	return !test_and_set_bit(0, &lock->lock);
}
/***********************************************************/

static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
	return (lock->lock & 1) == 0;
}

static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
	return lock->lock == 0;
}

static inline void arch_read_lock(arch_rwlock_t *lock)
{
	long regx;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	blbs	%1,6f\n"
	"	subl	%1,2,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0\n"
	"	blbs	%1,6b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}

static inline void arch_write_lock(arch_rwlock_t *lock)
{
	long regx;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	bne	%1,6f\n"
	"	lda	%1,1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	"	mb\n"
	".subsection 2\n"
	"6:	ldl	%1,%0\n"
	"	bne	%1,6b\n"
	"	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}

static inline int arch_read_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	lda	%2,0\n"
	"	blbs	%1,2f\n"
	"	subl	%1,2,%2\n"
	"	stl_c	%2,%0\n"
	"	beq	%2,6f\n"
	"2:	mb\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx), "=&r" (success)
	: "m" (*lock) : "memory");

	return success;
}

static inline int arch_write_trylock(arch_rwlock_t * lock)
{
	long regx;
	int success;

	__asm__ __volatile__(
	"1:	ldl_l	%1,%0\n"
	"	lda	%2,0\n"
	"	bne	%1,2f\n"
	"	lda	%2,1\n"
	"	stl_c	%2,%0\n"
	"	beq	%2,6f\n"
	"2:	mb\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx), "=&r" (success)
	: "m" (*lock) : "memory");

	return success;
}

static inline void arch_read_unlock(arch_rwlock_t * lock)
{
	long regx;

	__asm__ __volatile__(
	"	mb\n"
	"1:	ldl_l	%1,%0\n"
	"	addl	%1,2,%1\n"
	"	stl_c	%1,%0\n"
	"	beq	%1,6f\n"
	".subsection 2\n"
	"6:	br	1b\n"
	".previous"
	: "=m" (*lock), "=&r" (regx)
	: "m" (*lock) : "memory");
}

static inline void arch_write_unlock(arch_rwlock_t * lock)
{
	mb();
	lock->lock = 0;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#endif /* _ALPHA_SPINLOCK_H */