diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
index 6e0a850aa9d3..91d4a4d9258c 100644
--- a/arch/alpha/include/asm/cmpxchg.h
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -6,15 +6,15 @@
  * Atomic exchange routines.
  */
 
-#define ____xchg(type, args...)		__xchg ## type ## _local(args)
+#define ____xchg(type, args...)	__arch_xchg ## type ## _local(args)
 #define ____cmpxchg(type, args...)	__cmpxchg ## type ## _local(args)
 #include <asm/xchg.h>
 
 #define xchg_local(ptr, x)						\
 ({									\
 	__typeof__(*(ptr)) _x_ = (x);					\
-	(__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_,	\
-					  sizeof(*(ptr)));		\
+	(__typeof__(*(ptr))) __arch_xchg_local((ptr), (unsigned long)_x_,\
+					       sizeof(*(ptr)));		\
 })
 
 #define arch_cmpxchg_local(ptr, o, n)					\
@@ -34,7 +34,7 @@
 
 #undef ____xchg
 #undef ____cmpxchg
-#define ____xchg(type, args...)		__xchg ##type(args)
+#define ____xchg(type, args...)		__arch_xchg ##type(args)
 #define ____cmpxchg(type, args...)	__cmpxchg ##type(args)
 #include <asm/xchg.h>
 
@@ -48,7 +48,7 @@
 	__typeof__(*(ptr)) _x_ = (x);					\
 	smp_mb();							\
 	__ret = (__typeof__(*(ptr)))					\
-		__xchg((ptr), (unsigned long)_x_, sizeof(*(ptr)));	\
+		__arch_xchg((ptr), (unsigned long)_x_, sizeof(*(ptr)));	\
 	smp_mb();							\
 	__ret;								\
 })
diff --git a/arch/alpha/include/asm/local.h b/arch/alpha/include/asm/local.h
index fab26a1c93d5..0fcaad642cc3 100644
--- a/arch/alpha/include/asm/local.h
+++ b/arch/alpha/include/asm/local.h
@@ -52,8 +52,16 @@ static __inline__ long local_sub_return(long i, local_t * l)
 	return result;
 }
 
-#define local_cmpxchg(l, o, n) \
-	(cmpxchg_local(&((l)->a.counter), (o), (n)))
+static __inline__ long local_cmpxchg(local_t *l, long old, long new)
+{
+	return cmpxchg_local(&l->a.counter, old, new);
+}
+
+static __inline__ bool local_try_cmpxchg(local_t *l, long *old, long new)
+{
+	return try_cmpxchg_local(&l->a.counter, (s64 *)old, new);
+}
+
 #define local_xchg(l, n) (xchg_local(&((l)->a.counter), (n)))
 
 /**
diff --git a/arch/arc/include/asm/cmpxchg.h b/arch/arc/include/asm/cmpxchg.h
index c5b544a5fe81..e138fde067de 100644
--- a/arch/arc/include/asm/cmpxchg.h
+++ b/arch/arc/include/asm/cmpxchg.h
@@ -85,7 +85,7 @@
  */
 #ifdef CONFIG_ARC_HAS_LLSC
 
-#define __xchg(ptr, val)						\
+#define __arch_xchg(ptr, val)						\
 ({									\
 	__asm__ __volatile__(						\
 	"	ex  %0, [%1]	\n"	/* set new value */		\
@@ -102,7 +102,7 @@
 									\
 	switch(sizeof(*(_p_))) {					\
 	case 4:								\
-		_val_ = __xchg(_p_, _val_);				\
+		_val_ = __arch_xchg(_p_, _val_);			\
 		break;							\
 	default:							\
 		BUILD_BUG();						\
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 4dfe538dfc68..44667bdb4707 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -25,7 +25,8 @@
 #define swp_is_buggy
 #endif
 
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
+static inline unsigned long
+__arch_xchg(unsigned long x, volatile void *ptr, int size)
 {
 	extern void __bad_xchg(volatile void *, int);
 	unsigned long ret;
@@ -115,8 +116,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
 }
 
 #define arch_xchg_relaxed(ptr, x) ({					\
-	(__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr),		\
-				   sizeof(*(ptr)));			\
+	(__typeof__(*(ptr)))__arch_xchg((unsigned long)(x), (ptr),	\
+					sizeof(*(ptr)));		\
 })
 
 #include <asm-generic/cmpxchg-local.h>
diff --git a/arch/arm64/include/asm/cmpxchg.h b/arch/arm64/include/asm/cmpxchg.h
index 497acf134d99..c6bc5d8ec3ca 100644
--- a/arch/arm64/include/asm/cmpxchg.h
+++ b/arch/arm64/include/asm/cmpxchg.h
@@ -62,9 +62,8 @@ __XCHG_CASE( ,  ,  mb_, 64, dmb ish, nop,  , a, l, "memory")
 #undef __XCHG_CASE
 
 #define __XCHG_GEN(sfx)							\
-static __always_inline unsigned long __xchg##sfx(unsigned long x,	\
-					volatile void *ptr,		\
-					int size)			\
+static __always_inline unsigned long					\
+__arch_xchg##sfx(unsigned long x, volatile void *ptr, int size)		\
 {									\
 	switch (size) {							\
 	case 1:								\
@@ -93,7 +92,7 @@ __XCHG_GEN(_mb)
 ({									\
 	__typeof__(*(ptr)) __ret;					\
 	__ret = (__typeof__(*(ptr)))					\
-		__xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
+		__arch_xchg##sfx((unsigned long)(x), (ptr), sizeof(*(ptr))); \
 	__ret;								\
 })
diff --git a/arch/hexagon/include/asm/cmpxchg.h b/arch/hexagon/include/asm/cmpxchg.h
index cdb705e1496a..bf6cf5579cf4 100644
--- a/arch/hexagon/include/asm/cmpxchg.h
+++ b/arch/hexagon/include/asm/cmpxchg.h
@@ -9,7 +9,7 @@
 #define _ASM_CMPXCHG_H
 
 /*
- * __xchg - atomically exchange a register and a memory location
+ * __arch_xchg - atomically exchange a register and a memory location
  * @x: value to swap
  * @ptr: pointer to memory
  * @size: size of the value
@@ -19,8 +19,8 @@
  * Note:  there was an errata for V2 about .new's and memw_locked.
  *
  */
-static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
-				   int size)
+static inline unsigned long
+__arch_xchg(unsigned long x, volatile void *ptr, int size)
 {
 	unsigned long retval;
 
@@ -42,8 +42,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
  * Atomically swap the contents of a register with memory.  Should be atomic
  * between multiple CPU's and within interrupts on the same CPU.
  */
-#define arch_xchg(ptr, v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), \
-	sizeof(*(ptr))))
+#define arch_xchg(ptr, v) ((__typeof__(*(ptr)))__arch_xchg((unsigned long)(v), (ptr), \
+	sizeof(*(ptr))))
 
 /*
  * see rt-mutex-design.txt; cmpxchg supposedly checks if *ptr == A and swaps.
diff --git a/arch/ia64/include/asm/cmpxchg.h b/arch/ia64/include/asm/cmpxchg.h
index 94ef84429843..8b2e644ef6a1 100644
--- a/arch/ia64/include/asm/cmpxchg.h
+++ b/arch/ia64/include/asm/cmpxchg.h
@@ -5,7 +5,7 @@
 #include <uapi/asm/cmpxchg.h>
 
 #define arch_xchg(ptr, x)	\
-({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
+({(__typeof__(*(ptr))) __arch_xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
 
 #define arch_cmpxchg(ptr, o, n)		cmpxchg_acq((ptr), (o), (n))
 #define arch_cmpxchg64(ptr, o, n)	cmpxchg_acq((ptr), (o), (n))
diff --git a/arch/ia64/include/uapi/asm/cmpxchg.h b/arch/ia64/include/uapi/asm/cmpxchg.h
index 259ae57570bf..85cba138311f 100644
--- a/arch/ia64/include/uapi/asm/cmpxchg.h
+++ b/arch/ia64/include/uapi/asm/cmpxchg.h
@@ -23,7 +23,7 @@
  */
 extern void ia64_xchg_called_with_bad_pointer(void);
 
-#define __xchg(x, ptr, size)						\
+#define __arch_xchg(x, ptr, size)					\
 ({									\
 	unsigned long __xchg_result;					\
 									\
@@ -51,7 +51,7 @@ extern void ia64_xchg_called_with_bad_pointer(void);
 
 #ifndef __KERNEL__
 #define xchg(ptr, x)							\
-({(__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
+({(__typeof__(*(ptr))) __arch_xchg((unsigned long) (x), (ptr), sizeof(*(ptr)));})
 #endif
 
 /*
diff --git a/arch/loongarch/include/asm/cmpxchg.h b/arch/loongarch/include/asm/cmpxchg.h
index ecfa6cf79806..979fde61bba8 100644
--- a/arch/loongarch/include/asm/cmpxchg.h
+++ b/arch/loongarch/include/asm/cmpxchg.h
@@ -62,7 +62,7 @@ static inline unsigned int __xchg_small(volatile void *ptr, unsigned int val,
 }
 
 static __always_inline unsigned long
-__xchg(volatile void *ptr, unsigned long x, int size)
+__arch_xchg(volatile void *ptr, unsigned long x, int size)
 {
 	switch (size) {
 	case 1:
@@ -87,7 +87,7 @@
 	__typeof__(*(ptr)) __res;					\
 									\
 	__res = (__typeof__(*(ptr)))					\
-		__xchg((ptr), (unsigned long)(x), sizeof(*(ptr)));	\
+		__arch_xchg((ptr), (unsigned long)(x), sizeof(*(ptr)));	\
 									\
 	__res;								\
 })
diff --git a/arch/loongarch/include/asm/local.h b/arch/loongarch/include/asm/local.h
index 65fbbae9fc4d..83e995b30e47 100644
--- a/arch/loongarch/include/asm/local.h
+++ b/arch/loongarch/include/asm/local.h
@@ -56,8 +56,17 @@ static inline long local_sub_return(long i, local_t *l)
 	return result;
 }
 
-#define local_cmpxchg(l, o, n) \
-	((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
+static inline long local_cmpxchg(local_t *l, long old, long new)
+{
+	return cmpxchg_local(&l->a.counter, old, new);
+}
+
+static inline bool local_try_cmpxchg(local_t *l, long *old, long new)
+{
+	typeof(l->a.counter) *__old = (typeof(l->a.counter) *) old;
+	return try_cmpxchg_local(&l->a.counter, __old, new);
+}
+
 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
 
 /**
diff --git a/arch/m68k/include/asm/cmpxchg.h b/arch/m68k/include/asm/cmpxchg.h
index 6cf464cdab06..d7f3de9c5d6f 100644
--- a/arch/m68k/include/asm/cmpxchg.h
+++ b/arch/m68k/include/asm/cmpxchg.h
@@ -9,7 +9,7 @@
 extern unsigned long __invalid_xchg_size(unsigned long, volatile void *, int);
 
 #ifndef CONFIG_RMW_INSNS
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static inline unsigned long __arch_xchg(unsigned long x, volatile void * ptr, int size)
 {
 	unsigned long flags, tmp;
 
@@ -40,7 +40,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 	return x;
 }
 #else
-static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
+static inline unsigned long __arch_xchg(unsigned long x, volatile void * ptr, int size)
 {
 	switch (size) {
 	case 1:
@@ -75,7 +75,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
 }
 #endif
 
-#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
+#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__arch_xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
 
 #include <asm-generic/cmpxchg-local.h>
 
diff --git a/arch/mips/include/asm/cmpxchg.h b/arch/mips/include/asm/cmpxchg.h
index 7ec9493b2861..feed343ad483 100644
--- a/arch/mips/include/asm/cmpxchg.h
+++ b/arch/mips/include/asm/cmpxchg.h
@@ -68,7 +68,7 @@ extern unsigned long __xchg_small(volatile void *ptr, unsigned long val,
 				  unsigned int size);
 
 static __always_inline
-unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
+unsigned long __arch_xchg(volatile void *ptr, unsigned long x, int size)
 {
 	switch (size) {
 	case 1:
@@ -102,7 +102,7 @@ unsigned long __xchg(volatile void *ptr, unsigned long x, int size)
 	smp_mb__before_llsc();						\
 									\
 	__res = (__typeof__(*(ptr)))					\
-		__xchg((ptr), (unsigned long)(x), sizeof(*(ptr)));	\
+		__arch_xchg((ptr), (unsigned long)(x), sizeof(*(ptr)));	\
 									\
 	smp_llsc_mb();							\
 									\
diff --git a/arch/mips/include/asm/local.h b/arch/mips/include/asm/local.h
index 08366b1fd273..5daf6fe8e3e9 100644
--- a/arch/mips/include/asm/local.h
+++ b/arch/mips/include/asm/local.h
@@ -94,8 +94,17 @@ static __inline__ long local_sub_return(long i, local_t * l)
 	return result;
 }
 
-#define local_cmpxchg(l, o, n) \
-	((long)cmpxchg_local(&((l)->a.counter), (o), (n)))
+static __inline__ long local_cmpxchg(local_t *l, long old, long new)
+{
+	return cmpxchg_local(&l->a.counter, old, new);
+}
+
+static __inline__ bool local_try_cmpxchg(local_t *l, long *old, long new)
+{
+	typeof(l->a.counter) *__old = (typeof(l->a.counter) *) old;
+	return try_cmpxchg_local(&l->a.counter, __old, new);
+}
+
 #define local_xchg(l, n) (atomic_long_xchg((&(l)->a), (n)))
 
 /**
diff --git a/arch/openrisc/include/asm/cmpxchg.h b/arch/openrisc/include/asm/cmpxchg.h
index 79fd16162ccb..8ee151c072e4 100644
--- a/arch/openrisc/include/asm/cmpxchg.h
+++ b/arch/openrisc/include/asm/cmpxchg.h
@@ -147,8 +147,8 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
 extern unsigned long __xchg_called_with_bad_pointer(void)
 	__compiletime_error("Bad argument size for xchg");
 
-static inline unsigned long __xchg(volatile void *ptr, unsigned long with,
-		int size)
+static inline unsigned long
+__arch_xchg(volatile void *ptr, unsigned long with, int size)
 {
 	switch (size) {
 	case 1:
@@ -163,9 +163,9 @@ static inline unsigned long __xchg(volatile void *ptr, unsigned long with,
 
 #define arch_xchg(ptr, with) 						\
 	({								\
-		(__typeof__(*(ptr))) __xchg((ptr),			\
-					    (unsigned long)(with),	\
-					    sizeof(*(ptr)));		\
+		(__typeof__(*(ptr))) __arch_xchg((ptr),			\
+						 (unsigned long)(with),	\
+						 sizeof(*(ptr)));	\
 	})
 
 #endif /* __ASM_OPENRISC_CMPXCHG_H */
diff --git a/arch/parisc/include/asm/cmpxchg.h b/arch/parisc/include/asm/cmpxchg.h
index 5f274be10567..c1d776bb16b4 100644
--- a/arch/parisc/include/asm/cmpxchg.h
+++ b/arch/parisc/include/asm/cmpxchg.h
@@ -22,7 +22,7 @@ extern unsigned long __xchg64(unsigned long, volatile unsigned long *);
 
 /* optimizer better get rid of switch since size is a constant */
 static inline unsigned long
-__xchg(unsigned long x, volatile void *ptr, int size)
+__arch_xchg(unsigned long x, volatile void *ptr, int size)
 {
 	switch (size) {
 #ifdef CONFIG_64BIT
@@ -49,7 +49,7 @@ __xchg(unsigned long x, volatile void *ptr, int size)
 	__typeof__(*(ptr)) __ret;					\
 	__typeof__(*(ptr)) _x_ = (x);					\
 	__ret = (__typeof__(*(ptr)))					\
-		__xchg((unsigned long)_x_, (ptr), sizeof(*(ptr)));	\
+		__arch_xchg((unsigned long)_x_, (ptr), sizeof(*(ptr)));	\
 	__ret;								\
 })
 
diff --git a/arch/powerpc/include/asm/cmpxchg.h b/arch/powerpc/include/asm/cmpxchg.h
index d0ea0571e79a..dbb50c06f0bf 100644
--- a/arch/powerpc/include/asm/cmpxchg.h
+++ b/arch/powerpc/include/asm/cmpxchg.h
@@ -229,7 +229,7 @@ __xchg_local(void *ptr, unsigned long x, unsigned int size)
 		return __xchg_u64_local(ptr, x);
 #endif
 	}
-	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg");
+	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
 	return x;
 }
 
@@ -248,7 +248,7 @@ __xchg_relaxed(void *ptr, unsigned long x, unsigned int size)
 		return __xchg_u64_relaxed(ptr, x);
 #endif
 	}
-	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_local");
+	BUILD_BUG_ON_MSG(1, "Unsupported size for __xchg_relaxed");
 	return x;
 }
 #define arch_xchg_local(ptr,x)						     \
diff --git a/arch/powerpc/include/asm/local.h b/arch/powerpc/include/asm/local.h
index bc4bd19b7fc2..45492fb5bf22 100644
--- a/arch/powerpc/include/asm/local.h
+++ b/arch/powerpc/include/asm/local.h
@@ -90,6 +90,17 @@ static __inline__ long local_cmpxchg(local_t *l, long o, long n)
 	return t;
 }
 
+static __inline__ bool local_try_cmpxchg(local_t *l, long *po, long n)
+{
+	long o = *po, r;
+
+	r = local_cmpxchg(l, o, n);
+	if (unlikely(r != o))
+		*po = r;
+
+	return likely(r == o);
+}
+
 static __inline__ long local_xchg(local_t *l, long n)
 {
 	long t;
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index 0dfe9d857a76..bba472928b53 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -261,7 +261,7 @@ c_t arch_atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n)	\
 static __always_inline							\
 c_t arch_atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n)		\
 {									\
-	return __xchg(&(v->counter), n, size);				\
+	return __arch_xchg(&(v->counter), n, size);			\
 }									\
 static __always_inline							\
 c_t arch_atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v,	\
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 12debce235e5..2f4726d3cfcc 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -114,7 +114,7 @@
 					    _x_, sizeof(*(ptr)));	\
 })
 
-#define __xchg(ptr, new, size)						\
+#define __arch_xchg(ptr, new, size)					\
 ({									\
 	__typeof__(ptr) __ptr = (ptr);					\
 	__typeof__(new) __new = (new);					\
@@ -143,7 +143,7 @@
 #define arch_xchg(ptr, x)						\
 ({									\
 	__typeof__(*(ptr)) _x_ = (x);					\
-	(__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr)));	\
+	(__typeof__(*(ptr))) __arch_xchg((ptr), _x_, sizeof(*(ptr)));	\
 })
 
 #define xchg32(ptr, x)							\
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 3f26416c2ad8..06e0e42f4eec 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -14,8 +14,8 @@
 
 void __xchg_called_with_bad_pointer(void);
 
-static __always_inline unsigned long __xchg(unsigned long x,
-					    unsigned long address, int size)
+static __always_inline unsigned long
+__arch_xchg(unsigned long x, unsigned long address, int size)
 {
 	unsigned long old;
 	int shift;
@@ -77,8 +77,8 @@ static __always_inline unsigned long __xchg(unsigned long x,
 	__typeof__(*(ptr)) __ret;					\
 									\
 	__ret = (__typeof__(*(ptr)))					\
-		__xchg((unsigned long)(x), (unsigned long)(ptr),	\
-		       sizeof(*(ptr)));					\
+		__arch_xchg((unsigned long)(x), (unsigned long)(ptr),	\
+			    sizeof(*(ptr)));				\
 	__ret;								\
 })
 
diff --git a/arch/sh/include/asm/cmpxchg.h b/arch/sh/include/asm/cmpxchg.h
index 0ed9b3f4a577..288f6f38d98f 100644
--- a/arch/sh/include/asm/cmpxchg.h
+++ b/arch/sh/include/asm/cmpxchg.h
@@ -22,7 +22,7 @@
 
 extern void __xchg_called_with_bad_pointer(void);
 
-#define __xchg(ptr, x, size)				\
+#define __arch_xchg(ptr, x, size)			\
 ({							\
 	unsigned long __xchg__res;			\
 	volatile void *__xchg_ptr = (ptr);		\
@@ -46,7 +46,7 @@ extern void __xchg_called_with_bad_pointer(void);
 })
 
 #define arch_xchg(ptr,x)	\
-	((__typeof__(*(ptr)))__xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
+	((__typeof__(*(ptr)))__arch_xchg((ptr),(unsigned long)(x), sizeof(*(ptr))))
 
 /* This function doesn't exist, so you'll get a linker error
  * if something tries to do an invalid cmpxchg(). */
diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index 27a57a3a7597..7a1339533d1d 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -15,7 +15,7 @@
 unsigned long __xchg_u32(volatile u32 *m, u32 new);
 void __xchg_called_with_bad_pointer(void);
 
-static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
+static inline unsigned long __arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
 	switch (size) {
 	case 4:
@@ -25,7 +25,7 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
 	return x;
 }
 
-#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
+#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__arch_xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
 
 /* Emulate cmpxchg() the same way we emulate atomics,
  * by hashing the object address and indexing into an array
diff --git a/arch/sparc/include/asm/cmpxchg_64.h b/arch/sparc/include/asm/cmpxchg_64.h
index 12d00a42c0a3..66cd61dde9ec 100644
--- a/arch/sparc/include/asm/cmpxchg_64.h
+++ b/arch/sparc/include/asm/cmpxchg_64.h
@@ -55,7 +55,7 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
 #define arch_xchg(ptr,x)						\
 ({	__typeof__(*(ptr)) __ret;					\
 	__ret = (__typeof__(*(ptr)))					\
-		__xchg((unsigned long)(x), (ptr), sizeof(*(ptr)));	\
+		__arch_xchg((unsigned long)(x), (ptr), sizeof(*(ptr)));	\
 	__ret;								\
 })
 
@@ -87,8 +87,8 @@ xchg16(__volatile__ unsigned short *m, unsigned short val)
 	return (load32 & mask) >> bit_shift;
 }
 
-static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
-				   int size)
+static inline unsigned long
+__arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
 	switch (size) {
 	case 2:
diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
index 94fbe6ae7431..540573f515b7 100644
--- a/arch/x86/include/asm/cmpxchg.h
+++ b/arch/x86/include/asm/cmpxchg.h
@@ -221,9 +221,15 @@ extern void __add_wrong_size(void)
 #define __try_cmpxchg(ptr, pold, new, size)				\
 	__raw_try_cmpxchg((ptr), (pold), (new), (size), LOCK_PREFIX)
 
+#define __try_cmpxchg_local(ptr, pold, new, size)			\
+	__raw_try_cmpxchg((ptr), (pold), (new), (size), "")
+
 #define arch_try_cmpxchg(ptr, pold, new) 				\
 	__try_cmpxchg((ptr), (pold), (new), sizeof(*(ptr)))
 
+#define arch_try_cmpxchg_local(ptr, pold, new)				\
+	__try_cmpxchg_local((ptr), (pold), (new), sizeof(*(ptr)))
+
 /*
  * xadd() adds "inc" to "*ptr" and atomically returns the previous
  * value of "*ptr".
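[Review note: x86 is the only architecture in this series that wires up arch_try_cmpxchg_local() natively — __raw_try_cmpxchg() already yields the CMPXCHG success flag, so the _local flavour only has to drop the LOCK prefix. Everything else falls back to the generic version added to atomic-arch-fallback.h further down. As a hedged, stand-alone illustration of why the try_cmpxchg() form is worth wiring up at all, the user-space sketch below contrasts the two loop shapes; GCC __atomic builtins stand in for the kernel primitives, and none of the names in it are kernel APIs:]

/*
 * Illustration only, not kernel code: the failed compare-exchange
 * already hands back the value it found, so the explicit reload in
 * the cmpxchg()-style loop disappears in the try_cmpxchg() style.
 */
#include <stdbool.h>
#include <stdio.h>

static long counter;

/* cmpxchg()-style loop: reload 'old' by hand after every failure. */
static void add_cmpxchg_style(long i)
{
	long old, seen;

	do {
		old = __atomic_load_n(&counter, __ATOMIC_RELAXED);
		seen = old;
		__atomic_compare_exchange_n(&counter, &seen, old + i, false,
					    __ATOMIC_RELAXED, __ATOMIC_RELAXED);
	} while (seen != old);	/* 'seen' holds the value actually found */
}

/* try_cmpxchg()-style loop: 'old' is updated in place on failure. */
static void add_try_cmpxchg_style(long i)
{
	long old = __atomic_load_n(&counter, __ATOMIC_RELAXED);

	while (!__atomic_compare_exchange_n(&counter, &old, old + i, false,
					    __ATOMIC_RELAXED, __ATOMIC_RELAXED))
		;	/* retry with the refreshed 'old' */
}

int main(void)
{
	add_cmpxchg_style(2);
	add_try_cmpxchg_style(3);
	printf("%ld\n", counter);	/* prints 5 */
	return 0;
}

[The second loop needs no separate reload on its failure path, which is the code-size and register-pressure win the conversions in this series are after.]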
diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
index 349a47acaa4a..56d4ef604b91 100644
--- a/arch/x86/include/asm/local.h
+++ b/arch/x86/include/asm/local.h
@@ -120,8 +120,17 @@ static inline long local_sub_return(long i, local_t *l)
 #define local_inc_return(l)  (local_add_return(1, l))
 #define local_dec_return(l)  (local_sub_return(1, l))
 
-#define local_cmpxchg(l, o, n) \
-	(cmpxchg_local(&((l)->a.counter), (o), (n)))
+static inline long local_cmpxchg(local_t *l, long old, long new)
+{
+	return cmpxchg_local(&l->a.counter, old, new);
+}
+
+static inline bool local_try_cmpxchg(local_t *l, long *old, long new)
+{
+	typeof(l->a.counter) *__old = (typeof(l->a.counter) *) old;
+	return try_cmpxchg_local(&l->a.counter, __old, new);
+}
+
 /* Always has a lock prefix */
 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
 
diff --git a/arch/xtensa/include/asm/cmpxchg.h b/arch/xtensa/include/asm/cmpxchg.h
index eb87810357ad..675a11ea8de7 100644
--- a/arch/xtensa/include/asm/cmpxchg.h
+++ b/arch/xtensa/include/asm/cmpxchg.h
@@ -170,7 +170,7 @@ static inline unsigned long xchg_u32(volatile int * m, unsigned long val)
 }
 
 #define arch_xchg(ptr,x) \
-	((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+	((__typeof__(*(ptr)))__arch_xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
 static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
 {
@@ -203,7 +203,7 @@ static inline u32 xchg_small(volatile void *ptr, u32 x, int size)
 extern void __xchg_called_with_bad_pointer(void);
 
 static __inline__ unsigned long
-__xchg(unsigned long x, volatile void * ptr, int size)
+__arch_xchg(unsigned long x, volatile void * ptr, int size)
 {
 	switch (size) {
 	case 1:
diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
index fca7f1d84818..7f97018df66f 100644
--- a/include/asm-generic/local.h
+++ b/include/asm-generic/local.h
@@ -42,6 +42,7 @@ typedef struct
 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
 
 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
+#define local_try_cmpxchg(l, po, n) atomic_long_try_cmpxchg((&(l)->a), (po), (n))
 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
diff --git a/include/asm-generic/local64.h b/include/asm-generic/local64.h
index 765be0b7d883..14963a7a6253 100644
--- a/include/asm-generic/local64.h
+++ b/include/asm-generic/local64.h
@@ -42,7 +42,16 @@ typedef struct {
 #define local64_sub_return(i, l) local_sub_return((i), (&(l)->a))
 #define local64_inc_return(l)	local_inc_return(&(l)->a)
 
-#define local64_cmpxchg(l, o, n) local_cmpxchg((&(l)->a), (o), (n))
+static inline s64 local64_cmpxchg(local64_t *l, s64 old, s64 new)
+{
+	return local_cmpxchg(&l->a, old, new);
+}
+
+static inline bool local64_try_cmpxchg(local64_t *l, s64 *old, s64 new)
+{
+	return local_try_cmpxchg(&l->a, (long *)old, new);
+}
+
 #define local64_xchg(l, n)	local_xchg((&(l)->a), (n))
 #define local64_add_unless(l, _a, u)  local_add_unless((&(l)->a), (_a), (u))
 #define local64_inc_not_zero(l)	local_inc_not_zero(&(l)->a)
@@ -81,6 +90,7 @@ typedef struct {
 #define local64_inc_return(l)	atomic64_inc_return(&(l)->a)
 
 #define local64_cmpxchg(l, o, n) atomic64_cmpxchg((&(l)->a), (o), (n))
+#define local64_try_cmpxchg(l, po, n) atomic64_try_cmpxchg((&(l)->a), (po), (n))
 #define local64_xchg(l, n)	atomic64_xchg((&(l)->a), (n))
 #define local64_add_unless(l, _a, u) atomic64_add_unless((&(l)->a), (_a), (u))
 #define local64_inc_not_zero(l)	atomic64_inc_not_zero(&(l)->a)
 
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
index 4226379a232d..a6e4437c5f36 100644
--- a/include/linux/atomic/atomic-arch-fallback.h
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -217,6 +217,28 @@
 
 #endif /* arch_try_cmpxchg64_relaxed */
 
+#ifndef arch_try_cmpxchg_local
+#define arch_try_cmpxchg_local(_ptr, _oldp, _new) \
+({ \
+	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+	___r = arch_cmpxchg_local((_ptr), ___o, (_new)); \
+	if (unlikely(___r != ___o)) \
+		*___op = ___r; \
+	likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg_local */
+
+#ifndef arch_try_cmpxchg64_local
+#define arch_try_cmpxchg64_local(_ptr, _oldp, _new) \
+({ \
+	typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+	___r = arch_cmpxchg64_local((_ptr), ___o, (_new)); \
+	if (unlikely(___r != ___o)) \
+		*___op = ___r; \
+	likely(___r == ___o); \
+})
+#endif /* arch_try_cmpxchg64_local */
+
 #ifndef arch_atomic_read_acquire
 static __always_inline int
 arch_atomic_read_acquire(const atomic_t *v)
@@ -2646,4 +2668,4 @@ arch_atomic64_dec_if_positive(atomic64_t *v)
 #endif
 
 #endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 00071fffa021cec66f6290d706d69c91df87bade
+// ad2e2b4d168dbc60a73922616047a9bfa446af36
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
index 0496816738ca..03a232a1fa57 100644
--- a/include/linux/atomic/atomic-instrumented.h
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -1948,14 +1948,14 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	kcsan_mb(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_xchg(__ai_ptr, __VA_ARGS__); \
 })
 
 #define xchg_acquire(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_xchg_acquire(__ai_ptr, __VA_ARGS__); \
 })
 
@@ -1963,14 +1963,14 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	kcsan_release(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_xchg_release(__ai_ptr, __VA_ARGS__); \
 })
 
 #define xchg_relaxed(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_xchg_relaxed(__ai_ptr, __VA_ARGS__); \
 })
 
@@ -1978,14 +1978,14 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	kcsan_mb(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
 })
 
 #define cmpxchg_acquire(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg_acquire(__ai_ptr, __VA_ARGS__); \
 })
 
@@ -1993,14 +1993,14 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	kcsan_release(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg_release(__ai_ptr, __VA_ARGS__); \
 })
 
 #define cmpxchg_relaxed(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg_relaxed(__ai_ptr, __VA_ARGS__); \
 })
 
@@ -2008,14 +2008,14 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	kcsan_mb(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg64(__ai_ptr, __VA_ARGS__); \
 })
 
 #define cmpxchg64_acquire(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg64_acquire(__ai_ptr, __VA_ARGS__); \
 })
 
@@ -2023,14 +2023,14 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	kcsan_release(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg64_release(__ai_ptr, __VA_ARGS__); \
 })
 
 #define cmpxchg64_relaxed(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg64_relaxed(__ai_ptr, __VA_ARGS__); \
 })
 
@@ -2039,8 +2039,8 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 	typeof(ptr) __ai_ptr = (ptr); \
 	typeof(oldp) __ai_oldp = (oldp); \
 	kcsan_mb(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
 	arch_try_cmpxchg(__ai_ptr, __ai_oldp, __VA_ARGS__); \
 })
 
@@ -2048,8 +2048,8 @@
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	typeof(oldp) __ai_oldp = (oldp); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
 	arch_try_cmpxchg_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
 })
 
@@ -2058,8 +2058,8 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 	typeof(ptr) __ai_ptr = (ptr); \
 	typeof(oldp) __ai_oldp = (oldp); \
 	kcsan_release(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
 	arch_try_cmpxchg_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
 })
 
@@ -2067,8 +2067,8 @@
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	typeof(oldp) __ai_oldp = (oldp); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
 	arch_try_cmpxchg_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
 })
 
@@ -2077,8 +2077,8 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 	typeof(ptr) __ai_ptr = (ptr); \
 	typeof(oldp) __ai_oldp = (oldp); \
 	kcsan_mb(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
 	arch_try_cmpxchg64(__ai_ptr, __ai_oldp, __VA_ARGS__); \
 })
 
@@ -2086,8 +2086,8 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	typeof(oldp) __ai_oldp = (oldp); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
 	arch_try_cmpxchg64_acquire(__ai_ptr, __ai_oldp, __VA_ARGS__); \
 })
 
@@ -2096,8 +2096,8 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 	typeof(ptr) __ai_ptr = (ptr); \
 	typeof(oldp) __ai_oldp = (oldp); \
 	kcsan_release(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
 	arch_try_cmpxchg64_release(__ai_ptr, __ai_oldp, __VA_ARGS__); \
 })
 
@@ -2105,22 +2105,22 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	typeof(oldp) __ai_oldp = (oldp); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
-	instrument_atomic_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
 	arch_try_cmpxchg64_relaxed(__ai_ptr, __ai_oldp, __VA_ARGS__); \
 })
 
 #define cmpxchg_local(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg_local(__ai_ptr, __VA_ARGS__); \
 })
 
 #define cmpxchg64_local(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_cmpxchg64_local(__ai_ptr, __VA_ARGS__); \
 })
 
@@ -2128,15 +2128,33 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	kcsan_mb(); \
-	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
 	arch_sync_cmpxchg(__ai_ptr, __VA_ARGS__); \
 })
 
+#define try_cmpxchg_local(ptr, oldp, ...) \
+({ \
+	typeof(ptr) __ai_ptr = (ptr); \
+	typeof(oldp) __ai_oldp = (oldp); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	arch_try_cmpxchg_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
+#define try_cmpxchg64_local(ptr, oldp, ...) \
+({ \
+	typeof(ptr) __ai_ptr = (ptr); \
+	typeof(oldp) __ai_oldp = (oldp); \
+	instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+	instrument_read_write(__ai_oldp, sizeof(*__ai_oldp)); \
+	arch_try_cmpxchg64_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
+})
+
 #define cmpxchg_double(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
 	kcsan_mb(); \
-	instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
 	arch_cmpxchg_double(__ai_ptr, __VA_ARGS__); \
 })
 
@@ -2144,9 +2162,9 @@ atomic_long_dec_if_positive(atomic_long_t *v)
 #define cmpxchg_double_local(ptr, ...) \
 ({ \
 	typeof(ptr) __ai_ptr = (ptr); \
-	instrument_atomic_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
+	instrument_atomic_read_write(__ai_ptr, 2 * sizeof(*__ai_ptr)); \
 	arch_cmpxchg_double_local(__ai_ptr, __VA_ARGS__); \
 })
 
 #endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
-// 1b485de9cbaa4900de59e14ee2084357eaeb1c3a
+// 6b513a42e1a1b5962532a019b7fc91eaa044ad5e
diff --git a/kernel/locking/rwbase_rt.c b/kernel/locking/rwbase_rt.c
index c201aadb9301..25ec0239477c 100644
--- a/kernel/locking/rwbase_rt.c
+++ b/kernel/locking/rwbase_rt.c
@@ -72,15 +72,6 @@ static int __sched __rwbase_read_lock(struct rwbase_rt *rwb,
 	int ret;
 
 	raw_spin_lock_irq(&rtm->wait_lock);
-	/*
-	 * Allow readers, as long as the writer has not completely
-	 * acquired the semaphore for write.
-	 */
-	if (atomic_read(&rwb->readers) != WRITER_BIAS) {
-		atomic_inc(&rwb->readers);
-		raw_spin_unlock_irq(&rtm->wait_lock);
-		return 0;
-	}
 
 	/*
 	 * Call into the slow lock path with the rtmutex->wait_lock
diff --git a/scripts/atomic/gen-atomic-fallback.sh b/scripts/atomic/gen-atomic-fallback.sh
index 3a07695e3c89..6e853f0dad8d 100755
--- a/scripts/atomic/gen-atomic-fallback.sh
+++ b/scripts/atomic/gen-atomic-fallback.sh
@@ -225,6 +225,10 @@ for cmpxchg in "cmpxchg" "cmpxchg64"; do
 	gen_try_cmpxchg_fallbacks "${cmpxchg}"
 done
 
+for cmpxchg in "cmpxchg_local" "cmpxchg64_local"; do
+	gen_try_cmpxchg_fallback "${cmpxchg}" ""
+done
+
 grep '^[a-z]' "$1" | while read name meta args; do
 	gen_proto "${meta}" "${name}" "atomic" "int" ${args}
 done
diff --git a/scripts/atomic/gen-atomic-instrumented.sh b/scripts/atomic/gen-atomic-instrumented.sh
index 77c06526a574..d9ffd74f73ca 100755
--- a/scripts/atomic/gen-atomic-instrumented.sh
+++ b/scripts/atomic/gen-atomic-instrumented.sh
@@ -104,8 +104,8 @@ cat <<EOF
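[Review note: the patch is truncated here; the body of the final gen-atomic-instrumented.sh hunk is missing from this copy. The generated arch_try_cmpxchg{,64}_local() fallbacks added to atomic-arch-fallback.h above follow the same shape as the existing try_cmpxchg() fallbacks. As a minimal, hedged user-space sketch of that contract — the cmpxchg_local_stub() below is a single-threaded stand-in for arch_cmpxchg_local(), which in the kernel is atomic with respect to the local CPU, and none of these names are kernel APIs:]

#include <stdbool.h>
#include <stdio.h>

/* Single-threaded stand-in; does not model the per-CPU atomicity. */
static long cmpxchg_local_stub(long *ptr, long old, long new)
{
	long cur = *ptr;

	if (cur == old)
		*ptr = new;
	return cur;
}

/* Mirrors the generated arch_try_cmpxchg_local() fallback. */
static bool try_cmpxchg_local_stub(long *ptr, long *oldp, long new)
{
	long old = *oldp;
	long r = cmpxchg_local_stub(ptr, old, new);

	if (r != old)
		*oldp = r;	/* hand the caller the value actually found */
	return r == old;
}

int main(void)
{
	long v = 40, expected = 40;

	if (try_cmpxchg_local_stub(&v, &expected, 42))
		printf("swapped in 42\n");

	expected = 0;		/* deliberately stale guess */
	if (!try_cmpxchg_local_stub(&v, &expected, 7))
		printf("failed, current value is %ld\n", expected);	/* 42 */
	return 0;
}

[Callers such as the local_try_cmpxchg() conversions in this series rely on exactly this contract: on failure the old-value argument is updated in place, so retry loops need no explicit re-read.]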