sparc32,parisc,csky: Provide one-byte and two-byte cmpxchg() support

This series provides native one-byte and two-byte cmpxchg() support
 for sparc32 and parisc, courtesy of Al Viro.  This support is provided
 by the same hashed-array-of-locks technique used for the other atomic
 operations provided for these two platforms.
 
 This series also provides emulated one-byte cmpxchg() support for csky
 using a new cmpxchg_emu_u8() function that uses a four-byte cmpxchg()
 to emulate the one-byte variant.
 
 Similar patches for emulation of one-byte cmpxchg() for arc, sh, and
 xtensa have not yet received maintainer acks, so they are slated for
 the v6.11 merge window.
 -----BEGIN PGP SIGNATURE-----
 
 iQJHBAABCgAxFiEEbK7UrM+RBIrCoViJnr8S83LZ+4wFAmY/gZ8THHBhdWxtY2tA
 a2VybmVsLm9yZwAKCRCevxLzctn7jFIjD/0Uu4VZZN96jYbSaDbC5aAkEHg/swBK
 6OVn+yspLOvkebVZlSfus+7rc5VUrxT3GA/gvAWEQsUlPqpYg6Qja/efFpPPRjIq
 lwkFE5HFgE0J4lBo9p78ggm6Hx60WUPlNg9uS23qURZbFTx5TYQyAdzXw9HlYzr8
 jg5IuTtO5L5AZzR2ocDRh4A5sqfcBJCVdVsKO+XzdFLLtgum+kJY7StYLPdY8VtL
 pIV3+ZQENoiwzE+wccnCb2R/4kt6jsEDShlpV4VEfv76HwbjBdvSq4jEg4jS2N3/
 AIyThclD97AEdbbM1oJ3oZdjD3GLGVPhVFfiMSGD5HGA+JVJPjJe2it4o+xY7CIR
 sSdI/E3Rs67qgaga6t2vHygDZABOwgNLAsc4VwM7X6I20fRixkYVc7aVOTnAPzmr
 15iaFd/T7fLKJcC3m/IXb9iNdlfe0Op4+YVD0lOTWmzIk80Xgf45a39u1VFlqQvh
 CLIZG3IdmuxXSWjOmk70iokzJgoSmBriGLbAT3K++pzGYUN/BNQs6XRR77BczFsX
 CbZTZKnEWZMR1U0UWa/TbvUKcsVBZTYebJSvJOG2/+oVqayzvwYfBsE/vWZcI72K
 XEEpKY9ZPDf/gCs/G4OFWt2QPJ0PL+Nt4UZDr5Khrqgo1PwN0uIXstA4mnJ0WjqQ
 sGiACjdTXk4h0w==
 =AEPy
 -----END PGP SIGNATURE-----

Merge tag 'cmpxchg.2024.05.11a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu

Pull cmpxchg updates from Paul McKenney:
 "Provide one-byte and two-byte cmpxchg() support on sparc32, parisc,
  and csky

  This provides native one-byte and two-byte cmpxchg() support for
  sparc32 and parisc, courtesy of Al Viro. This support is provided by
  the same hashed-array-of-locks technique used for the other atomic
  operations provided for these two platforms.

  There is also emulated one-byte cmpxchg() support for csky using a new
  cmpxchg_emu_u8() function that uses a four-byte cmpxchg() to emulate
  the one-byte variant.

  Similar patches for emulation of one-byte cmpxchg() for arc, sh, and
  xtensa have not yet received maintainer acks, so they are slated for
  the v6.11 merge window"
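
The hashed-array-of-locks technique referred to above boils down to the pattern
sketched here.  This is a simplified sketch modeled on arch/sparc/lib/atomic32.c
as modified by this series; the table size and the address shift follow the
sparc32 code, and parisc's _atomic_spin_lock_irqsave() wrappers differ in detail:

/*
 * Sketch: protect emulated atomics with a small array of spinlocks,
 * selecting a lock by hashing the target address.  Two operations on
 * the same address always hash to the same lock, so they serialize.
 */
#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a)	\
	(&__atomic_hash[(((unsigned long) a) >> 8) & (ATOMIC_HASH_SIZE - 1)])

static spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] = {
	[0 ... (ATOMIC_HASH_SIZE - 1)] = __SPIN_LOCK_UNLOCKED(__atomic_hash)
};

u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
{
	unsigned long flags;
	u8 prev;

	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
	if ((prev = *ptr) == old)	/* Compare and, only on a match, ... */
		*ptr = new;		/* ... exchange, all under the lock. */
	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
	return prev;
}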

* tag 'cmpxchg.2024.05.11a' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu:
  csky: Emulate one-byte cmpxchg
  lib: Add one-byte emulation function
  parisc: add u16 support to cmpxchg()
  parisc: add missing export of __cmpxchg_u8()
  parisc: unify implementations of __cmpxchg_u{8,32,64}
  parisc: __cmpxchg_u32(): lift conversion into the callers
  sparc32: add __cmpxchg_u{8,16}() and teach __cmpxchg() to handle those sizes
  sparc32: unify __cmpxchg_u{32,64}
  sparc32: make the first argument of __cmpxchg_u64() volatile u64 *
  sparc32: make __cmpxchg_u32() return u32
Linus Torvalds 2024-05-13 10:05:39 -07:00
commit 2e57d1d606
11 changed files with 131 additions and 81 deletions

arch/Kconfig

@@ -1617,4 +1617,7 @@ config CC_HAS_SANE_FUNCTION_ALIGNMENT
 	# strict alignment always, even with -falign-functions.
 	def_bool CC_HAS_MIN_FUNCTION_ALIGNMENT || CC_IS_CLANG
 
+config ARCH_NEED_CMPXCHG_1_EMU
+	bool
+
 endmenu

arch/csky/Kconfig

@@ -37,6 +37,7 @@ config CSKY
 	select ARCH_INLINE_SPIN_UNLOCK_BH if !PREEMPTION
 	select ARCH_INLINE_SPIN_UNLOCK_IRQ if !PREEMPTION
 	select ARCH_INLINE_SPIN_UNLOCK_IRQRESTORE if !PREEMPTION
+	select ARCH_NEED_CMPXCHG_1_EMU
 	select ARCH_WANT_FRAME_POINTERS if !CPU_CK610 && $(cc-option,-mbacktrace)
 	select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT
 	select COMMON_CLK

arch/csky/include/asm/cmpxchg.h

@@ -6,6 +6,7 @@
 #ifdef CONFIG_SMP
 #include <linux/bug.h>
 #include <asm/barrier.h>
+#include <linux/cmpxchg-emu.h>
 
 #define __xchg_relaxed(new, ptr, size)				\
 ({								\
@@ -61,6 +62,9 @@
 	__typeof__(old) __old = (old);				\
 	__typeof__(*(ptr)) __ret;				\
 	switch (size) {						\
+	case 1:							\
+		__ret = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)__ptr, (uintptr_t)__old, (uintptr_t)__new); \
+		break;						\
 	case 4:							\
 		asm volatile (					\
 		"1:	ldex.w		%0, (%3) \n"		\
@@ -91,6 +95,9 @@
 	__typeof__(old) __old = (old);				\
 	__typeof__(*(ptr)) __ret;				\
 	switch (size) {						\
+	case 1:							\
+		__ret = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)__ptr, (uintptr_t)__old, (uintptr_t)__new); \
+		break;						\
 	case 4:							\
 		asm volatile (					\
 		"1:	ldex.w		%0, (%3) \n"		\
@@ -122,6 +129,9 @@
 	__typeof__(old) __old = (old);				\
 	__typeof__(*(ptr)) __ret;				\
 	switch (size) {						\
+	case 1:							\
+		__ret = (__typeof__(*(ptr)))cmpxchg_emu_u8((volatile u8 *)__ptr, (uintptr_t)__old, (uintptr_t)__new); \
+		break;						\
 	case 4:							\
 		asm volatile (					\
 		RELEASE_FENCE					\

arch/parisc/include/asm/cmpxchg.h

@@ -56,26 +56,24 @@ __arch_xchg(unsigned long x, volatile void *ptr, int size)
 /* bug catcher for when unsupported size is used - won't link */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
-extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old,
-				   unsigned int new_);
-extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
+/* __cmpxchg_u... defined in arch/parisc/lib/bitops.c */
 extern u8 __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new_);
+extern u16 __cmpxchg_u16(volatile u16 *ptr, u16 old, u16 new_);
+extern u32 __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
+extern u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new_);
 
 /* don't worry...optimizer will get rid of most of this */
 static inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 {
-	switch (size) {
+	return
 #ifdef CONFIG_64BIT
-	case 8: return __cmpxchg_u64((u64 *)ptr, old, new_);
+		size == 8 ? __cmpxchg_u64(ptr, old, new_) :
 #endif
-	case 4: return __cmpxchg_u32((unsigned int *)ptr,
-				     (unsigned int)old, (unsigned int)new_);
-	case 1: return __cmpxchg_u8((u8 *)ptr, old & 0xff, new_ & 0xff);
-	}
-	__cmpxchg_called_with_bad_pointer();
-	return old;
+		size == 4 ? __cmpxchg_u32(ptr, old, new_) :
+		size == 2 ? __cmpxchg_u16(ptr, old, new_) :
+		size == 1 ? __cmpxchg_u8(ptr, old, new_) :
+		(__cmpxchg_called_with_bad_pointer(), old);
 }
 
 #define arch_cmpxchg(ptr, o, n)					\

arch/parisc/kernel/parisc_ksyms.c

@@ -22,6 +22,8 @@ EXPORT_SYMBOL(memset);
 #include <linux/atomic.h>
 EXPORT_SYMBOL(__xchg8);
 EXPORT_SYMBOL(__xchg32);
+EXPORT_SYMBOL(__cmpxchg_u8);
+EXPORT_SYMBOL(__cmpxchg_u16);
 EXPORT_SYMBOL(__cmpxchg_u32);
 EXPORT_SYMBOL(__cmpxchg_u64);
 #ifdef CONFIG_SMP

arch/parisc/lib/bitops.c

@@ -56,38 +56,20 @@ unsigned long notrace __xchg8(char x, volatile char *ptr)
 }
 
-u64 notrace __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new)
-{
-	unsigned long flags;
-	u64 prev;
+#define CMPXCHG(T)						\
+	T notrace __cmpxchg_##T(volatile T *ptr, T old, T new)	\
+	{							\
+		unsigned long flags;				\
+		T prev;						\
+								\
+		_atomic_spin_lock_irqsave(ptr, flags);		\
+		if ((prev = *ptr) == old)			\
+			*ptr = new;				\
+		_atomic_spin_unlock_irqrestore(ptr, flags);	\
+		return prev;					\
+	}
 
-	_atomic_spin_lock_irqsave(ptr, flags);
-	if ((prev = *ptr) == old)
-		*ptr = new;
-	_atomic_spin_unlock_irqrestore(ptr, flags);
-	return prev;
-}
-
-unsigned long notrace __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new)
-{
-	unsigned long flags;
-	unsigned int prev;
-
-	_atomic_spin_lock_irqsave(ptr, flags);
-	if ((prev = *ptr) == old)
-		*ptr = new;
-	_atomic_spin_unlock_irqrestore(ptr, flags);
-	return (unsigned long)prev;
-}
-
-u8 notrace __cmpxchg_u8(volatile u8 *ptr, u8 old, u8 new)
-{
-	unsigned long flags;
-	u8 prev;
-
-	_atomic_spin_lock_irqsave(ptr, flags);
-	if ((prev = *ptr) == old)
-		*ptr = new;
-	_atomic_spin_unlock_irqrestore(ptr, flags);
-	return prev;
-}
+CMPXCHG(u64)
+CMPXCHG(u32)
+CMPXCHG(u16)
+CMPXCHG(u8)
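
To make the macro's effect concrete, the new CMPXCHG(u16) instantiation above
expands to roughly the following function (whitespace adjusted, explanatory
comment added), which is what gives parisc its two-byte cmpxchg():

u16 notrace __cmpxchg_u16(volatile u16 *ptr, u16 old, u16 new)
{
	unsigned long flags;
	u16 prev;

	/* Same hashed-lock protocol as the u32 and u64 variants. */
	_atomic_spin_lock_irqsave(ptr, flags);
	if ((prev = *ptr) == old)
		*ptr = new;
	_atomic_spin_unlock_irqrestore(ptr, flags);
	return prev;
}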

arch/sparc/include/asm/cmpxchg_32.h

@@ -38,21 +38,19 @@ static __always_inline unsigned long __arch_xchg(unsigned long x, __volatile__ void * ptr, int size)
 
 /* bug catcher for when unsupported size is used - won't link */
 void __cmpxchg_called_with_bad_pointer(void);
-/* we only need to support cmpxchg of a u32 on sparc */
-unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
+u8 __cmpxchg_u8(volatile u8 *m, u8 old, u8 new_);
+u16 __cmpxchg_u16(volatile u16 *m, u16 old, u16 new_);
+u32 __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
 
 /* don't worry...optimizer will get rid of most of this */
 static inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 {
-	switch (size) {
-	case 4:
-		return __cmpxchg_u32((u32 *)ptr, (u32)old, (u32)new_);
-	default:
-		__cmpxchg_called_with_bad_pointer();
-		break;
-	}
-	return old;
+	return
+		size == 1 ? __cmpxchg_u8(ptr, old, new_) :
+		size == 2 ? __cmpxchg_u16(ptr, old, new_) :
+		size == 4 ? __cmpxchg_u32(ptr, old, new_) :
+		(__cmpxchg_called_with_bad_pointer(), old);
 }
 
 #define arch_cmpxchg(ptr, o, n)					\
@@ -63,7 +61,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 	(unsigned long)_n_, sizeof(*(ptr)));			\
 })
 
-u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new);
+u64 __cmpxchg_u64(volatile u64 *ptr, u64 old, u64 new);
 #define arch_cmpxchg64(ptr, old, new)	__cmpxchg_u64(ptr, old, new)
 
 #include <asm-generic/cmpxchg-local.h>

arch/sparc/lib/atomic32.c

@@ -159,32 +159,27 @@ unsigned long sp32___change_bit(unsigned long *addr, unsigned long mask)
 }
 EXPORT_SYMBOL(sp32___change_bit);
 
-unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
-{
-	unsigned long flags;
-	u32 prev;
+#define CMPXCHG(T)						\
+	T __cmpxchg_##T(volatile T *ptr, T old, T new)		\
+	{							\
+		unsigned long flags;				\
+		T prev;						\
+								\
+		spin_lock_irqsave(ATOMIC_HASH(ptr), flags);	\
+		if ((prev = *ptr) == old)			\
+			*ptr = new;				\
+		spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);\
+								\
+		return prev;					\
+	}
 
-	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
-	if ((prev = *ptr) == old)
-		*ptr = new;
-	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
-
-	return (unsigned long)prev;
-}
+CMPXCHG(u8)
+CMPXCHG(u16)
+CMPXCHG(u32)
+CMPXCHG(u64)
+EXPORT_SYMBOL(__cmpxchg_u8);
+EXPORT_SYMBOL(__cmpxchg_u16);
 EXPORT_SYMBOL(__cmpxchg_u32);
-
-u64 __cmpxchg_u64(u64 *ptr, u64 old, u64 new)
-{
-	unsigned long flags;
-	u64 prev;
-
-	spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
-	if ((prev = *ptr) == old)
-		*ptr = new;
-	spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
-
-	return prev;
-}
 EXPORT_SYMBOL(__cmpxchg_u64);
 
 unsigned long __xchg_u32(volatile u32 *ptr, u32 new)

include/linux/cmpxchg-emu.h (new file)

@@ -0,0 +1,15 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
 * Emulated 1-byte and 2-byte cmpxchg operations for architectures
 * lacking direct support for these sizes.  These are implemented in terms
 * of 4-byte cmpxchg operations.
 *
 * Copyright (C) 2024 Paul E. McKenney.
 */

#ifndef __LINUX_CMPXCHG_EMU_H
#define __LINUX_CMPXCHG_EMU_H

uintptr_t cmpxchg_emu_u8(volatile u8 *p, uintptr_t old, uintptr_t new);

#endif /* __LINUX_CMPXCHG_EMU_H */

lib/Makefile

@@ -236,6 +236,7 @@ obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
 lib-$(CONFIG_GENERIC_BUG) += bug.o
 
 obj-$(CONFIG_HAVE_ARCH_TRACEHOOK) += syscall.o
+obj-$(CONFIG_ARCH_NEED_CMPXCHG_1_EMU) += cmpxchg-emu.o
 
 obj-$(CONFIG_DYNAMIC_DEBUG_CORE) += dynamic_debug.o
 #ensure exported functions have prototypes

lib/cmpxchg-emu.c (new file, 45 lines)

@@ -0,0 +1,45 @@
// SPDX-License-Identifier: GPL-2.0+
/*
 * Emulated 1-byte cmpxchg operation for architectures lacking direct
 * support for this size.  This is implemented in terms of 4-byte cmpxchg
 * operations.
 *
 * Copyright (C) 2024 Paul E. McKenney.
 */

#include <linux/types.h>
#include <linux/export.h>
#include <linux/instrumented.h>
#include <linux/atomic.h>
#include <linux/panic.h>
#include <linux/bug.h>
#include <asm-generic/rwonce.h>
#include <linux/cmpxchg-emu.h>

union u8_32 {
	u8 b[4];
	u32 w;
};

/* Emulate one-byte cmpxchg() in terms of 4-byte cmpxchg. */
uintptr_t cmpxchg_emu_u8(volatile u8 *p, uintptr_t old, uintptr_t new)
{
	u32 *p32 = (u32 *)(((uintptr_t)p) & ~0x3);
	int i = ((uintptr_t)p) & 0x3;
	union u8_32 old32;
	union u8_32 new32;
	u32 ret;

	ret = READ_ONCE(*p32);
	do {
		old32.w = ret;
		if (old32.b[i] != old)
			return old32.b[i];
		new32.w = old32.w;
		new32.b[i] = new;
		instrument_atomic_read_write(p, 1);
		ret = data_race(cmpxchg(p32, old32.w, new32.w)); // Overridden above.
	} while (ret != old32.w);
	return old;
}
EXPORT_SYMBOL_GPL(cmpxchg_emu_u8);
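
With the above in place, a plain cmpxchg() on a one-byte field works on csky as
well as natively on sparc32 and parisc.  A hypothetical caller (the struct and
function names below are invented for illustration) might look like:

/* Hypothetical example: atomically claim a one-byte state field. */
struct widget {
	u8 state;	/* 0 = idle, 1 = claimed */
};

static bool widget_try_claim(struct widget *wp)
{
	/* Succeeds only for the caller that observes state == 0. */
	return cmpxchg(&wp->state, 0, 1) == 0;
}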