Cleaned up interlocking funcs for PPC, fixed return value of interlocked_add.

Marcus Meissner 2002-08-20 00:01:15 +00:00 committed by Alexandre Julliard
parent 7678791d92
commit 3a17a87c81
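
The functional change is the return value of interlocked_xchg_add: the PPC lwarx/add/stwcx. loop leaves its result register holding the value *after* the addition, while callers expect the value the destination held *before* it (the usual fetch-and-add contract, as with Win32 InterlockedExchangeAdd), hence the new "return ret-incr;" in the diff below. A minimal, non-atomic C sketch of that contract, purely illustrative and not part of the commit (reference_xchg_add is a made-up name):

#include <stdio.h>

/* Non-atomic reference model of the contract the PPC assembly implements:
 * return the value *dest held before the addition, and leave *dest
 * incremented by incr.  The real code performs the read-modify-write
 * atomically inside the lwarx/stwcx. loop. */
static long reference_xchg_add( long *dest, long incr )
{
    long old = *dest;     /* value before the update: this is what callers get back */
    *dest = old + incr;   /* the atomic version does this step inside the ll/sc loop */
    return old;
}

int main( void )
{
    long counter = 5;
    long old = reference_xchg_add( &counter, 3 );
    printf( "old=%ld counter=%ld\n", old, counter );  /* prints: old=5 counter=8 */
    return 0;
}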

@@ -752,89 +752,78 @@ __declspec(naked) long interlocked_xchg_add( long *dest, long incr )
 #elif defined(__powerpc__)
 void* interlocked_cmpxchg_ptr( void **dest, void* xchg, void* compare)
 {
-    long ret;
+    long ret = 0;
     long scratch;
     __asm__ __volatile__(
-        "sync; "
         "0: lwarx %0,0,%2 ;"
         " xor. %1,%4,%0;"
         " bne 1f;"
         " stwcx. %3,0,%2;"
         " bne- 0b;"
         "1: "
-        "sync; "
         : "=&r"(ret), "=&r"(scratch)
         : "r"(dest), "r"(xchg), "r"(compare)
-        : "cr0", "memory");
+        : "cr0","memory");
     return (void*)ret;
 }
 long interlocked_cmpxchg( long *dest, long xchg, long compare)
 {
-    long ret;
+    long ret = 0;
     long scratch;
     __asm__ __volatile__(
-        "sync; "
         "0: lwarx %0,0,%2 ;"
         " xor. %1,%4,%0;"
         " bne 1f;"
         " stwcx. %3,0,%2;"
         " bne- 0b;"
         "1: "
-        "sync; "
         : "=&r"(ret), "=&r"(scratch)
         : "r"(dest), "r"(xchg), "r"(compare)
-        : "cr0", "memory");
+        : "cr0","memory");
     return ret;
 }
 long interlocked_xchg_add( long *dest, long incr )
 {
-    void *ret __attribute__ ((aligned (4))) = &ret;
-    long inc = incr;
+    long ret = 0;
     long zero = 0;
     __asm__ __volatile__(
-        "sync; "
         "0: lwarx %0, %3, %1;"
         " add %0, %2, %0;"
         " stwcx. %0, %3, %1;"
         " bne- 0b;"
-        "sync; "
-        : "=&r"(ret)
-        : "r"(dest), "r"(inc), "r"(zero)
+        : "=&r" (ret)
+        : "r"(dest), "r"(incr), "r"(zero)
        : "cr0", "memory"
     );
-    return (long)ret;
+    return ret-incr;
 }
 long interlocked_xchg( long* dest, long val )
 {
-    void *ret __attribute__ ((aligned (4))) = &ret;
+    long ret = 0;
    __asm__ __volatile__(
-        "sync; "
         "0: lwarx %0,0,%1 ;"
         " stwcx. %2,0,%1;"
         " bne- 0b;"
-        "sync; "
         : "=&r"(ret)
         : "r"(dest), "r"(val)
-        : "cr0", "memory");
-    return (long)ret;
+        : "cr0","memory");
+    return ret;
 }
 void* interlocked_xchg_ptr( void** dest, void* val )
 {
-    void *ret __attribute__ ((aligned (4))) = &ret;
+    void *ret = NULL;
     __asm__ __volatile__(
-        "sync; "
         "0: lwarx %0,0,%1 ;"
         " stwcx. %2,0,%1;"
         " bne- 0b;"
-        "sync; "
         : "=&r"(ret)
         : "r"(dest), "r"(val)
-        : "cr0", "memory");
-    return (void*)ret;
+        : "cr0","memory");
+    return ret;
 }
 #elif defined(__sparc__) && defined(__sun__)
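
All of these primitives follow the same load-reserved/store-conditional pattern: lwarx takes a reservation on the destination, stwcx. stores only if the reservation still holds, and bne- retries the loop otherwise, so the value handed back is the one lwarx observed, i.e. the previous contents. A hedged usage sketch, not part of the commit (atomic_increment is an illustrative name), of how a caller can build an atomic increment on top of interlocked_cmpxchg by relying on that previous-value return:

/* Retry until our compare-exchange wins the race: interlocked_cmpxchg installs
 * old+1 only if *dest still equals old, and returns the value it found there. */
long atomic_increment( long *dest )
{
    long old, prev;
    do
    {
        old  = *dest;                                      /* snapshot the current value      */
        prev = interlocked_cmpxchg( dest, old + 1, old );  /* attempt the swap                */
    } while (prev != old);                                 /* another thread won; retry       */
    return old + 1;                                        /* the value we installed          */
}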