commit 5023554eb01dc043f05747a2eb913ceebf2f6d01
Author: Kyle Evans
Date:   Wed Oct 2 14:45:34 2019 -0500

    Try this

diff --git a/sys/powerpc/include/atomic.h b/sys/powerpc/include/atomic.h
index c1ad6dbd6f2..9727457b5a6 100644
--- a/sys/powerpc/include/atomic.h
+++ b/sys/powerpc/include/atomic.h
@@ -65,6 +65,15 @@
 #define __ATOMIC_ACQ()  __asm __volatile("isync" : : : "memory")
 #endif
 
+/* These may not be defined in this file, but they will be defined */
+static __inline int atomic_cmpset_char(__volatile uint8_t *, uint8_t, uint8_t);
+static __inline int atomic_fcmpset_char(__volatile uint8_t *, uint8_t *,
+    uint8_t);
+static __inline int atomic_cmpset_short(__volatile uint16_t *, uint16_t,
+    uint16_t);
+static __inline int atomic_fcmpset_short(__volatile uint16_t *, uint16_t *,
+    uint16_t);
+
 static __inline void
 powerpc_lwsync(void)
 {
@@ -560,40 +569,12 @@ atomic_store_rel_long(volatile u_long *addr, u_long val)
  * two values are equal, update the value of *p with newval. Returns
  * zero if the compare failed, nonzero otherwise.
  */
-static __inline int
-atomic_cmpset_masked(volatile u_int *p, u_int cmpval, u_int newval, u_int mask)
-{
-        int     ret;
-        uint32_t        tmp;
-
-        __asm __volatile (
-                "1:\tlwarx %2, 0, %3\n\t"       /* load old value */
-                "and %0, %2, %7\n\t"
-                "cmplw %4, %0\n\t"              /* compare */
-                "bne- 2f\n\t"                   /* exit if not equal */
-                "andc %2, %2, %7\n\t"
-                "or %2, %2, %5\n\t"
-                "stwcx. %2, 0, %3\n\t"          /* attempt to store */
-                "bne- 1b\n\t"                   /* spin if failed */
-                "li %0, 1\n\t"                  /* success - retval = 1 */
-                "b 3f\n\t"                      /* we've succeeded */
-                "2:\n\t"
-                "stwcx. %2, 0, %3\n\t"          /* clear reservation (74xx) */
-                "li %0, 0\n\t"                  /* failure - retval = 0 */
-                "3:\n\t"
-                : "=&r" (ret), "=m" (*p), "+&r" (tmp)
-                : "r" (p), "r" (cmpval), "r" (newval), "m" (*p),
-                  "r" (mask)
-                : "cr0", "memory");
-
-        return (ret);
-}
-
+#ifdef ISA_206_ATOMICS
 static __inline int
 atomic_cmpset_char(volatile u_char *p, u_char cmpval, u_char newval)
 {
         int     ret;
-#ifdef ISA_206_ATOMICS
+
         __asm __volatile (
                 "1:\tlbarx %0, 0, %2\n\t"       /* load old value */
                 "cmplw %3, %0\n\t"              /* compare */
@@ -609,24 +590,6 @@ atomic_cmpset_char(volatile u_char *p, u_char cmpval, u_char newval)
                 : "=&r" (ret), "=m" (*p)
                 : "r" (p), "r" (cmpval), "r" (newval), "m" (*p)
                 : "cr0", "memory");
-#else
-        u_int *p_adj;
-        uint32_t alignment, cmp_val, mask, new_val, shift;
-
-        alignment = ((((uintptr_t)p) & 0x3));
-        p_adj = (void *)(((uintptr_t)p & ~0x3));
-
-        shift = (3 - alignment) * 8;
-
-        mask = 0xff;
-        new_val = newval;
-        cmp_val = cmpval;
-        mask <<= shift;
-        cmp_val <<= shift;
-        new_val <<= shift;
-
-        ret = atomic_cmpset_masked(p_adj, cmp_val, new_val, mask);
-#endif
 
         return (ret);
 }
@@ -635,7 +598,7 @@ static __inline int
 atomic_cmpset_short(volatile u_short *p, u_short cmpval, u_short newval)
 {
         int     ret;
-#ifdef ISA_206_ATOMICS
+
         __asm __volatile (
                 "1:\tlharx %0, 0, %2\n\t"       /* load old value */
                 "cmplw %3, %0\n\t"              /* compare */
@@ -651,28 +614,10 @@ atomic_cmpset_short(volatile u_short *p, u_short cmpval, u_short newval)
                 : "=&r" (ret), "=m" (*p)
                 : "r" (p), "r" (cmpval), "r" (newval), "m" (*p)
                 : "cr0", "memory");
-#else
-        u_int *p_adj;
-        uint32_t cmp_val, mask, new_val;
-        bool is_aligned;
-
-        is_aligned = ((((uintptr_t)p) & 0x2) == 0);
-        p_adj = (void *)(((uintptr_t)p & ~0x3));
-
-        mask = 0xffff;
-        new_val = newval;
-        cmp_val = cmpval;
-        if (is_aligned) {
-                mask <<= 16;
-                cmp_val <<= 16;
-                new_val <<= 16;
-        }
-
-        ret = atomic_cmpset_masked(p_adj, cmp_val, new_val, mask);
-#endif
 
         return (ret);
 }
+#endif  /* ISA_206_ATOMICS */
 
 static __inline int
 atomic_cmpset_int(volatile u_int* p, u_int cmpval, u_int newval)
@@ -789,39 +734,12 @@ ATOMIC_CMPSET_ACQ_REL(long);
  * zero if the compare failed and sets *cmpval to the read value from *p,
  * nonzero otherwise.
  */
-static __inline int
-atomic_fcmpset_masked(volatile u_int *p, u_int *cmpval, u_int newval, u_int mask)
-{
-        int     ret;
-        uint32_t        tmp;
-
-        __asm __volatile (
-                "lwarx %2, 0, %4\n\t"           /* load old value */
-                "and %0, %2, %9\n\t"
-                "cmplw %4, %0\n\t"              /* compare */
-                "bne- 1f\n\t"                   /* exit if not equal */
-                "andc %2, %2, %8\n\t"
-                "or %2, %2, %6\n\t"
-                "stwcx. %2, 0, %4\n\t"          /* attempt to store */
-                "li %0, 1\n\t"                  /* success - retval = 1 */
-                "b 2f\n\t"                      /* we've succeeded */
-                "1:\n\t"
-                "stwcx. %2, 0, %4\n\t"          /* clear reservation (74xx) */
-                "li %0, 0\n\t"                  /* failure - retval = 0 */
-                "2:\n\t"
-                : "=&r" (ret), "=m" (*p), "+&r" (tmp), "=m" (*cmpval)
-                : "r" (p), "r" (*cmpval), "r" (newval), "m" (*p), "r"(cmpval),
-                  "r" (mask)
-                : "cr0", "memory");
-
-        return (ret);
-}
-
+#ifdef ISA_206_ATOMICS
 static __inline int
 atomic_fcmpset_char(volatile u_char *p, u_char *cmpval, u_char newval)
 {
         int     ret;
-#ifdef ISA_206_ATOMICS
+
         __asm __volatile (
                 "lbarx %0, 0, %3\n\t"           /* load old value */
                 "cmplw %4, %0\n\t"              /* compare */
@@ -838,26 +756,6 @@ atomic_fcmpset_char(volatile u_char *p, u_char *cmpval, u_char newval)
                 : "=&r" (ret), "=m" (*p), "=m" (*cmpval)
                 : "r" (p), "r" (*cmpval), "r" (newval), "m" (*p), "r"(cmpval)
                 : "cr0", "memory");
-#else
-        u_int *p_adj;
-        uint32_t alignment, mask, new_val, shift, tmp;
-
-        alignment = ((((uintptr_t)p) & 0x3));
-        p_adj = (void *)(((uintptr_t)p & ~0x3));
-
-        shift = (3 - alignment) * 8;
-        tmp = *cmpval;
-        new_val = newval;
-
-        mask = 0xff;
-        mask <<= shift;
-        tmp <<= shift;
-        newval <<= shift;
-
-        ret = atomic_fcmpset_masked(p_adj, &tmp, newval, mask);
-        if (!ret)
-                *cmpval = tmp >> shift;
-#endif
 
         return (ret);
 }
@@ -866,7 +764,7 @@ static __inline int
 atomic_fcmpset_short(volatile u_short *p, u_short *cmpval, u_short newval)
 {
         int     ret;
-#ifdef ISA_206_ATOMICS
+
         __asm __volatile (
                 "lharx %0, 0, %3\n\t"           /* load old value */
                 "cmplw %4, %0\n\t"              /* compare */
@@ -883,30 +781,11 @@ atomic_fcmpset_short(volatile u_short *p, u_short *cmpval, u_short newval)
                 : "=&r" (ret), "=m" (*p), "=m" (*cmpval)
                 : "r" (p), "r" (*cmpval), "r" (newval), "m" (*p), "r"(cmpval)
                 : "cr0", "memory");
-#else
-        u_int *p_adj;
-        uint32_t mask, new_val, tmp;
-        bool is_aligned;
-
-        is_aligned = ((((uintptr_t)p) & 0x2) == 0);
-        p_adj = (void *)(((uintptr_t)p & ~0x3));
-
-        tmp = *cmpval;
-        new_val = newval;
-        mask = 0xffff;
-        if (is_aligned) {
-                mask <<= 16;
-                newval <<= 16;
-                tmp <<= 16;
-        }
-
-        ret = atomic_fcmpset_masked(p_adj, &tmp, newval, mask);
-        if (!ret)
-                *cmpval = tmp >> (is_aligned ? 16 : 0);
-#endif
 
         return (ret);
 }
+#endif  /* ISA_206_ATOMICS */
+
 static __inline int
 atomic_fcmpset_int(volatile u_int *p, u_int *cmpval, u_int newval)
 {
@@ -992,6 +871,14 @@ ATOMIC_FCMPSET_ACQ_REL(short);
 ATOMIC_FCMPSET_ACQ_REL(int);
 ATOMIC_FCMPSET_ACQ_REL(long);
 
+#define atomic_fcmpset_8        atomic_fcmpset_char
+#define atomic_fcmpset_acq_8    atomic_fcmpset_acq_char
+#define atomic_fcmpset_rel_8    atomic_fcmpset_rel_char
+
+#define atomic_fcmpset_16       atomic_fcmpset_short
+#define atomic_fcmpset_acq_16   atomic_fcmpset_acq_short
+#define atomic_fcmpset_rel_16   atomic_fcmpset_rel_short
+
 #define atomic_fcmpset_32       atomic_fcmpset_int
 #define atomic_fcmpset_acq_32   atomic_fcmpset_acq_int
 #define atomic_fcmpset_rel_32   atomic_fcmpset_rel_int
@@ -1109,4 +996,8 @@ atomic_thread_fence_seq_cst(void)
         __asm __volatile("sync" : : : "memory");
 }
 
+#ifndef ISA_206_ATOMICS
+#include
+#endif
+
 #endif /* ! _MACHINE_ATOMIC_H_ */
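Background on the dropped fallback, for readers of the patch: when ISA_206_ATOMICS is not
defined, the 8- and 16-bit cmpset/fcmpset routines declared near the top of the header are
expected to come from the fallback header pulled in by the last hunk (its include target is
not visible above) rather than from atomic.h itself. The technique the removed
atomic_cmpset_masked()/atomic_fcmpset_masked() code implemented, and which any generic
fallback can reuse, is to perform the compare-and-set on the aligned 32-bit word containing
the byte or halfword. Below is a rough, illustrative sketch of that idea only; the function
name emulate_cmpset_char and the use of C11 <stdatomic.h> are not part of the patch, which
uses lwarx/stwcx. inline assembly in the kernel.

/*
 * Illustrative sketch (not from the patch): emulate an 8-bit compare-and-set
 * with a 32-bit CAS on the aligned word containing the byte.  C11 atomics are
 * used purely for readability; the shift math assumes big-endian byte order,
 * as on powerpc.
 */
#include <stdatomic.h>
#include <stdint.h>

static int
emulate_cmpset_char(volatile uint8_t *p, uint8_t cmpval, uint8_t newval)
{
        volatile _Atomic uint32_t *wp;
        uint32_t old, expected, desired, mask;
        unsigned int shift;

        /* Aligned word containing *p, and the byte's position within it. */
        wp = (volatile _Atomic uint32_t *)((uintptr_t)p & ~(uintptr_t)0x3);
        shift = (3 - ((uintptr_t)p & 0x3)) * 8;         /* big-endian */
        mask = (uint32_t)0xff << shift;

        old = atomic_load(wp);
        for (;;) {
                if (((old & mask) >> shift) != cmpval)
                        return (0);                     /* compare failed */
                expected = old;
                desired = (old & ~mask) | ((uint32_t)newval << shift);
                if (atomic_compare_exchange_weak(wp, &expected, desired))
                        return (1);                     /* swapped in newval */
                old = expected;                         /* raced; retry */
        }
}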