Index: mips/include/atomic.h
===================================================================
--- mips/include/atomic.h	(revision 221778)
+++ mips/include/atomic.h	(working copy)
@@ -66,122 +66,143 @@
 #define wmb() mips_sync()
 #define rmb() mips_sync()
 
-/*
- * Various simple arithmetic on memory which is atomic in the presence
- * of interrupts and SMP safe.
- */
+#define ATOMIC_SET_OP(type) \
+    static __inline void \
+    atomic_set_##type(__volatile u_##type *p, u_##type v) \
+    { \
+        u_##type temp; \
+        \
+        __asm __volatile ( \
+        "1:\tll %0, %3\n\t" /* load old value */ \
+        "or %0, %2, %0\n\t" /* calc. new value */ \
+        "sc %0, %1\n\t" /* attempt to store */ \
+        "beqz %0, 1b\n\t" /* spin if failed */ \
+        : "=&r" (temp), "=m" (*p) \
+        : "r" (v), "m" (*p) \
+        : "memory"); \
+    }
 
-void atomic_set_8(__volatile uint8_t *, uint8_t);
-void atomic_clear_8(__volatile uint8_t *, uint8_t);
-void atomic_add_8(__volatile uint8_t *, uint8_t);
-void atomic_subtract_8(__volatile uint8_t *, uint8_t);
+#define ATOMIC_CLEAR_OP(type) \
+    static __inline void \
+    atomic_clear_##type(__volatile u_##type *p, u_##type v) \
+    { \
+        u_##type temp; \
+        \
+        v = ~v; \
+        \
+        __asm __volatile ( \
+        "1:\tll %0, %3\n\t" /* load old value */ \
+        "and %0, %2, %0\n\t" /* calc. new value */ \
+        "sc %0, %1\n\t" /* attempt to store */ \
+        "beqz %0, 1b\n\t" /* spin if failed */ \
+        : "=&r" (temp), "=m" (*p) \
+        : "r" (v), "m" (*p) \
+        : "memory"); \
+    }
 
-void atomic_set_16(__volatile uint16_t *, uint16_t);
-void atomic_clear_16(__volatile uint16_t *, uint16_t);
-void atomic_add_16(__volatile uint16_t *, uint16_t);
-void atomic_subtract_16(__volatile uint16_t *, uint16_t);
+#define ATOMIC_ADD_OP(type) \
+    static __inline void \
+    atomic_add_##type(__volatile u_##type *p, u_##type v) \
+    { \
+        u_##type temp; \
+        \
+        __asm __volatile ( \
+        "1:\tll %0, %3\n\t" /* load old value */ \
+        "addu %0, %2, %0\n\t" /* calc. new value */ \
+        "sc %0, %1\n\t" /* attempt to store */ \
+        "beqz %0, 1b\n\t" /* spin if failed */ \
+        : "=&r" (temp), "=m" (*p) \
+        : "r" (v), "m" (*p) \
+        : "memory"); \
+    }
 
-static __inline void
-atomic_set_32(__volatile uint32_t *p, uint32_t v)
-{
-    uint32_t temp;
+#define ATOMIC_SUBTRACT_OP(type) \
+    static __inline void \
+    atomic_subtract_##type(__volatile u_##type *p, u_##type v) \
+    { \
+        u_##type temp; \
+        \
+        __asm __volatile ( \
+        "1:\tll %0, %3\n\t" /* load old value */ \
+        "subu %0, %2\n\t" /* calc. new value */ \
+        "sc %0, %1\n\t" /* attempt to store */ \
+        "beqz %0, 1b\n\t" /* spin if failed */ \
+        : "=&r" (temp), "=m" (*p) \
+        : "r" (v), "m" (*p) \
+        : "memory"); \
+    }
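
(Aside for review, not part of the diff: hand-expanding ATOMIC_SET_OP(int) gives roughly the inline below, whitespace aside. The ll/sc pair is what makes the read-modify-write atomic: sc succeeds only if the word has not been touched since the ll, and it leaves %0 zero on failure, which beqz turns into a retry.)

    static __inline void
    atomic_set_int(__volatile u_int *p, u_int v)
    {
        u_int temp;

        __asm __volatile (
        "1:\tll %0, %3\n\t" /* load old value */
        "or %0, %2, %0\n\t" /* calc. new value */
        "sc %0, %1\n\t" /* attempt to store */
        "beqz %0, 1b\n\t" /* spin if failed */
        : "=&r" (temp), "=m" (*p)
        : "r" (v), "m" (*p)
        : "memory");
    }
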
 
-    __asm __volatile (
-    "1:\tll %0, %3\n\t" /* load old value */
-    "or %0, %2, %0\n\t" /* calculate new value */
-    "sc %0, %1\n\t" /* attempt to store */
-    "beqz %0, 1b\n\t" /* spin if failed */
-    : "=&r" (temp), "=m" (*p)
-    : "r" (v), "m" (*p)
-    : "memory");
+#define ATOMIC_READANDCLEAR_OP(type) \
+    static __inline u_##type \
+    atomic_readandclear_##type(__volatile u_##type *addr) \
+    { \
+        u_##type result,temp; \
+        \
+        __asm __volatile ( \
+        "1:\tll %0,%3\n\t" /* \
+                            * load current value, \
+                            * asserting lock \
+                            */ \
+        "li %1,0\n\t" /* value to store */ \
+        "sc %1,%2\n\t" /* attempt to store */ \
+        "beqz %1, 1b\n\t" /* \
+                           * if the store failed, \
+                           * spin \
+                           */ \
+        : "=&r"(result), "=&r"(temp), "=m" (*addr) \
+        : "m" (*addr) \
+        : "memory"); \
+        \
+        return (result); \
+    }
-}
+#define ATOMIC_READANDSET_OP(type) \
+    static __inline u_##type \
+    atomic_readandset_##type(__volatile u_##type *addr, \
+        u_##type value) \
+    { \
+        u_##type result,temp; \
+        \
+        __asm __volatile ( \
+        "1:\tll %0,%3\n\t" /* \
+                            * load current value, \
+                            * asserting lock \
+                            */ \
+        "or %1,$0,%4\n\t" \
+        "sc %1,%2\n\t" /* attempt to store */ \
+        "beqz %1, 1b\n\t" /* \
+                           * if the store failed, \
+                           * spin \
+                           */ \
+        : "=&r"(result), "=&r"(temp), "=m" (*addr) \
+        : "m" (*addr), "r" (value) \
+        : "memory"); \
+        \
+        return (result); \
+    }
 
-static __inline void
-atomic_clear_32(__volatile uint32_t *p, uint32_t v)
-{
-    uint32_t temp;
-    v = ~v;
+/*
+ * Various simple arithmetic on memory which is atomic in the presence
+ * of interrupts and SMP safe.
+ */
-    __asm __volatile (
-    "1:\tll %0, %3\n\t" /* load old value */
-    "and %0, %2, %0\n\t" /* calculate new value */
-    "sc %0, %1\n\t" /* attempt to store */
-    "beqz %0, 1b\n\t" /* spin if failed */
-    : "=&r" (temp), "=m" (*p)
-    : "r" (v), "m" (*p)
-    : "memory");
-}
 
+void atomic_set_short(__volatile u_short *, u_short);
+void atomic_clear_short(__volatile u_short *, u_short);
+void atomic_add_short(__volatile u_short *, u_short);
+void atomic_subtract_short(__volatile u_short *, u_short);
 
-static __inline void
-atomic_add_32(__volatile uint32_t *p, uint32_t v)
-{
-    uint32_t temp;
+ATOMIC_SET_OP(int)
+ATOMIC_CLEAR_OP(int)
+ATOMIC_ADD_OP(int)
+ATOMIC_SUBTRACT_OP(int)
+ATOMIC_READANDCLEAR_OP(int)
+ATOMIC_READANDSET_OP(int)
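
(Aside, a usage sketch rather than patch content: atomic_readandclear_int is the natural primitive for draining a word of pending flags exactly once; pending_events and handle_event() below are hypothetical names.)

    static volatile u_int pending_events; /* hypothetical flags word */

    static void
    drain_events(void)
    {
        u_int ev;

        /* Snapshot and zero the flags in a single atomic step. */
        ev = atomic_readandclear_int(&pending_events);
        while (ev != 0) {
            handle_event(ffs(ev) - 1); /* hypothetical handler */
            ev &= ev - 1; /* clear the lowest set bit */
        }
    }
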
 
-    __asm __volatile (
-    "1:\tll %0, %3\n\t" /* load old value */
-    "addu %0, %2, %0\n\t" /* calculate new value */
-    "sc %0, %1\n\t" /* attempt to store */
-    "beqz %0, 1b\n\t" /* spin if failed */
-    : "=&r" (temp), "=m" (*p)
-    : "r" (v), "m" (*p)
-    : "memory");
-}
-
-static __inline void
-atomic_subtract_32(__volatile uint32_t *p, uint32_t v)
-{
-    uint32_t temp;
-
-    __asm __volatile (
-    "1:\tll %0, %3\n\t" /* load old value */
-    "subu %0, %2\n\t" /* calculate new value */
-    "sc %0, %1\n\t" /* attempt to store */
-    "beqz %0, 1b\n\t" /* spin if failed */
-    : "=&r" (temp), "=m" (*p)
-    : "r" (v), "m" (*p)
-    : "memory");
-}
-
-static __inline uint32_t
-atomic_readandclear_32(__volatile uint32_t *addr)
-{
-    uint32_t result,temp;
-
-    __asm __volatile (
-    "1:\tll %0,%3\n\t" /* load current value, asserting lock */
-    "li %1,0\n\t" /* value to store */
-    "sc %1,%2\n\t" /* attempt to store */
-    "beqz %1, 1b\n\t" /* if the store failed, spin */
-    : "=&r"(result), "=&r"(temp), "=m" (*addr)
-    : "m" (*addr)
-    : "memory");
-
-    return result;
-}
-
-static __inline uint32_t
-atomic_readandset_32(__volatile uint32_t *addr, uint32_t value)
-{
-    uint32_t result,temp;
-
-    __asm __volatile (
-    "1:\tll %0,%3\n\t" /* load current value, asserting lock */
-    "or %1,$0,%4\n\t"
-    "sc %1,%2\n\t" /* attempt to store */
-    "beqz %1, 1b\n\t" /* if the store failed, spin */
-    : "=&r"(result), "=&r"(temp), "=m" (*addr)
-    : "m" (*addr), "r" (value)
-    : "memory");
-
-    return result;
-}
-
 #if defined(__mips_n64) || defined(__mips_n32)
 static __inline void
-atomic_set_64(__volatile uint64_t *p, uint64_t v)
+atomic_set_long(__volatile u_long *p, u_long v)
 {
-    uint64_t temp;
+    u_long temp;
 
     __asm __volatile (
     "1:\n\t"
@@ -196,9 +217,9 @@
 }
 
 static __inline void
-atomic_clear_64(__volatile uint64_t *p, uint64_t v)
+atomic_clear_long(__volatile u_long *p, u_long v)
 {
-    uint64_t temp;
+    u_long temp;
     v = ~v;
 
     __asm __volatile (
@@ -213,9 +234,9 @@
 }
 
 static __inline void
-atomic_add_64(__volatile uint64_t *p, uint64_t v)
+atomic_add_long(__volatile u_long *p, u_long v)
 {
-    uint64_t temp;
+    u_long temp;
 
     __asm __volatile (
     "1:\n\t"
@@ -229,9 +250,9 @@
 }
 
 static __inline void
-atomic_subtract_64(__volatile uint64_t *p, uint64_t v)
+atomic_subtract_long(__volatile u_long *p, u_long v)
 {
-    uint64_t temp;
+    u_long temp;
 
     __asm __volatile (
     "1:\n\t"
@@ -244,10 +265,10 @@
     : "memory");
 }
 
-static __inline uint64_t
-atomic_readandclear_64(__volatile uint64_t *addr)
+static __inline u_long
+atomic_readandclear_long(__volatile u_long *addr)
 {
-    uint64_t result,temp;
+    u_long result,temp;
 
     __asm __volatile (
     "1:\n\t"
@@ -262,10 +283,10 @@
     return result;
 }
 
-static __inline uint64_t
-atomic_readandset_64(__volatile uint64_t *addr, uint64_t value)
+static __inline u_long
+atomic_readandset_long(__volatile u_long *addr, u_long value)
 {
-    uint64_t result,temp;
+    u_long result,temp;
 
     __asm __volatile (
     "1:\n\t"
@@ -279,68 +300,78 @@
     return result;
 }
+#else /* __mips_n64 || __mips_n32 */
+ATOMIC_SET_OP(long)
+ATOMIC_CLEAR_OP(long)
+ATOMIC_ADD_OP(long)
+ATOMIC_SUBTRACT_OP(long)
+ATOMIC_READANDCLEAR_OP(long)
+ATOMIC_READANDSET_OP(long)
 #endif
 
-#define ATOMIC_ACQ_REL(NAME, WIDTH) \
+#undef ATOMIC_SET_OP
+#undef ATOMIC_CLEAR_OP
+#undef ATOMIC_ADD_OP
+#undef ATOMIC_SUBTRACT_OP
+#undef ATOMIC_READANDCLEAR_OP
+#undef ATOMIC_READANDSET_OP
+
+#define ATOMIC_ACQ_REL(name, type) \
 static __inline void \
-atomic_##NAME##_acq_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
+atomic_##name##_acq_##type(__volatile u_##type *p, u_##type v) \
 { \
-    atomic_##NAME##_##WIDTH(p, v); \
+    atomic_##name##_##type(p, v); \
     mips_sync(); \
 } \
 \
 static __inline void \
-atomic_##NAME##_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
+atomic_##name##_rel_##type(__volatile u_##type *p, u_##type v) \
 { \
     mips_sync(); \
-    atomic_##NAME##_##WIDTH(p, v); \
+    atomic_##name##_##type(p, v); \
 }
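
(Aside: hand-expanding ATOMIC_ACQ_REL(add, int) yields the pair below. The acquire variant performs the operation and then issues the barrier; the release variant issues the barrier first. Since mips_sync() is a full barrier, both are at least as strong as the acquire/release contract requires.)

    static __inline void
    atomic_add_acq_int(__volatile u_int *p, u_int v)
    {
        atomic_add_int(p, v);
        mips_sync();
    }

    static __inline void
    atomic_add_rel_int(__volatile u_int *p, u_int v)
    {
        mips_sync();
        atomic_add_int(p, v);
    }
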
 
 /* Variants of simple arithmetic with memory barriers. */
-ATOMIC_ACQ_REL(set, 8)
-ATOMIC_ACQ_REL(clear, 8)
-ATOMIC_ACQ_REL(add, 8)
-ATOMIC_ACQ_REL(subtract, 8)
-ATOMIC_ACQ_REL(set, 16)
-ATOMIC_ACQ_REL(clear, 16)
-ATOMIC_ACQ_REL(add, 16)
-ATOMIC_ACQ_REL(subtract, 16)
-ATOMIC_ACQ_REL(set, 32)
-ATOMIC_ACQ_REL(clear, 32)
-ATOMIC_ACQ_REL(add, 32)
-ATOMIC_ACQ_REL(subtract, 32)
-#if defined(__mips_n64) || defined(__mips_n32)
-ATOMIC_ACQ_REL(set, 64)
-ATOMIC_ACQ_REL(clear, 64)
-ATOMIC_ACQ_REL(add, 64)
-ATOMIC_ACQ_REL(subtract, 64)
-#endif
+ATOMIC_ACQ_REL(set, short)
+ATOMIC_ACQ_REL(clear, short)
+ATOMIC_ACQ_REL(add, short)
+ATOMIC_ACQ_REL(subtract, short)
+ATOMIC_ACQ_REL(set, int)
+ATOMIC_ACQ_REL(clear, int)
+ATOMIC_ACQ_REL(add, int)
+ATOMIC_ACQ_REL(subtract, int)
+
+ATOMIC_ACQ_REL(set, long)
+ATOMIC_ACQ_REL(clear, long)
+ATOMIC_ACQ_REL(add, long)
+ATOMIC_ACQ_REL(subtract, long)
+
 #undef ATOMIC_ACQ_REL
 
 /*
  * We assume that a = b will do atomic loads and stores.
  */
-#define ATOMIC_STORE_LOAD(WIDTH) \
-static __inline uint##WIDTH##_t \
-atomic_load_acq_##WIDTH(__volatile uint##WIDTH##_t *p) \
-{ \
-    uint##WIDTH##_t v; \
-    \
-    v = *p; \
-    mips_sync(); \
-    return (v); \
-} \
-\
-static __inline void \
-atomic_store_rel_##WIDTH(__volatile uint##WIDTH##_t *p, uint##WIDTH##_t v)\
-{ \
-    mips_sync(); \
-    *p = v; \
-}
+#define ATOMIC_STORE_LOAD(type) \
+    static __inline u_##type \
+    atomic_load_acq_##type(__volatile u_##type *p) \
+    { \
+        u_##type v; \
+        \
+        v = *p; \
+        mips_sync(); \
+        return (v); \
+    } \
+    \
+    static __inline void \
+    atomic_store_rel_##type(__volatile u_##type *p, u_##type v) \
+    { \
+        mips_sync(); \
+        *p = v; \
+    }
 
-ATOMIC_STORE_LOAD(32)
-ATOMIC_STORE_LOAD(64)
+ATOMIC_STORE_LOAD(int)
+ATOMIC_STORE_LOAD(long)
 #if !defined(__mips_n64) && !defined(__mips_n32)
 void atomic_store_64(__volatile uint64_t *, uint64_t *);
 void atomic_load_64(__volatile uint64_t *, uint64_t *);
@@ -365,10 +396,10 @@
  * two values are equal, update the value of *p with newval. Returns
  * zero if the compare failed, nonzero otherwise.
  */
-static __inline uint32_t
-atomic_cmpset_32(__volatile uint32_t* p, uint32_t cmpval, uint32_t newval)
+static __inline int
+atomic_cmpset_int(__volatile u_int* p, u_int cmpval, u_int newval)
 {
-    uint32_t ret;
+    u_int ret;
 
     __asm __volatile (
     "1:\tll %0, %4\n\t" /* load old value */
@@ -384,7 +415,7 @@
     : "r" (cmpval), "r" (newval), "m" (*p)
     : "memory");
 
-    return ret;
+    return (ret);
 }
 
 /*
@@ -392,31 +423,31 @@
  * two values are equal, update the value of *p with newval. Returns
  * zero if the compare failed, nonzero otherwise.
  */
-static __inline uint32_t
-atomic_cmpset_acq_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
+static __inline int
+atomic_cmpset_acq_int(__volatile u_int *p, u_int cmpval, u_int newval)
 {
     int retval;
 
-    retval = atomic_cmpset_32(p, cmpval, newval);
+    retval = atomic_cmpset_int(p, cmpval, newval);
     mips_sync();
     return (retval);
 }
 
-static __inline uint32_t
-atomic_cmpset_rel_32(__volatile uint32_t *p, uint32_t cmpval, uint32_t newval)
+static __inline int
+atomic_cmpset_rel_int(__volatile u_int *p, u_int cmpval, u_int newval)
 {
     mips_sync();
-    return (atomic_cmpset_32(p, cmpval, newval));
+    return (atomic_cmpset_int(p, cmpval, newval));
 }
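
(Aside, usage sketch only: the classic consumer of the acquire/release cmpset pair is a try-lock loop; lockword is a hypothetical name. The acquire barrier keeps the critical section's accesses after the winning cmpset, and the release barrier keeps them before the unlocking store.)

    static volatile u_int lockword; /* hypothetical lock word, 0 == free */

    static void
    lock_acquire(void)
    {
        /* Spin until we atomically transition 0 -> 1. */
        while (atomic_cmpset_acq_int(&lockword, 0, 1) == 0)
            ;
    }

    static void
    lock_release(void)
    {
        atomic_store_rel_int(&lockword, 0);
    }
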
 
 /*
  * Atomically add the value of v to the integer pointed to by p and return
  * the previous value of *p.
  */
-static __inline uint32_t
-atomic_fetchadd_32(__volatile uint32_t *p, uint32_t v)
+static __inline u_int
+atomic_fetchadd_int(__volatile u_int *p, u_int v)
 {
-    uint32_t value, temp;
+    u_int value, temp;
 
     __asm __volatile (
     "1:\tll %0, %1\n\t" /* load old value */
@@ -434,10 +465,10 @@
  * two values are equal, update the value of *p with newval. Returns
  * zero if the compare failed, nonzero otherwise.
  */
-static __inline uint64_t
-atomic_cmpset_64(__volatile uint64_t* p, uint64_t cmpval, uint64_t newval)
+static __inline int
+atomic_cmpset_long(__volatile u_long* p, u_long cmpval, u_long newval)
 {
-    uint64_t ret;
+    int ret;
 
     __asm __volatile (
     "1:\n\t"
@@ -454,39 +485,17 @@
     : "r" (cmpval), "r" (newval), "m" (*p)
     : "memory");
 
-    return ret;
+    return (ret);
 }
 
 /*
- * Atomically compare the value stored at *p with cmpval and if the
- * two values are equal, update the value of *p with newval. Returns
- * zero if the compare failed, nonzero otherwise.
- */
-static __inline uint64_t
-atomic_cmpset_acq_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
-{
-    int retval;
-
-    retval = atomic_cmpset_64(p, cmpval, newval);
-    mips_sync();
-    return (retval);
-}
-
-static __inline uint64_t
-atomic_cmpset_rel_64(__volatile uint64_t *p, uint64_t cmpval, uint64_t newval)
-{
-    mips_sync();
-    return (atomic_cmpset_64(p, cmpval, newval));
-}
-
-/*
  * Atomically add the value of v to the integer pointed to by p and return
  * the previous value of *p.
  */
-static __inline uint64_t
-atomic_fetchadd_64(__volatile uint64_t *p, uint64_t v)
+static __inline u_long
+atomic_fetchadd_long(__volatile u_long *p, u_long v)
 {
-    uint64_t value, temp;
+    u_long value, temp;
 
     __asm __volatile (
     "1:\n\t"
@@ -498,58 +507,83 @@
     : "r" (v), "m" (*p));
     return (value);
 }
+#else /* __mips_n64 || __mips_n32 */
+static __inline int
+atomic_cmpset_long(__volatile u_long *p, u_long cmpval, u_long newval)
+{
+    return (atomic_cmpset_int((__volatile u_int *)p, (u_int)cmpval,
+        (u_int)newval));
+}
+
+/*
+ * Atomically add the value of v to the integer pointed to by p and return
+ * the previous value of *p.
+ */
+static __inline u_long
+atomic_fetchadd_long(__volatile u_long *p, u_long v)
+{
+    return ((u_long)atomic_fetchadd_int((__volatile u_int *)p, (u_int)v));
+}
 #endif
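
(Aside: whichever branch is compiled, atomic_fetchadd_long keeps fetchadd's contract of returning the value before the addition, so a unique-id allocator stays a one-liner. Sketch with a hypothetical counter.)

    static volatile u_int next_id; /* hypothetical counter */

    static u_int
    alloc_id(void)
    {
        /* The pre-increment value is returned, so ids never repeat. */
        return (atomic_fetchadd_int(&next_id, 1));
    }
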
 
-/* Operations on chars. */
-#define atomic_set_char atomic_set_8
-#define atomic_set_acq_char atomic_set_acq_8
-#define atomic_set_rel_char atomic_set_rel_8
-#define atomic_clear_char atomic_clear_8
-#define atomic_clear_acq_char atomic_clear_acq_8
-#define atomic_clear_rel_char atomic_clear_rel_8
-#define atomic_add_char atomic_add_8
-#define atomic_add_acq_char atomic_add_acq_8
-#define atomic_add_rel_char atomic_add_rel_8
-#define atomic_subtract_char atomic_subtract_8
-#define atomic_subtract_acq_char atomic_subtract_acq_8
-#define atomic_subtract_rel_char atomic_subtract_rel_8
+/*
+ * Atomically compare the value stored at *p with cmpval and if the
+ * two values are equal, update the value of *p with newval. Returns
+ * zero if the compare failed, nonzero otherwise.
+ */
+static __inline int
+atomic_cmpset_acq_long(__volatile u_long *p, u_long cmpval, u_long newval)
+{
+    int retval;
 
-/* Operations on shorts. */
-#define atomic_set_short atomic_set_16
-#define atomic_set_acq_short atomic_set_acq_16
-#define atomic_set_rel_short atomic_set_rel_16
-#define atomic_clear_short atomic_clear_16
-#define atomic_clear_acq_short atomic_clear_acq_16
-#define atomic_clear_rel_short atomic_clear_rel_16
-#define atomic_add_short atomic_add_16
-#define atomic_add_acq_short atomic_add_acq_16
-#define atomic_add_rel_short atomic_add_rel_16
-#define atomic_subtract_short atomic_subtract_16
-#define atomic_subtract_acq_short atomic_subtract_acq_16
-#define atomic_subtract_rel_short atomic_subtract_rel_16
+    retval = atomic_cmpset_long(p, cmpval, newval);
+    mips_sync();
+    return (retval);
+}
 
-/* Operations on ints. */
-#define atomic_set_int atomic_set_32
-#define atomic_set_acq_int atomic_set_acq_32
-#define atomic_set_rel_int atomic_set_rel_32
-#define atomic_clear_int atomic_clear_32
-#define atomic_clear_acq_int atomic_clear_acq_32
-#define atomic_clear_rel_int atomic_clear_rel_32
-#define atomic_add_int atomic_add_32
-#define atomic_add_acq_int atomic_add_acq_32
-#define atomic_add_rel_int atomic_add_rel_32
-#define atomic_subtract_int atomic_subtract_32
-#define atomic_subtract_acq_int atomic_subtract_acq_32
-#define atomic_subtract_rel_int atomic_subtract_rel_32
-#define atomic_cmpset_int atomic_cmpset_32
-#define atomic_cmpset_acq_int atomic_cmpset_acq_32
-#define atomic_cmpset_rel_int atomic_cmpset_rel_32
-#define atomic_load_acq_int atomic_load_acq_32
-#define atomic_store_rel_int atomic_store_rel_32
-#define atomic_readandclear_int atomic_readandclear_32
-#define atomic_readandset_int atomic_readandset_32
-#define atomic_fetchadd_int atomic_fetchadd_32
+static __inline int
+atomic_cmpset_rel_long(__volatile u_long *p, u_long cmpval, u_long newval)
+{
+    mips_sync();
+    return (atomic_cmpset_long(p, cmpval, newval));
+}
+/* Operations on 16-bits. */
+#define atomic_set_16 atomic_set_short
+#define atomic_set_acq_16 atomic_set_acq_short
+#define atomic_set_rel_16 atomic_set_rel_short
+#define atomic_clear_16 atomic_clear_short
+#define atomic_clear_acq_16 atomic_clear_acq_short
+#define atomic_clear_rel_16 atomic_clear_rel_short
+#define atomic_add_16 atomic_add_short
+#define atomic_add_acq_16 atomic_add_acq_short
+#define atomic_add_rel_16 atomic_add_rel_short
+#define atomic_subtract_16 atomic_subtract_short
+#define atomic_subtract_acq_16 atomic_subtract_acq_short
+#define atomic_subtract_rel_16 atomic_subtract_rel_short
+
+/* Operations on 32-bits. */
+#define atomic_set_32 atomic_set_int
+#define atomic_set_acq_32 atomic_set_acq_int
+#define atomic_set_rel_32 atomic_set_rel_int
+#define atomic_clear_32 atomic_clear_int
+#define atomic_clear_acq_32 atomic_clear_acq_int
+#define atomic_clear_rel_32 atomic_clear_rel_int
+#define atomic_add_32 atomic_add_int
+#define atomic_add_acq_32 atomic_add_acq_int
+#define atomic_add_rel_32 atomic_add_rel_int
+#define atomic_subtract_32 atomic_subtract_int
+#define atomic_subtract_acq_32 atomic_subtract_acq_int
+#define atomic_subtract_rel_32 atomic_subtract_rel_int
+#define atomic_cmpset_32 atomic_cmpset_int
+#define atomic_cmpset_acq_32 atomic_cmpset_acq_int
+#define atomic_cmpset_rel_32 atomic_cmpset_rel_int
+#define atomic_load_acq_32 atomic_load_acq_int
+#define atomic_store_rel_32 atomic_store_rel_int
+#define atomic_readandclear_32 atomic_readandclear_int
+#define atomic_readandset_32 atomic_readandset_int
+#define atomic_fetchadd_32 atomic_fetchadd_int
+
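
(Aside: the aliasing now runs in the opposite direction, with the numbered names as #defines on top of the type-based inlines, so existing callers that spell the width keep compiling unchanged. Sketch; refs is hypothetical, and on this platform uint32_t and u_int have the same width.)

    static volatile uint32_t refs; /* hypothetical refcount */

    static void
    ref_acquire(void)
    {
        /* Preprocesses to atomic_add_int(&refs, 1). */
        atomic_add_32(&refs, 1);
    }
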
 /*
  * I think the following is right, even for n32. For n32 the pointers
  * are still 32-bits, so we need to operate on them as 32-bit quantities,
@@ -557,59 +591,27 @@
  * no question because they are always 32-bits.
  */
 #ifdef __mips_n64
-/* Operations on longs. */
-#define atomic_set_long atomic_set_64
-#define atomic_set_acq_long atomic_set_acq_64
-#define atomic_set_rel_long atomic_set_rel_64
-#define atomic_clear_long atomic_clear_64
-#define atomic_clear_acq_long atomic_clear_acq_64
-#define atomic_clear_rel_long atomic_clear_rel_64
-#define atomic_add_long atomic_add_64
-#define atomic_add_acq_long atomic_add_acq_64
-#define atomic_add_rel_long atomic_add_rel_64
-#define atomic_subtract_long atomic_subtract_64
-#define atomic_subtract_acq_long atomic_subtract_acq_64
-#define atomic_subtract_rel_long atomic_subtract_rel_64
-#define atomic_cmpset_long atomic_cmpset_64
-#define atomic_cmpset_acq_long atomic_cmpset_acq_64
-#define atomic_cmpset_rel_long atomic_cmpset_rel_64
-#define atomic_load_acq_long atomic_load_acq_64
-#define atomic_store_rel_long atomic_store_rel_64
-#define atomic_fetchadd_long atomic_fetchadd_64
-#define atomic_readandclear_long atomic_readandclear_64
+/* Operations on 64-bits. */
+#define atomic_set_64 atomic_set_long
+#define atomic_set_acq_64 atomic_set_acq_long
+#define atomic_set_rel_64 atomic_set_rel_long
+#define atomic_clear_64 atomic_clear_long
+#define atomic_clear_acq_64 atomic_clear_acq_long
+#define atomic_clear_rel_64 atomic_clear_rel_long
+#define atomic_add_64 atomic_add_long
+#define atomic_add_acq_64 atomic_add_acq_long
+#define atomic_add_rel_64 atomic_add_rel_long
+#define atomic_subtract_64 atomic_subtract_long
+#define atomic_subtract_acq_64 atomic_subtract_acq_long
+#define atomic_subtract_rel_64 atomic_subtract_rel_long
+#define atomic_cmpset_64 atomic_cmpset_long
+#define atomic_cmpset_acq_64 atomic_cmpset_acq_long
+#define atomic_cmpset_rel_64 atomic_cmpset_rel_long
+#define atomic_load_acq_64 atomic_load_acq_long
+#define atomic_store_rel_64 atomic_store_rel_long
+#define atomic_fetchadd_64 atomic_fetchadd_long
+#define atomic_readandclear_64 atomic_readandclear_long
 
-#else /* !__mips_n64 */
-
-/* Operations on longs. */
-#define atomic_set_long atomic_set_32
-#define atomic_set_acq_long atomic_set_acq_32
-#define atomic_set_rel_long atomic_set_rel_32
-#define atomic_clear_long atomic_clear_32
-#define atomic_clear_acq_long atomic_clear_acq_32
-#define atomic_clear_rel_long atomic_clear_rel_32
-#define atomic_add_long(p, v) \
-    atomic_add_32((volatile u_int *)(p), (u_int)(v))
-#define atomic_add_acq_long atomic_add_acq_32
-#define atomic_add_rel_long atomic_add_rel_32
-#define atomic_subtract_long(p, v) \
-    atomic_subtract_32((volatile u_int *)(p), (u_int)(v))
-#define atomic_subtract_acq_long atomic_subtract_acq_32
-#define atomic_subtract_rel_long atomic_subtract_rel_32
-#define atomic_cmpset_long atomic_cmpset_32
-#define atomic_cmpset_acq_long(p, cmpval, newval) \
-    atomic_cmpset_acq_32((volatile u_int *)(p), \
-        (u_int)(cmpval), (u_int)(newval))
-#define atomic_cmpset_rel_long(p, cmpval, newval) \
-    atomic_cmpset_rel_32((volatile u_int *)(p), \
-        (u_int)(cmpval), (u_int)(newval))
-#define atomic_load_acq_long atomic_load_acq_32
-#define atomic_store_rel_long atomic_store_rel_32
-#define atomic_fetchadd_long(p, v) \
-    atomic_fetchadd_32((volatile u_int *)(p), (u_int)(v))
-#define atomic_readandclear_long atomic_readandclear_32
-
-#endif /* __mips_n64 */
-
 /* Operations on pointers. */
 #define atomic_set_ptr atomic_set_long
 #define atomic_set_acq_ptr atomic_set_acq_long
@@ -630,4 +632,28 @@
 #define atomic_store_rel_ptr atomic_store_rel_long
 #define atomic_readandclear_ptr atomic_readandclear_long
 
+#else /* !__mips_n64 */
+
+/* Operations on pointers. */
+#define atomic_set_ptr atomic_set_int
+#define atomic_set_acq_ptr atomic_set_acq_int
+#define atomic_set_rel_ptr atomic_set_rel_int
+#define atomic_clear_ptr atomic_clear_int
+#define atomic_clear_acq_ptr atomic_clear_acq_int
+#define atomic_clear_rel_ptr atomic_clear_rel_int
+#define atomic_add_ptr atomic_add_int
+#define atomic_add_acq_ptr atomic_add_acq_int
+#define atomic_add_rel_ptr atomic_add_rel_int
+#define atomic_subtract_ptr atomic_subtract_int
+#define atomic_subtract_acq_ptr atomic_subtract_acq_int
+#define atomic_subtract_rel_ptr atomic_subtract_rel_int
+#define atomic_cmpset_ptr atomic_cmpset_int
+#define atomic_cmpset_acq_ptr atomic_cmpset_acq_int
+#define atomic_cmpset_rel_ptr atomic_cmpset_rel_int
+#define atomic_load_acq_ptr atomic_load_acq_int
+#define atomic_store_rel_ptr atomic_store_rel_int
+#define atomic_readandclear_ptr atomic_readandclear_int
+
+#endif /* __mips_n64 */
+
 #endif /* ! _MACHINE_ATOMIC_H_ */
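
(Aside, usage sketch: with the ptr operations mapped to the native word size, a release/acquire pointer handoff reads the same on both ABIs. The sketch below is spelled for the n64 case, where the ptr names resolve to the long variants; struct msg and mailbox are hypothetical. On the 32-bit ABIs the same code works with u_int in place of u_long.)

    struct msg; /* hypothetical payload type */
    static volatile u_long mailbox; /* hypothetical slot, 0 == empty */

    static void
    post(struct msg *m)
    {
        /* Release: the stores that filled *m are visible before the pointer. */
        atomic_store_rel_ptr(&mailbox, (u_long)m);
    }

    static struct msg *
    peek(void)
    {
        /* Acquire: the pointer load is ordered before any reads of *m. */
        return ((struct msg *)atomic_load_acq_ptr(&mailbox));
    }
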
Index: mips/rmi/dev/nlge/if_nlge.c
===================================================================
--- mips/rmi/dev/nlge/if_nlge.c	(revision 221778)
+++ mips/rmi/dev/nlge/if_nlge.c	(working copy)
@@ -51,10 +51,10 @@
 #include "opt_device_polling.h"
 #endif
 
+#include
 #include
 #include
 #include
-#include
 #include
 #include
 #include

Index: mips/mips/support.S
===================================================================
--- mips/mips/support.S	(revision 221778)
+++ mips/mips/support.S	(working copy)
@@ -1102,12 +1102,12 @@
 
 /**
  * void
- * atomic_set_16(u_int16_t *a, u_int16_t b)
+ * atomic_set_short(u_short *a, u_short b)
  * {
  *    *a |= b;
  * }
 */
-LEAF(atomic_set_16)
+LEAF(atomic_set_short)
     .set noreorder
     srl a0, a0, 2 # round down address to be 32-bit aligned
     sll a0, a0, 2
@@ -1120,16 +1120,16 @@
     nop
     j ra
     nop
-END(atomic_set_16)
+END(atomic_set_short)
 
 /**
  * void
- * atomic_clear_16(u_int16_t *a, u_int16_t b)
+ * atomic_clear_short(u_short *a, u_short b)
  * {
  *    *a &= ~b;
  * }
 */
-LEAF(atomic_clear_16)
+LEAF(atomic_clear_short)
     .set noreorder
     srl a0, a0, 2 # round down address to be 32-bit aligned
     sll a0, a0, 2
@@ -1147,17 +1147,17 @@
     nop
     j ra
     nop
-END(atomic_clear_16)
+END(atomic_clear_short)
 
 
 /**
  * void
- * atomic_subtract_16(uint16_t *a, uint16_t b)
+ * atomic_subtract_short(u_short *a, u_short b)
  * {
  *    *a -= b;
  * }
 */
-LEAF(atomic_subtract_16)
+LEAF(atomic_subtract_short)
     .set noreorder
     srl a0, a0, 2 # round down address to be 32-bit aligned
     sll a0, a0, 2
@@ -1175,16 +1175,16 @@
     nop
     j ra
     nop
-END(atomic_subtract_16)
+END(atomic_subtract_short)
 
 /**
  * void
- * atomic_add_16(uint16_t *a, uint16_t b)
+ * atomic_add_short(u_short *a, u_short b)
  * {
  *    *a += b;
 * }
 */
-LEAF(atomic_add_16)
+LEAF(atomic_add_short)
     .set noreorder
     srl a0, a0, 2 # round down address to be 32-bit aligned
     sll a0, a0, 2
@@ -1202,63 +1202,8 @@
     nop
     j ra
     nop
-END(atomic_add_16)
+END(atomic_add_short)
 
-/**
- * void
- * atomic_add_8(uint8_t *a, uint8_t b)
- * {
- *    *a += b;
- * }
- */
-LEAF(atomic_add_8)
-    .set noreorder
-    srl a0, a0, 2 # round down address to be 32-bit aligned
-    sll a0, a0, 2
-1:
-    ll t0, 0(a0)
-    move t1, t0
-    andi t1, t1, 0xff # t1 has the original lower 8 bits
-    addu t1, t1, a1
-    andi t1, t1, 0xff # t1 has the new lower 8 bits
-    srl t0, t0, 8 # preserve original top 24 bits
-    sll t0, t0, 8
-    or t0, t0, t1
-    sc t0, 0(a0)
-    beq t0, zero, 1b
-    nop
-    j ra
-    nop
-END(atomic_add_8)
-
-
-/**
- * void
- * atomic_subtract_8(uint8_t *a, uint8_t b)
- * {
- *    *a += b;
- * }
- */
-LEAF(atomic_subtract_8)
-    .set noreorder
-    srl a0, a0, 2 # round down address to be 32-bit aligned
-    sll a0, a0, 2
-1:
-    ll t0, 0(a0)
-    move t1, t0
-    andi t1, t1, 0xff # t1 has the original lower 8 bits
-    subu t1, t1, a1
-    andi t1, t1, 0xff # t1 has the new lower 8 bits
-    srl t0, t0, 8 # preserve original top 24 bits
-    sll t0, t0, 8
-    or t0, t0, t1
-    sc t0, 0(a0)
-    beq t0, zero, 1b
-    nop
-    j ra
-    nop
-END(atomic_subtract_8)
-
 /*
  * atomic 64-bit register read/write assembly language support routines.
 */
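
(Aside: the sub-word routines above all use the same word-aligned masking trick, which is also why they only ever touch the low lane of the enclosing 32-bit word. A C rendering of the deleted atomic_add_8 follows, as a description of the existing technique rather than a proposed change.)

    static void
    add_8_sketch(volatile uint8_t *a, uint8_t b)
    {
        volatile uint32_t *w;
        uint32_t oldv, newv;

        /* The srl/sll pair: round the address down to a 32-bit boundary. */
        w = (volatile uint32_t *)((uintptr_t)a & ~(uintptr_t)3);
        do {
            oldv = *w; /* ll */
            newv = ((oldv >> 8) << 8) | /* preserve original top 24 bits */
                (((oldv & 0xff) + b) & 0xff); /* new lower 8 bits */
        } while (atomic_cmpset_32(w, oldv, newv) == 0); /* sc; retry if raced */
    }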