--- //depot/vendor/freebsd/src/sys/i386/include/atomic.h	2004/03/12 21:50:47
+++ //depot/projects/smpng/sys/i386/include/atomic.h	2004/07/29 19:55:07
@@ -69,7 +69,7 @@
 
 int	atomic_cmpset_int(volatile u_int *dst, u_int exp, u_int src);
 
-#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)			\
+#define	ATOMIC_STORE_LOAD(TYPE, LOP)				\
 u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);	\
 void		atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
 
@@ -175,12 +175,12 @@
 #if defined(I386_CPU)
 
 /*
- * We assume that a = b will do atomic loads and stores.
- *
- * XXX: This is _NOT_ safe on a P6 or higher because it does not guarantee
- * memory ordering.  These should only be used on a 386.
+ * We assume that a = b will do atomic loads and stores.  However, on a
+ * PentiumPro or higher reads may pass writes, so for that case we have
+ * to use a serializing instruction (i.e. with LOCK) to do the load.  For
+ * the 386 case, we can use a simple store since 386's don't support SMP.
  */
-#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
+#define	ATOMIC_STORE_LOAD(TYPE, LOP)			\
 static __inline u_##TYPE				\
 atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
 {							\
@@ -190,14 +190,14 @@
 static __inline void					\
 atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
 {							\
+	__asm __volatile("" : : : "memory");		\
 	*p = v;						\
-	__asm __volatile("" : : : "memory");		\
 }							\
 struct __hack
 
 #else /* !defined(I386_CPU) */
 
-#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
+#define	ATOMIC_STORE_LOAD(TYPE, LOP)			\
 static __inline u_##TYPE				\
 atomic_load_acq_##TYPE(volatile u_##TYPE *p)		\
 {							\
@@ -211,16 +211,11 @@
 	return (res);					\
 }							\
 							\
-/*							\
- * The XCHG instruction asserts LOCK automagically.	\
- */							\
 static __inline void					\
 atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
 {							\
-	__asm __volatile(SOP				\
-	: "+m" (*p),			/* 0 */		\
-	  "+r" (v)			/* 1 */		\
-	: : "memory");					\
+	__asm __volatile("" : : : "memory");		\
+	*p = v;						\
 }							\
 struct __hack
 
@@ -230,7 +225,7 @@
 
 extern int atomic_cmpset_int(volatile u_int *, u_int, u_int);
 
-#define	ATOMIC_STORE_LOAD(TYPE, LOP, SOP)				\
+#define	ATOMIC_STORE_LOAD(TYPE, LOP)					\
 extern u_##TYPE	atomic_load_acq_##TYPE(volatile u_##TYPE *p);		\
 extern void	atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)
 
@@ -258,10 +253,10 @@
 ATOMIC_ASM(add,	     long,  "addl %1,%0",  "ir",  v);
 ATOMIC_ASM(subtract, long,  "subl %1,%0",  "ir",  v);
 
-ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1", "xchgb %b1,%0");
-ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1", "xchgw %w1,%0");
-ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1",  "xchgl %1,%0");
-ATOMIC_STORE_LOAD(long,	"cmpxchgl %0,%1",  "xchgl %1,%0");
+ATOMIC_STORE_LOAD(char,	"cmpxchgb %b0,%1");
+ATOMIC_STORE_LOAD(short,"cmpxchgw %w0,%1");
+ATOMIC_STORE_LOAD(int,	"cmpxchgl %0,%1");
+ATOMIC_STORE_LOAD(long,	"cmpxchgl %0,%1");
 
 #undef ATOMIC_ASM
 #undef ATOMIC_STORE_LOAD

--- //depot/vendor/freebsd/src/sys/i386/include/cpufunc.h	2004/04/07 20:52:05
+++ //depot/projects/smpng/sys/i386/include/cpufunc.h	2004/07/29 22:40:40
@@ -38,6 +38,8 @@
 #ifndef	_MACHINE_CPUFUNC_H_
 #define	_MACHINE_CPUFUNC_H_
 
+#include <machine/psl.h>
+
 struct region_descriptor;
 
 #define	readb(va)	(*(volatile u_int8_t *) (va))
@@ -608,14 +613,16 @@
 	register_t eflags;
 
 	eflags = read_eflags();
-	disable_intr();
+	if (eflags & PSL_I)
+		disable_intr();
 	return (eflags);
 }
 
 static __inline void
 intr_restore(register_t eflags)
 {
-	write_eflags(eflags);
+	if (eflags & PSL_I)
+		enable_intr();
 }
 
 #else /* !(__GNUC__ || __INTEL_COMPILER) */

--- //depot/vendor/freebsd/src/sys/kern/kern_mutex.c	2004/07/27 16:35:28
+++ //depot/projects/smpng/sys/kern/kern_mutex.c	2004/07/29 20:49:29
@@ -417,10 +417,10 @@
  * sleep waiting for it), or if we need to recurse on it.
  */
 void
-_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line)
+_mtx_lock_sleep(struct mtx *m, struct thread *td, int opts, const char *file,
+    int line)
 {
 	struct turnstile *ts;
-	struct thread *td = curthread;
 #if defined(SMP) && !defined(NO_ADAPTIVE_MUTEXES)
 	struct thread *owner;
 #endif
@@ -568,7 +568,8 @@
  * is handled inline.
  */
 void
-_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line)
+_mtx_lock_spin(struct mtx *m, struct thread *td, int opts, const char *file,
+    int line)
 {
 	int i = 0;
 
@@ -576,7 +577,7 @@
 		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);
 
 	for (;;) {
-		if (_obtain_lock(m, curthread))
+		if (_obtain_lock(m, td))
 			break;
 
 		/* Give interrupts a chance while we spin. */

--- //depot/vendor/freebsd/src/sys/sys/mutex.h	2004/06/20 21:51:31
+++ //depot/projects/smpng/sys/sys/mutex.h	2004/07/29 20:49:29
@@ -100,17 +100,19 @@
 void	mtx_destroy(struct mtx *m);
 void	mtx_sysinit(void *arg);
 void	mutex_init(void);
-void	_mtx_lock_sleep(struct mtx *m, int opts, const char *file, int line);
+void	_mtx_lock_sleep(struct mtx *m, struct thread *td, int opts,
+	    const char *file, int line);
 void	_mtx_unlock_sleep(struct mtx *m, int opts, const char *file, int line);
-void	_mtx_lock_spin(struct mtx *m, int opts, const char *file, int line);
+void	_mtx_lock_spin(struct mtx *m, struct thread *td, int opts,
+	    const char *file, int line);
 void	_mtx_unlock_spin(struct mtx *m, int opts, const char *file, int line);
 int	_mtx_trylock(struct mtx *m, int opts, const char *file, int line);
 void	_mtx_lock_flags(struct mtx *m, int opts, const char *file, int line);
 void	_mtx_unlock_flags(struct mtx *m, int opts, const char *file, int line);
 void	_mtx_lock_spin_flags(struct mtx *m, int opts, const char *file,
-	   int line);
+	    int line);
 void	_mtx_unlock_spin_flags(struct mtx *m, int opts, const char *file,
-	   int line);
+	    int line);
 #ifdef INVARIANT_SUPPORT
 void	_mtx_assert(struct mtx *m, int what, const char *file, int line);
 #endif
@@ -144,8 +146,10 @@
  */
 #ifndef _get_sleep_lock
 #define _get_sleep_lock(mp, tid, opts, file, line) do {			\
-	if (!_obtain_lock((mp), (tid)))					\
-		_mtx_lock_sleep((mp), (opts), (file), (line));		\
+	struct thread *_tid = (tid);					\
+									\
+	if (!_obtain_lock((mp), _tid))					\
+		_mtx_lock_sleep((mp), _tid, (opts), (file), (line));	\
 } while (0)
 #endif
 
@@ -158,12 +162,14 @@
  */
 #ifndef _get_spin_lock
 #define _get_spin_lock(mp, tid, opts, file, line) do {			\
+	struct thread *_tid = (tid);					\
+									\
 	critical_enter();						\
-	if (!_obtain_lock((mp), (tid))) {				\
-		if ((mp)->mtx_lock == (uintptr_t)(tid))			\
+	if (!_obtain_lock((mp), _tid)) {				\
+		if ((mp)->mtx_lock == (uintptr_t)_tid)			\
 			(mp)->mtx_recurse++;				\
 		else							\
-			_mtx_lock_spin((mp), (opts), (file), (line));	\
+			_mtx_lock_spin((mp), _tid, (opts), (file), (line)); \
 	}								\
 } while (0)
 #endif
@@ -195,7 +201,7 @@
 		(mp)->mtx_recurse--;					\
 	else								\
 		_release_lock_quick((mp));				\
-	critical_exit();						\
+	critical_exit();					\
 } while (0)
 #endif
 
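For reference, here is a minimal standalone sketch (mine, not part of the patch; the names load_acq_int and store_rel_int and the hard-coded "lock" prefix are made up for illustration, standing in for what the ATOMIC_STORE_LOAD macro would expand to for u_int in the !I386_CPU case). The point of the atomic.h hunks is visible in the second function: ia32 does not reorder a store with earlier loads or stores, so a release store needs only a compiler barrier followed by a plain store, and the XCHG-based SOP argument (with its implicit LOCK) can be dropped. Acquire loads still need a serializing instruction, since reads may pass writes on a PentiumPro or newer.

#include <sys/types.h>		/* u_int */

static __inline u_int
load_acq_int(volatile u_int *p)
{
	u_int res;

	res = 0;
	/*
	 * A LOCKed CMPXCHG serializes.  Whether the compare hits or
	 * misses, %eax ends up holding the current value of *p and
	 * *p itself is left unchanged, so this is a load that later
	 * memory accesses cannot pass.
	 */
	__asm __volatile("lock; cmpxchgl %0,%1"
	    : "+a" (res), "+m" (*p)
	    : : "memory");
	return (res);
}

static __inline void
store_rel_int(volatile u_int *p, u_int v)
{
	/*
	 * The empty asm emits no instruction; it is only a compiler
	 * barrier.  The CPU keeps stores ordered after earlier loads
	 * and stores, so the plain store below already has release
	 * semantics on ia32.
	 */
	__asm __volatile("" : : : "memory");
	*p = v;
}

If I read the mutex.h hunks right, this is also where the win shows up in practice: _release_lock_quick() boils down to an atomic_store_rel variant, so unlocking no longer pays for a bus-locked XCHG, and having _get_sleep_lock()/_get_spin_lock() cache (tid) in _tid and pass it down to _mtx_lock_sleep()/_mtx_lock_spin() means curthread is evaluated once per acquisition instead of again in the slow path.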