--- //depot/vendor/freebsd/src/sys/i386/include/atomic.h	2004/03/12 21:50:47
+++ //depot/projects/smpng/sys/i386/include/atomic.h	2004/11/09 14:42:52
@@ -172,13 +172,14 @@
 
 #if defined(__GNUC__) || defined(__INTEL_COMPILER)
 
-#if defined(I386_CPU)
+#if !defined(SMP)
 
 /*
- * We assume that a = b will do atomic loads and stores.
- *
- * XXX: This is _NOT_ safe on a P6 or higher because it does not guarantee
- * memory ordering.  These should only be used on a 386.
+ * We assume that a = b will do atomic loads and stores.  However, on a
+ * PentiumPro or higher, reads may pass writes, so for that case we have
+ * to use a serializing instruction (i.e. with LOCK) to do the load in
+ * SMP kernels.  For UP kernels, however, the cache of the single processor
+ * is always consistent, so we don't need any memory barriers.
  */
 #define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
 static __inline u_##TYPE				\
@@ -190,12 +191,12 @@
 static __inline void					\
 atomic_store_rel_##TYPE(volatile u_##TYPE *p, u_##TYPE v)\
 {							\
+	__asm __volatile("" : : : "memory");		\
 	*p = v;						\
-	__asm __volatile("" : : : "memory");		\
 }							\
 struct __hack
 
-#else /* !defined(I386_CPU) */
+#else /* defined(SMP) */
 
 #define ATOMIC_STORE_LOAD(TYPE, LOP, SOP)		\
 static __inline u_##TYPE				\
@@ -224,7 +225,7 @@
 }							\
 struct __hack
 
-#endif /* defined(I386_CPU) */
+#endif /* !defined(SMP) */
 
 #else /* !(defined(__GNUC__) || defined(__INTEL_COMPILER)) */
--- //depot/vendor/freebsd/src/sys/i386/include/cpufunc.h	2004/04/07 20:52:05
+++ //depot/projects/smpng/sys/i386/include/cpufunc.h	2004/08/25 00:38:37
@@ -38,6 +38,8 @@
 #ifndef _MACHINE_CPUFUNC_H_
 #define	_MACHINE_CPUFUNC_H_
 
+#include <machine/psl.h>
+
 struct region_descriptor;
 
 #define readb(va)	(*(volatile u_int8_t *) (va))
@@ -608,14 +610,16 @@
 	register_t eflags;
 
 	eflags = read_eflags();
-	disable_intr();
+	if (eflags & PSL_I)
+		disable_intr();
 	return (eflags);
 }
 
 static __inline void
 intr_restore(register_t eflags)
 {
-	write_eflags(eflags);
+	if (eflags & PSL_I)
+		enable_intr();
 }
 
 #else /* !(__GNUC__ || __INTEL_COMPILER) */
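
The atomic.h half of the patch does two things: the UP/SMP split now keys off
SMP rather than the old I386_CPU option, and the compiler barrier in
atomic_store_rel_*() moves to before the store.  The second change is the
subtle one: a release store has to keep every earlier memory access before it,
so the barrier must sit on the earlier side of the assignment.  A minimal
sketch of why the placement matters, using hypothetical data/flag variables
that are not part of the patch:

	#include <sys/types.h>

	static volatile u_int data, flag;

	/*
	 * Publish "data" and then raise "flag" with release semantics,
	 * the way the patched UP atomic_store_rel_int() does it.  The
	 * empty asm with a "memory" clobber is only a compiler barrier,
	 * but on a uniprocessor x86 that is enough: the CPU observes its
	 * own stores in program order, so we only have to keep the
	 * *compiler* from sinking the data store below the flag store.
	 */
	static __inline void
	publish(u_int v)
	{
		data = v;
		__asm __volatile("" : : : "memory"); /* barrier before */
		flag = 1;		/* the release store itself */
	}

With the old ordering (store first, barrier after), nothing stopped the
compiler from moving "data = v" past "flag = 1", which breaks the pairing
with an atomic_load_acq_*() on the reader side.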
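
The cpufunc.h half makes the interrupt save/restore pair cheap when
interrupts are already off.  read_eflags() records PSL_I, the interrupt
enable bit (hence the new include of machine/psl.h), and intr_restore() now
issues an enable_intr() (an sti) only when that bit was set, instead of
unconditionally rewriting the whole flags register with write_eflags() (a
comparatively expensive popfl).  A usage sketch of the resulting
nesting-safe critical section; the function returning eflags in the first
hunk is taken to be FreeBSD's intr_disable(), whose name falls just outside
the quoted context lines:

	#include <sys/types.h>
	#include <machine/cpufunc.h>

	/*
	 * If the caller already ran with interrupts disabled, the patched
	 * intr_disable() skips the cli and intr_restore() skips the sti,
	 * so a nested section neither re-enables interrupts early nor
	 * pays for redundant writes to the flags register.
	 */
	static void
	frob_percpu_data(void)
	{
		register_t eflags;

		eflags = intr_disable();
		/* ... touch state shared with an interrupt handler ... */
		intr_restore(eflags);
	}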