--- //depot/vendor/freebsd/src/sys/kern/kern_rwlock.c	2007/11/26 22:44:43
+++ //depot/user/attilio/attilio_schedlock/kern/kern_rwlock.c	2007/11/30 16:43:58
@@ -238,19 +238,9 @@
 	KASSERT(rw_wowner(rw) != curthread,
 	    ("%s (%s): wlock already held @ %s:%d", __func__,
 	    rw->lock_object.lo_name, file, line));
-	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER, file, line);
+	WITNESS_CHECKORDER(&rw->lock_object, LOP_NEWORDER | LOP_NORECURSE,
+	    file, line);
 
-	/*
-	 * Note that we don't make any attempt to try to block read
-	 * locks once a writer has blocked on the lock.  The reason is
-	 * that we currently allow for read locks to recurse and we
-	 * don't keep track of all the holders of read locks.  Thus, if
-	 * we were to block readers once a writer blocked and a reader
-	 * tried to recurse on their reader lock after a writer had
-	 * blocked we would end up in a deadlock since the reader would
-	 * be blocked on the writer, and the writer would be blocked
-	 * waiting for the reader to release its original read lock.
-	 */
 	for (;;) {
 		/*
 		 * Handle the easy case.  If no other thread has a write
@@ -263,14 +253,13 @@
 		 * as a read lock with no waiters.
 		 */
 		x = rw->rw_lock;
-		if (x & RW_LOCK_READ) {
+		if ((x & RW_LOCK_READ) && !(x & RW_LOCK_WRITE_WAITERS)) {
 			/*
 			 * The RW_LOCK_READ_WAITERS flag should only be set
-			 * if another thread currently holds a write lock,
-			 * and in that case RW_LOCK_READ should be clear.
+			 * if the lock has been unlocked and write waiters
+			 * were present.
 			 */
-			MPASS((x & RW_LOCK_READ_WAITERS) == 0);
 			if (atomic_cmpset_acq_ptr(&rw->rw_lock, x,
 			    x + RW_ONE_READER)) {
 #ifdef LOCK_PROFILING_SHARED
@@ -296,36 +285,39 @@
 		 * the owner stops running or the state of the lock
 		 * changes.
 		 */
-		owner = (struct thread *)RW_OWNER(x);
-		if (TD_IS_RUNNING(owner)) {
-			if (LOCK_LOG_TEST(&rw->lock_object, 0))
-				CTR3(KTR_LOCK, "%s: spinning on %p held by %p",
-				    __func__, rw, owner);
+		if (!(x & RW_LOCK_READ)) {
+			owner = (struct thread *)RW_OWNER(x);
+			if (TD_IS_RUNNING(owner)) {
+				if (LOCK_LOG_TEST(&rw->lock_object, 0))
+					CTR3(KTR_LOCK,
+					    "%s: spinning on %p held by %p",
+					    __func__, rw, owner);
 #ifdef LOCK_PROFILING_SHARED
-			lock_profile_obtain_lock_failed(&rw->lock_object,
-			    &contested, &waittime);
+				lock_profile_obtain_lock_failed(
+				    &rw->lock_object, &contested, &waittime);
 #endif
-			while ((struct thread*)RW_OWNER(rw->rw_lock) == owner &&
-			    TD_IS_RUNNING(owner))
-				cpu_spinwait();
-			continue;
+				while ((struct thread*)RW_OWNER(rw->rw_lock) ==
+				    owner && TD_IS_RUNNING(owner))
+					cpu_spinwait();
+				continue;
+			}
 		}
 #endif
 
 		/*
 		 * Okay, now it's the hard case.  Some other thread already
-		 * has a write lock, so acquire the turnstile lock so we can
-		 * begin the process of blocking.
+		 * has a write lock or there are write waiters present, so
+		 * acquire the turnstile lock and begin the process of
+		 * blocking.
 		 */
 		ts = turnstile_trywait(&rw->lock_object);
 
 		/*
 		 * The lock might have been released while we spun, so
-		 * recheck its state and restart the loop if there is no
-		 * longer a write lock.
+		 * recheck its state and restart the loop if needed.
 		 */
 		x = rw->rw_lock;
-		if (x & RW_LOCK_READ) {
+		if (x == RW_UNLOCKED) {
 			turnstile_cancel(ts);
 			cpu_spinwait();
 			continue;
@@ -336,19 +328,27 @@
 		 * If the current owner of the lock is executing on another
 		 * CPU quit the hard path and try to spin.
 		 */
-		owner = (struct thread *)RW_OWNER(x);
-		if (TD_IS_RUNNING(owner)) {
-			turnstile_cancel(ts);
-			cpu_spinwait();
-			continue;
+		if (!(x & RW_LOCK_READ)) {
+			owner = (struct thread *)RW_OWNER(x);
+			if (TD_IS_RUNNING(owner)) {
+				turnstile_cancel(ts);
+				cpu_spinwait();
+				continue;
+			}
 		}
 #endif
 
 		/*
-		 * Ok, it's still a write lock.  If the RW_LOCK_READ_WAITERS
-		 * flag is already set, then we can go ahead and block.  If
-		 * it is not set then try to set it.  If we fail to set it
-		 * drop the turnstile lock and restart the loop.
+		 * The lock is held in write mode or it already has write
+		 * waiters.  Just assert these conditions.
+		 */
+		MPASS(!(x & RW_LOCK_READ) || (x & RW_LOCK_WRITE_WAITERS));
+
+		/*
+		 * If the RW_LOCK_READ_WAITERS flag is already set, then
+		 * we can go ahead and block.  If it is not set then try
+		 * to set it.  If we fail to set it drop the turnstile
+		 * lock and restart the loop.
 		 */
 		if (!(x & RW_LOCK_READ_WAITERS)) {
 			if (!atomic_cmpset_ptr(&rw->rw_lock, x,
@@ -394,7 +394,7 @@
 _rw_runlock(struct rwlock *rw, const char *file, int line)
 {
 	struct turnstile *ts;
-	uintptr_t x;
+	uintptr_t x, v, queue;
 
 	KASSERT(rw->rw_lock != RW_DESTROYED,
 	    ("rw_runlock() of destroyed rwlock @ %s:%d", file, line));
@@ -423,14 +423,6 @@
 			}
 			continue;
 		}
-
-
-		/*
-		 * We should never have read waiters while at least one
-		 * thread holds a read lock.  (See note above)
-		 */
-		KASSERT(!(x & RW_LOCK_READ_WAITERS),
-		    ("%s: waiting readers", __func__));
 #ifdef LOCK_PROFILING_SHARED
 		lock_profile_release_lock(&rw->lock_object);
 #endif
@@ -439,7 +431,7 @@
 		 * If there aren't any waiters for a write lock, then try
 		 * to drop it quickly.
 		 */
-		if (!(x & RW_LOCK_WRITE_WAITERS)) {
+		if (!(x & (RW_LOCK_WRITE_WAITERS | RW_LOCK_READ_WAITERS))) {
 
 			/*
 			 * There shouldn't be any flags set and we should
@@ -460,16 +452,13 @@
 		}
 
 		/*
-		 * There should just be one reader with one or more
-		 * writers waiting.
-		 */
-		MPASS(x == (RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS));
-
-		/*
-		 * Ok, we know we have a waiting writer and we think we
-		 * are the last reader, so grab the turnstile lock.
+		 * Ok, we know we have waiters and we think we are the
+		 * last reader, so grab the turnstile lock.
 		 */
 		turnstile_chain_lock(&rw->lock_object);
+		v = rw->rw_lock & (RW_LOCK_READ_WAITERS |
+		    RW_LOCK_WRITE_WAITERS);
+		MPASS(v & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS));
 
 		/*
 		 * Try to drop our lock leaving the lock in a unlocked
@@ -487,8 +476,14 @@
 		 * acquired a read lock, so drop the turnstile lock and
 		 * restart.
 		 */
-		if (!atomic_cmpset_ptr(&rw->rw_lock,
-		    RW_READERS_LOCK(1) | RW_LOCK_WRITE_WAITERS, RW_UNLOCKED)) {
+		x = RW_UNLOCKED;
+		if (v & RW_LOCK_WRITE_WAITERS) {
+			queue = TS_EXCLUSIVE_QUEUE;
+			x |= (v & RW_LOCK_READ_WAITERS);
+		} else
+			queue = TS_SHARED_QUEUE;
+		if (!atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
+		    x)) {
 			turnstile_chain_unlock(&rw->lock_object);
 			continue;
 		}
@@ -505,7 +500,7 @@
 		 */
 		ts = turnstile_lookup(&rw->lock_object);
 		MPASS(ts != NULL);
-		turnstile_broadcast(ts, TS_EXCLUSIVE_QUEUE);
+		turnstile_broadcast(ts, queue);
 		turnstile_unpend(ts, TS_SHARED_LOCK);
 		turnstile_chain_unlock(&rw->lock_object);
 		break;
@@ -618,6 +613,30 @@
 		}
 
 		/*
+		 * If the lock was released by a reader with both readers
+		 * and writers waiting and a writer hasn't woken up and
+		 * acquired the lock yet, rw_lock will be set to the
+		 * value RW_UNLOCKED | RW_LOCK_READ_WAITERS.  If we see
+		 * that value, try to acquire it once.  Note that we have
+		 * to preserve the RW_LOCK_READ_WAITERS flag as there are
+		 * readers waiting still.  If we fail, restart the
+		 * loop.
+		 */
+		if (v == (RW_UNLOCKED | RW_LOCK_READ_WAITERS)) {
+			if (atomic_cmpset_acq_ptr(&rw->rw_lock,
+			    RW_UNLOCKED | RW_LOCK_READ_WAITERS,
+			    tid | RW_LOCK_READ_WAITERS)) {
+				turnstile_claim(ts);
+				CTR2(KTR_LOCK, "%s: %p claimed by new writer",
+				    __func__, rw);
+				break;
+			}
+			turnstile_cancel(ts);
+			cpu_spinwait();
+			continue;
+		}
+
+		/*
 		 * If the RW_LOCK_WRITE_WAITERS flag isn't set, then try to
 		 * set it.  If we fail to set it, then loop back and try
 		 * again.
@@ -740,25 +759,24 @@
 	 * turnstile.  So, do the simple case of no waiters first.
 	 */
 	tid = (uintptr_t)curthread;
-	if (!(rw->rw_lock & RW_LOCK_WRITE_WAITERS)) {
+	if (!(rw->rw_lock & (RW_LOCK_WRITE_WAITERS | RW_LOCK_READ_WAITERS))) {
 		success = atomic_cmpset_ptr(&rw->rw_lock,
 		    RW_READERS_LOCK(1), tid);
 		goto out;
 	}
 
 	/*
-	 * Ok, we think we have write waiters, so lock the
-	 * turnstile.
+	 * Ok, we think we have waiters, so lock the turnstile.
 	 */
 	ts = turnstile_trywait(&rw->lock_object);
 
 	/*
 	 * Try to switch from one reader to a writer again.  This time
-	 * we honor the current state of the RW_LOCK_WRITE_WAITERS
-	 * flag.  If we obtain the lock with the flag set, then claim
-	 * ownership of the turnstile.
+	 * we honor the current state of the waiters flags.  If we
+	 * obtain the lock with the flags set, then claim ownership
+	 * of the turnstile.
 	 */
-	v = rw->rw_lock & RW_LOCK_WRITE_WAITERS;
+	v = rw->rw_lock & (RW_LOCK_WRITE_WAITERS | RW_LOCK_READ_WAITERS);
 	success = atomic_cmpset_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
 	    tid | v);
 	if (success && v)
@@ -795,8 +813,7 @@
 	/*
 	 * Convert from a writer to a single reader.  First we handle
 	 * the easy case with no waiters.  If there are any waiters, we
-	 * lock the turnstile, "disown" the lock, and awaken any read
-	 * waiters.
+	 * lock the turnstile and "disown" the lock.
 	 */
 	tid = (uintptr_t)curthread;
 	if (atomic_cmpset_rel_ptr(&rw->rw_lock, tid, RW_READERS_LOCK(1)))
@@ -807,24 +824,17 @@
 	 * read the waiter flags without any races.
 	 */
 	turnstile_chain_lock(&rw->lock_object);
-	v = rw->rw_lock;
+	v = rw->rw_lock & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS);
 	MPASS(v & (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS));
 
 	/*
-	 * Downgrade from a write lock while preserving
-	 * RW_LOCK_WRITE_WAITERS and give up ownership of the
-	 * turnstile.  If there are any read waiters, wake them up.
+	 * Downgrade from a write lock while preserving the waiters
+	 * flags and give up ownership of the turnstile.
 	 */
 	ts = turnstile_lookup(&rw->lock_object);
 	MPASS(ts != NULL);
-	if (v & RW_LOCK_READ_WAITERS)
-		turnstile_broadcast(ts, TS_SHARED_QUEUE);
-	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) |
-	    (v & RW_LOCK_WRITE_WAITERS));
-	if (v & RW_LOCK_READ_WAITERS)
-		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
-	else if (ts)
-		turnstile_disown(ts);
+	atomic_store_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v);
+	turnstile_disown(ts);
 	turnstile_chain_unlock(&rw->lock_object);
 out:
 	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
--- //depot/vendor/freebsd/src/sys/kern/subr_turnstile.c	2007/11/14 06:27:32
+++ //depot/user/attilio/attilio_schedlock/kern/subr_turnstile.c	2007/11/30 16:43:58
@@ -674,8 +674,6 @@
 	td = curthread;
 	mtx_assert(&ts->ts_lock, MA_OWNED);
-	if (queue == TS_SHARED_QUEUE)
-		MPASS(owner != NULL);
 	if (owner)
 		MPASS(owner->td_proc->p_magic == P_MAGIC);
 	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
--- //depot/vendor/freebsd/src/sys/kern/subr_witness.c	2007/11/24 04:37:19
+++ //depot/user/attilio/attilio_schedlock/kern/subr_witness.c	2007/11/29 20:29:57
@@ -922,6 +922,9 @@
 			    lock1->li_file, lock1->li_line);
 			panic("excl->share");
 		}
+		if (flags & LOP_NORECURSE)
+			panic("recurse on non-recursive lock %s @ %s:%d",
+			    lock->lo_name, file, line);
 		return;
 	}
--- //depot/vendor/freebsd/src/sys/sys/lock.h	2007/11/18 14:44:31
+++ //depot/user/attilio/attilio_schedlock/sys/lock.h	2007/11/29 20:29:57
@@ -103,6 +103,7 @@
 #define	LOP_TRYLOCK	0x00000004	/* Don't check lock order. */
 #define	LOP_EXCLUSIVE	0x00000008	/* Exclusive lock. */
 #define	LOP_DUPOK	0x00000010	/* Don't check for duplicate acquires */
+#define	LOP_NORECURSE	0x00000020	/* Don't allow recursion for the lock */
 
 /* Flags passed to witness_assert. */
 #define	LA_UNLOCKED	0x00000000	/* Lock is unlocked. */
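
To make the new writer-preference policy concrete, here is a minimal
single-threaded userland model of the two transitions the patch changes:
the reader fast path now also bails out when RW_LOCK_WRITE_WAITERS is
set, and the last-reader unlock wakes the exclusive queue first while
keeping RW_LOCK_READ_WAITERS in the lock word.  The flag values, helper
names, and layout below are invented for illustration, and C11 atomics
stand in for the kernel's atomic_cmpset primitives; this is a sketch of
the state machine, not the kernel code.

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Invented flag layout; only the relationships matter, not the values. */
#define	RW_LOCK_READ		0x01UL	/* no writer owns the lock */
#define	RW_LOCK_READ_WAITERS	0x02UL
#define	RW_LOCK_WRITE_WAITERS	0x04UL
#define	RW_ONE_READER		0x08UL	/* reader count sits above the flags */
#define	RW_UNLOCKED		RW_LOCK_READ
#define	RW_READERS_LOCK_1	(RW_LOCK_READ | RW_ONE_READER)

static _Atomic uintptr_t rw_lock = RW_UNLOCKED;

/* Reader fast path: now also fails when write waiters are queued. */
static int
try_rlock_fast(void)
{
	uintptr_t x = atomic_load(&rw_lock);

	if ((x & RW_LOCK_READ) && !(x & RW_LOCK_WRITE_WAITERS))
		return (atomic_compare_exchange_strong(&rw_lock, &x,
		    x + RW_ONE_READER));
	return (0);		/* caller takes the turnstile slow path */
}

/*
 * Last-reader unlock: writers are preferred.  With both queues
 * populated, only the exclusive queue is woken and RW_LOCK_READ_WAITERS
 * stays in the lock word so the queued readers are not forgotten.
 */
static int
try_last_runlock(void)
{
	uintptr_t expect, v, x;

	v = atomic_load(&rw_lock) &
	    (RW_LOCK_READ_WAITERS | RW_LOCK_WRITE_WAITERS);
	x = RW_UNLOCKED;
	if (v & RW_LOCK_WRITE_WAITERS)
		x |= (v & RW_LOCK_READ_WAITERS); /* wake exclusive queue */
	/* else: wake the shared queue instead */
	expect = RW_READERS_LOCK_1 | v;
	return (atomic_compare_exchange_strong(&rw_lock, &expect, x));
}

int
main(void)
{
	/* One reader holds the lock; both waiter queues are populated. */
	atomic_store(&rw_lock, RW_READERS_LOCK_1 | RW_LOCK_READ_WAITERS |
	    RW_LOCK_WRITE_WAITERS);

	/* A new reader may no longer slip past the queued writer. */
	assert(try_rlock_fast() == 0);

	/*
	 * The last reader leaves RW_UNLOCKED | RW_LOCK_READ_WAITERS:
	 * the handoff state a fresh writer can claim with one CAS in
	 * the _rw_wlock_hard hunk above.
	 */
	assert(try_last_runlock() != 0);
	assert(atomic_load(&rw_lock) ==
	    (RW_UNLOCKED | RW_LOCK_READ_WAITERS));
	printf("handoff state: %#lx\n",
	    (unsigned long)atomic_load(&rw_lock));
	return (0);
}

The point of the model is the invariant, not the numbers: under the new
scheme a reader can be the thread that wakes writers, so the read-waiters
bit must survive that wakeup or the queued readers would sleep forever.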
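The subr_witness.c and sys/lock.h hunks enforce the assumption that makes
all of this safe: read locks may no longer recurse, because a reader
re-acquiring its own lock behind a queued writer would deadlock (exactly
the scenario the comment deleted from the top of _rw_rlock described).
Below is a hedged sketch of that check; witness_checkorder_sketch() and
struct lock_sketch are hypothetical stand-ins for the real
witness_checkorder() path patched above, not the WITNESS API.

#include <stdio.h>
#include <stdlib.h>

#define	LOP_NORECURSE	0x00000020	/* as added to sys/lock.h above */

struct lock_sketch {
	const char	*lo_name;
	int		held;		/* stand-in for witness bookkeeping */
};

/* Hypothetical, simplified model of the patched recursion check. */
static void
witness_checkorder_sketch(struct lock_sketch *lo, int flags,
    const char *file, int line)
{
	if (lo->held) {
		/* Recursion used to be tolerated for read locks. */
		if (flags & LOP_NORECURSE) {
			fprintf(stderr,
			    "panic: recurse on non-recursive lock %s @ %s:%d\n",
			    lo->lo_name, file, line);
			exit(1);
		}
		return;
	}
	lo->held = 1;
}

int
main(void)
{
	struct lock_sketch example = { "example_rw", 0 };

	witness_checkorder_sketch(&example, LOP_NORECURSE, __FILE__, __LINE__);
	/* The second (recursive) acquire now trips the panic. */
	witness_checkorder_sketch(&example, LOP_NORECURSE, __FILE__, __LINE__);
	return (0);
}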