Index: sys/conf/files
===================================================================
--- sys/conf/files	(revision 268894)
+++ sys/conf/files	(working copy)
@@ -2927,6 +2927,7 @@
 kern/kern_sharedpage.c		standard
 kern/kern_shutdown.c		standard
 kern/kern_sig.c			standard
+kern/kern_spinqueue.c		standard
 kern/kern_switch.c		standard
 kern/kern_sx.c			standard
 kern/kern_synch.c		standard
Index: sys/kern/kern_mutex.c
===================================================================
--- sys/kern/kern_mutex.c	(revision 268894)
+++ sys/kern/kern_mutex.c	(working copy)
@@ -58,6 +58,7 @@
 #include
 #include
 #include
+#include <sys/spinqueue.h>
 #include
 #include
@@ -370,6 +371,7 @@
 {
 	struct mtx *m;
 	struct turnstile *ts;
+	struct spinqueue me;
 	uintptr_t v;
 #ifdef ADAPTIVE_MUTEXES
 	volatile struct thread *owner;
@@ -445,7 +447,7 @@
 	}
 #endif
 
-	ts = turnstile_trywait(&m->lock_object);
+	ts = turnstile_trywait(&m->lock_object, &me);
 	v = m->mtx_lock;
 
 	/*
@@ -453,7 +455,7 @@
 	 * the turnstile chain lock.
 	 */
 	if (v == MTX_UNOWNED) {
-		turnstile_cancel(ts);
+		turnstile_cancel(ts, &me);
 		continue;
 	}
@@ -467,7 +469,7 @@
 	 */
 	owner = (struct thread *)(v & ~MTX_FLAGMASK);
 	if (TD_IS_RUNNING(owner)) {
-		turnstile_cancel(ts);
+		turnstile_cancel(ts, &me);
 		continue;
 	}
 #endif
@@ -479,7 +481,7 @@
 	 */
 	if ((v & MTX_CONTESTED) == 0 &&
 	    !atomic_cmpset_ptr(&m->mtx_lock, v, v | MTX_CONTESTED)) {
-		turnstile_cancel(ts);
+		turnstile_cancel(ts, &me);
 		continue;
 	}
@@ -505,7 +507,7 @@
 #ifdef KDTRACE_HOOKS
 	sleep_time -= lockstat_nsecs();
 #endif
-	turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE);
+	turnstile_wait(ts, mtx_owner(m), TS_EXCLUSIVE_QUEUE, &me);
 #ifdef KDTRACE_HOOKS
 	sleep_time += lockstat_nsecs();
 	sleep_cnt++;
@@ -732,6 +734,7 @@
 {
 	struct mtx *m;
 	struct turnstile *ts;
+	struct spinqueue me;
 
 	if (SCHEDULER_STOPPED())
 		return;
@@ -750,7 +753,7 @@
 	 * We have to lock the chain before the turnstile so this turnstile
 	 * can be removed from the hash list if it is empty.
 	 */
-	turnstile_chain_lock(&m->lock_object);
+	turnstile_chain_lock(&m->lock_object, &me);
 	ts = turnstile_lookup(&m->lock_object);
 	if (LOCK_LOG_TEST(&m->lock_object, opts))
 		CTR1(KTR_LOCK, "_mtx_unlock_sleep: %p contested", m);
@@ -763,7 +766,7 @@
 	 * unlock the chain lock so a new turnstile may take it's place.
 	 */
 	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
-	turnstile_chain_unlock(&m->lock_object);
+	turnstile_chain_unlock(&m->lock_object, &me);
 }
 
 /*
Index: sys/kern/kern_rmlock.c
===================================================================
--- sys/kern/kern_rmlock.c	(revision 268894)
+++ sys/kern/kern_rmlock.c	(working copy)
@@ -49,6 +49,7 @@
 #include
 #include
 #include
+#include <sys/spinqueue.h>
 #include
 #include
@@ -482,10 +483,11 @@
 	if (tracker->rmp_flags & RMPF_SIGNAL) {
 		struct rmlock *rm;
 		struct turnstile *ts;
+		struct spinqueue me;
 
 		rm = tracker->rmp_rmlock;
-		turnstile_chain_lock(&rm->lock_object);
+		turnstile_chain_lock(&rm->lock_object, &me);
 		mtx_unlock_spin(&rm_spinlock);
 		ts = turnstile_lookup(&rm->lock_object);
@@ -492,7 +494,7 @@
 		turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
 		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
-		turnstile_chain_unlock(&rm->lock_object);
+		turnstile_chain_unlock(&rm->lock_object, &me);
 	} else
 		mtx_unlock_spin(&rm_spinlock);
 }
@@ -526,6 +528,7 @@
 {
 	struct rm_priotracker *prio;
 	struct turnstile *ts;
+	struct spinqueue me;
 	cpuset_t readcpus;
 
 	if (SCHEDULER_STOPPED())
@@ -559,11 +562,11 @@
 	mtx_lock_spin(&rm_spinlock);
 	while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
-		ts = turnstile_trywait(&rm->lock_object);
+		ts = turnstile_trywait(&rm->lock_object, &me);
 		prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
 		mtx_unlock_spin(&rm_spinlock);
 		turnstile_wait(ts, prio->rmp_thread,
-		    TS_EXCLUSIVE_QUEUE);
+		    TS_EXCLUSIVE_QUEUE, &me);
 		mtx_lock_spin(&rm_spinlock);
 	}
 	mtx_unlock_spin(&rm_spinlock);
Index: sys/kern/kern_rwlock.c
===================================================================
--- sys/kern/kern_rwlock.c	(revision 268894)
+++ sys/kern/kern_rwlock.c	(working copy)
@@ -46,6 +46,7 @@
 #include
 #include
 #include
+#include <sys/spinqueue.h>
 #include
@@ -338,6 +339,7 @@
 {
 	struct rwlock *rw;
 	struct turnstile *ts;
+	struct spinqueue me;
 #ifdef ADAPTIVE_RWLOCKS
 	volatile struct thread *owner;
 	int spintries = 0;
@@ -451,7 +453,7 @@
 	 * acquire the turnstile lock so we can begin the process
 	 * of blocking.
 	 */
-	ts = turnstile_trywait(&rw->lock_object);
+	ts = turnstile_trywait(&rw->lock_object, &me);
 
 	/*
 	 * The lock might have been released while we spun, so
@@ -459,7 +461,7 @@
 	 */
 	v = rw->rw_lock;
 	if (RW_CAN_READ(v)) {
-		turnstile_cancel(ts);
+		turnstile_cancel(ts, &me);
 		continue;
 	}
@@ -474,7 +476,7 @@
 	if ((v & RW_LOCK_READ) == 0) {
 		owner = (struct thread *)RW_OWNER(v);
 		if (TD_IS_RUNNING(owner)) {
-			turnstile_cancel(ts);
+			turnstile_cancel(ts, &me);
 			continue;
 		}
 	}
@@ -494,7 +496,7 @@
 	if (!(v & RW_LOCK_READ_WAITERS)) {
 		if (!atomic_cmpset_ptr(&rw->rw_lock, v,
 		    v | RW_LOCK_READ_WAITERS)) {
-			turnstile_cancel(ts);
+			turnstile_cancel(ts, &me);
 			continue;
 		}
 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
@@ -512,7 +514,7 @@
 #ifdef KDTRACE_HOOKS
 	sleep_time -= lockstat_nsecs();
 #endif
-	turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE);
+	turnstile_wait(ts, rw_owner(rw), TS_SHARED_QUEUE, &me);
 #ifdef KDTRACE_HOOKS
 	sleep_time += lockstat_nsecs();
 	sleep_cnt++;
@@ -585,6 +587,7 @@
 {
 	struct rwlock *rw;
 	struct turnstile *ts;
+	struct spinqueue me;
 	uintptr_t x, v, queue;
 
 	if (SCHEDULER_STOPPED())
@@ -638,7 +641,7 @@
 	 * Ok, we know we have waiters and we think we are the
 	 * last reader, so grab the turnstile lock.
 	 */
-	turnstile_chain_lock(&rw->lock_object);
+	turnstile_chain_lock(&rw->lock_object, &me);
 	v = rw->rw_lock & (RW_LOCK_WAITERS | RW_LOCK_WRITE_SPINNER);
 	MPASS(v & RW_LOCK_WAITERS);
@@ -666,7 +669,7 @@
 		queue = TS_SHARED_QUEUE;
 	if (!atomic_cmpset_rel_ptr(&rw->rw_lock, RW_READERS_LOCK(1) | v,
 	    x)) {
-		turnstile_chain_unlock(&rw->lock_object);
+		turnstile_chain_unlock(&rw->lock_object, &me);
 		continue;
 	}
 	if (LOCK_LOG_TEST(&rw->lock_object, 0))
@@ -684,7 +687,7 @@
 	MPASS(ts != NULL);
 	turnstile_broadcast(ts, queue);
 	turnstile_unpend(ts, TS_SHARED_LOCK);
-	turnstile_chain_unlock(&rw->lock_object);
+	turnstile_chain_unlock(&rw->lock_object, &me);
 	break;
 }
 LOCKSTAT_PROFILE_RELEASE_LOCK(LS_RW_RUNLOCK_RELEASE, rw);
@@ -702,7 +705,8 @@
     int line)
 {
 	struct rwlock *rw;
-	struct turnstile *ts;
+	struct turnstile *ts;
+	struct spinqueue me;
 #ifdef ADAPTIVE_RWLOCKS
 	volatile struct thread *owner;
 	int spintries = 0;
@@ -789,7 +793,7 @@
 		continue;
 	}
 #endif
-	ts = turnstile_trywait(&rw->lock_object);
+	ts = turnstile_trywait(&rw->lock_object, &me);
 	v = rw->rw_lock;
 
 #ifdef ADAPTIVE_RWLOCKS
@@ -803,7 +807,7 @@
 	if (!(v & RW_LOCK_READ)) {
 		owner = (struct thread *)RW_OWNER(v);
 		if (TD_IS_RUNNING(owner)) {
-			turnstile_cancel(ts);
+			turnstile_cancel(ts, &me);
 			continue;
 		}
 	}
@@ -820,12 +824,12 @@
 		x &= ~RW_LOCK_WRITE_SPINNER;
 		if (atomic_cmpset_acq_ptr(&rw->rw_lock, v, tid | x)) {
 			if (x)
-				turnstile_claim(ts);
+				turnstile_claim(ts, &me);
 			else
-				turnstile_cancel(ts);
+				turnstile_cancel(ts, &me);
 			break;
 		}
-		turnstile_cancel(ts);
+		turnstile_cancel(ts, &me);
 		continue;
 	}
 	/*
@@ -836,7 +840,7 @@
 	if (!(v & RW_LOCK_WRITE_WAITERS)) {
 		if (!atomic_cmpset_ptr(&rw->rw_lock, v,
 		    v | RW_LOCK_WRITE_WAITERS)) {
-			turnstile_cancel(ts);
+			turnstile_cancel(ts, &me);
 			continue;
 		}
 		if (LOCK_LOG_TEST(&rw->lock_object, 0))
@@ -853,7 +857,7 @@
 #ifdef KDTRACE_HOOKS
 	sleep_time -= lockstat_nsecs();
 #endif
-	turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE);
+	turnstile_wait(ts, rw_owner(rw), TS_EXCLUSIVE_QUEUE, &me);
 #ifdef KDTRACE_HOOKS
 	sleep_time += lockstat_nsecs();
 	sleep_cnt++;
@@ -890,6 +894,7 @@
 {
 	struct rwlock *rw;
 	struct turnstile *ts;
+	struct spinqueue me;
 	uintptr_t v;
 	int queue;
 
@@ -911,7 +916,7 @@
 	if (LOCK_LOG_TEST(&rw->lock_object, 0))
 		CTR2(KTR_LOCK, "%s: %p contested", __func__, rw);
 
-	turnstile_chain_lock(&rw->lock_object);
+	turnstile_chain_lock(&rw->lock_object, &me);
 	ts = turnstile_lookup(&rw->lock_object);
 	MPASS(ts != NULL);
@@ -945,7 +950,7 @@
 	turnstile_broadcast(ts, queue);
 	atomic_store_rel_ptr(&rw->rw_lock, v);
 	turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
-	turnstile_chain_unlock(&rw->lock_object);
+	turnstile_chain_unlock(&rw->lock_object, &me);
 }
 
 /*
@@ -958,6 +963,7 @@
 {
 	struct rwlock *rw;
 	uintptr_t v, x, tid;
+	struct spinqueue me;
 	struct turnstile *ts;
 	int success;
 
@@ -993,10 +999,10 @@
 	/*
 	 * Ok, we think we have waiters, so lock the turnstile.
 	 */
-	ts = turnstile_trywait(&rw->lock_object);
+	ts = turnstile_trywait(&rw->lock_object, &me);
 	v = rw->rw_lock;
 	if (RW_READERS(v) > 1) {
-		turnstile_cancel(ts);
+		turnstile_cancel(ts, &me);
 		break;
 	}
 	/*
@@ -1009,12 +1015,12 @@
 	success = atomic_cmpset_ptr(&rw->rw_lock, v, tid | x);
 	if (success) {
 		if (x)
-			turnstile_claim(ts);
+			turnstile_claim(ts, &me);
 		else
-			turnstile_cancel(ts);
+			turnstile_cancel(ts, &me);
 		break;
 	}
-	turnstile_cancel(ts);
+	turnstile_cancel(ts, &me);
 }
 LOCK_LOG_TRY("WUPGRADE", &rw->lock_object, 0, success, file, line);
 if (success) {
@@ -1034,6 +1040,7 @@
 {
 	struct rwlock *rw;
 	struct turnstile *ts;
+	struct spinqueue me;
 	uintptr_t tid, v;
 	int rwait, wwait;
 
@@ -1065,7 +1072,7 @@
 	 * Ok, we think we have waiters, so lock the turnstile so we can
 	 * read the waiter flags without any races.
 	 */
-	turnstile_chain_lock(&rw->lock_object);
+	turnstile_chain_lock(&rw->lock_object, &me);
 	v = rw->rw_lock & RW_LOCK_WAITERS;
 	rwait = v & RW_LOCK_READ_WAITERS;
 	wwait = v & RW_LOCK_WRITE_WAITERS;
@@ -1089,7 +1096,7 @@
 		turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
 	} else
 		turnstile_disown(ts);
-	turnstile_chain_unlock(&rw->lock_object);
+	turnstile_chain_unlock(&rw->lock_object, &me);
 out:
 	curthread->td_rw_rlocks++;
 	LOCK_LOG_LOCK("WDOWNGRADE", &rw->lock_object, 0, 0, file, line);
Index: sys/kern/kern_spinqueue.c
===================================================================
--- sys/kern/kern_spinqueue.c	(revision 0)
+++ sys/kern/kern_spinqueue.c	(working copy)
@@ -0,0 +1,108 @@
+/*-
+ * Copyright (c) 2013 Davide Italiano
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include
+#include
+
+void
+spinqueue_init(struct spinqueue **spq)
+{
+
+	/* Initialize the queue tail; NULL means the lock is free. */
+	*spq = NULL;
+}
+
+void
+spinqueue_lock(struct spinqueue **spq, struct spinqueue *me)
+{
+	struct spinqueue *tail;
+
+	/*
+	 * Initialize the lock structure.
+	 */
+	me->next = NULL;
+	me->spin = 0;
+
+	/*
+	 * Fast path: try to acquire the lock assuming it's uncontested.
+	 * We need to disable interrupts before trying in order to avoid
+	 * deadlocks.
+	 * XXXDAVIDE: The fast path should probably be inlined.
+	 */
+	spinlock_enter();
+	tail = (struct spinqueue *)atomic_swap_ptr((uintptr_t *)spq,
+	    (uintptr_t)me);
+	if (tail == NULL)
+		return;
+
+	/*
+	 * Slow path: somebody else already holds the lock.  Link
+	 * ourselves after the previous tail, then spin on our own
+	 * flag until our predecessor hands the lock over.
+	 */
+	tail->next = me;
+
+	/*
+	 * XXXDAVIDE: Give interrupts a chance while we spin, as the old
+	 * spinlock implementation does?
+	 */
+	while (atomic_load_acq_int(&me->spin) == 0)
+		cpu_spinwait();
+}
+
+void
+spinqueue_unlock(struct spinqueue **spq, struct spinqueue *me)
+{
+	struct spinqueue *next;
+
+	next = me->next;
+	if (next == NULL) {
+
+		/*
+		 * Fast path: no known successor, so try to swing the
+		 * tail pointer back to NULL.
+		 * XXXDAVIDE: should be inlined?
+		 */
+		if (atomic_cmpset_rel_ptr((uintptr_t *)spq, (uintptr_t)me,
+		    (uintptr_t)0)) {
+			spinlock_exit();
+			return;
+		}
+
+		/*
+		 * Slow path: a successor is in the middle of enqueueing
+		 * itself.  Spin until its link appears, then hand over.
+		 */
+		while ((next = me->next) == NULL)
+			cpu_spinwait();
+	}
+	atomic_store_rel_int(&next->spin, 1);
+	spinlock_exit();
+}
Property changes on: sys/kern/kern_spinqueue.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Index: sys/kern/subr_turnstile.c
===================================================================
--- sys/kern/subr_turnstile.c	(revision 268894)
+++ sys/kern/subr_turnstile.c	(working copy)
@@ -76,6 +76,7 @@
 #include
 #include
 #include
+#include <sys/spinqueue.h>
 #include
@@ -130,7 +131,7 @@
 struct turnstile_chain {
 	LIST_HEAD(, turnstile) tc_turnstiles;	/* List of turnstiles. */
-	struct mtx tc_lock;		/* Spin lock for this chain. */
+	struct spinqueue *tc_lock;	/* Spin lock for this chain. */
 #ifdef TURNSTILE_PROFILING
 	u_int	tc_depth;		/* Length of tc_queues. */
 	u_int	tc_max_depth;		/* Max length of tc_queues. */
@@ -362,8 +363,7 @@
 
 	for (i = 0; i < TC_TABLESIZE; i++) {
 		LIST_INIT(&turnstile_chains[i].tc_turnstiles);
-		mtx_init(&turnstile_chains[i].tc_lock, "turnstile chain",
-		    NULL, MTX_SPIN);
+		spinqueue_init(&turnstile_chains[i].tc_lock);
 	}
 	mtx_init(&td_contested_lock, "td_contested", NULL, MTX_SPIN);
 	LIST_INIT(&thread0.td_contested);
@@ -533,22 +533,22 @@
  * Lock the turnstile chain associated with the specified lock.
  */
 void
-turnstile_chain_lock(struct lock_object *lock)
+turnstile_chain_lock(struct lock_object *lock, struct spinqueue *me)
 {
 	struct turnstile_chain *tc;
 
 	tc = TC_LOOKUP(lock);
-	mtx_lock_spin(&tc->tc_lock);
+	spinqueue_lock(&tc->tc_lock, me);
 }
 
 struct turnstile *
-turnstile_trywait(struct lock_object *lock)
+turnstile_trywait(struct lock_object *lock, struct spinqueue *me)
 {
 	struct turnstile_chain *tc;
 	struct turnstile *ts;
 
 	tc = TC_LOOKUP(lock);
-	mtx_lock_spin(&tc->tc_lock);
+	spinqueue_lock(&tc->tc_lock, me);
 	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
 		if (ts->ts_lockobj == lock) {
 			mtx_lock_spin(&ts->ts_lock);
@@ -565,7 +565,7 @@
 }
 
 void
-turnstile_cancel(struct turnstile *ts)
+turnstile_cancel(struct turnstile *ts, struct spinqueue *me)
 {
 	struct turnstile_chain *tc;
 	struct lock_object *lock;
@@ -577,7 +577,7 @@
 	if (ts == curthread->td_turnstile)
 		ts->ts_lockobj = NULL;
 	tc = TC_LOOKUP(lock);
-	mtx_unlock_spin(&tc->tc_lock);
+	spinqueue_unlock(&tc->tc_lock, me);
 }
 
 /*
@@ -592,7 +592,7 @@
 	struct turnstile *ts;
 
 	tc = TC_LOOKUP(lock);
-	mtx_assert(&tc->tc_lock, MA_OWNED);
+	/* XXX: mtx_assert(&tc->tc_lock, MA_OWNED); */
 	LIST_FOREACH(ts, &tc->tc_turnstiles, ts_hash)
 		if (ts->ts_lockobj == lock) {
 			mtx_lock_spin(&ts->ts_lock);
@@ -605,12 +605,12 @@
  * Unlock the turnstile chain associated with a given lock.
  */
 void
-turnstile_chain_unlock(struct lock_object *lock)
+turnstile_chain_unlock(struct lock_object *lock, struct spinqueue *me)
 {
 	struct turnstile_chain *tc;
 
 	tc = TC_LOOKUP(lock);
-	mtx_unlock_spin(&tc->tc_lock);
+	spinqueue_unlock(&tc->tc_lock, me);
 }
 
 /*
@@ -634,7 +634,7 @@
  * owner appropriately.
  */
 void
-turnstile_claim(struct turnstile *ts)
+turnstile_claim(struct turnstile *ts, struct spinqueue *me)
 {
 	struct thread *td, *owner;
 	struct turnstile_chain *tc;
@@ -661,7 +661,7 @@
 	thread_unlock(owner);
 	tc = TC_LOOKUP(ts->ts_lockobj);
 	mtx_unlock_spin(&ts->ts_lock);
-	mtx_unlock_spin(&tc->tc_lock);
+	spinqueue_unlock(&tc->tc_lock, me);
 }
 
 /*
@@ -671,7 +671,7 @@
  * turnstile chain locked and will return with it unlocked.
  */
 void
-turnstile_wait(struct turnstile *ts, struct thread *owner, int queue)
+turnstile_wait(struct turnstile *ts, struct thread *owner, int queue, struct spinqueue *me)
 {
 	struct turnstile_chain *tc;
 	struct thread *td, *td1;
@@ -689,7 +689,7 @@
 	 * turnstile already in use by this lock.
 	 */
 	tc = TC_LOOKUP(ts->ts_lockobj);
-	mtx_assert(&tc->tc_lock, MA_OWNED);
+	/* XXX: mtx_assert(&tc->tc_lock, MA_OWNED); */
 	if (ts == td->td_turnstile) {
 #ifdef TURNSTILE_PROFILING
 		tc->tc_depth++;
@@ -738,7 +738,7 @@
 	td->td_lockname = lock->lo_name;
 	td->td_blktick = ticks;
 	TD_SET_LOCK(td);
-	mtx_unlock_spin(&tc->tc_lock);
+	spinqueue_unlock(&tc->tc_lock, me);
 	propagate_priority(td);
 
 	if (LOCK_LOG_TEST(lock, 0))
@@ -793,7 +793,7 @@
 	    TAILQ_EMPTY(&ts->ts_blocked[TS_SHARED_QUEUE]);
 	if (empty) {
 		tc = TC_LOOKUP(ts->ts_lockobj);
-		mtx_assert(&tc->tc_lock, MA_OWNED);
+		/* XXX: mtx_assert(&tc->tc_lock, MA_OWNED); */
 		MPASS(LIST_EMPTY(&ts->ts_free));
 #ifdef TURNSTILE_PROFILING
 		tc->tc_depth--;
@@ -827,7 +827,7 @@
 	 * turnstile from the hash queue.
 	 */
 	tc = TC_LOOKUP(ts->ts_lockobj);
-	mtx_assert(&tc->tc_lock, MA_OWNED);
+	/* XXX: mtx_assert(&tc->tc_lock, MA_OWNED); */
 	MPASS(queue == TS_SHARED_QUEUE || queue == TS_EXCLUSIVE_QUEUE);
 
 	/*
Index: sys/sys/spinqueue.h
===================================================================
--- sys/sys/spinqueue.h	(revision 0)
+++ sys/sys/spinqueue.h	(working copy)
@@ -0,0 +1,39 @@
+/*-
+ * Copyright (c) 2013 Davide Italiano
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SYS_SPINQUEUE_H_
+#define	_SYS_SPINQUEUE_H_
+
+struct spinqueue {
+	struct spinqueue *next;
+	int spin;
+};
+
+void	spinqueue_init(struct spinqueue **spq);
+void	spinqueue_lock(struct spinqueue **spq, struct spinqueue *self);
+void	spinqueue_unlock(struct spinqueue **spq, struct spinqueue *self);
+
+#endif /* _SYS_SPINQUEUE_H_ */
Property changes on: sys/sys/spinqueue.h
___________________________________________________________________
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Added: svn:keywords
## -0,0 +1 ##
+FreeBSD=%H
\ No newline at end of property
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Index: sys/sys/turnstile.h
===================================================================
--- sys/sys/turnstile.h	(revision 268894)
+++ sys/sys/turnstile.h	(working copy)
@@ -74,6 +74,7 @@
 struct lock_object;
 struct thread;
 struct turnstile;
+struct spinqueue;
 
 #ifdef _KERNEL
@@ -89,10 +90,10 @@
 void	turnstile_adjust(struct thread *, u_char);
 struct turnstile *turnstile_alloc(void);
 void	turnstile_broadcast(struct turnstile *, int);
-void	turnstile_cancel(struct turnstile *);
-void	turnstile_chain_lock(struct lock_object *);
-void	turnstile_chain_unlock(struct lock_object *);
-void	turnstile_claim(struct turnstile *);
+void	turnstile_cancel(struct turnstile *, struct spinqueue *);
+void	turnstile_chain_lock(struct lock_object *, struct spinqueue *);
+void	turnstile_chain_unlock(struct lock_object *, struct spinqueue *);
+void	turnstile_claim(struct turnstile *, struct spinqueue *);
 void	turnstile_disown(struct turnstile *);
 int	turnstile_empty(struct turnstile *ts, int queue);
 void	turnstile_free(struct turnstile *);
@@ -99,9 +100,10 @@
 struct thread *turnstile_head(struct turnstile *, int);
 struct turnstile *turnstile_lookup(struct lock_object *);
 int	turnstile_signal(struct turnstile *, int);
-struct turnstile *turnstile_trywait(struct lock_object *);
+struct turnstile *turnstile_trywait(struct lock_object *, struct spinqueue *);
 void	turnstile_unpend(struct turnstile *, int);
-void	turnstile_wait(struct turnstile *, struct thread *, int);
+void	turnstile_wait(struct turnstile *, struct thread *, int,
+	    struct spinqueue *);
 
 #endif	/* _KERNEL */
 #endif	/* _SYS_TURNSTILE_H_ */
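
A note on the algorithm, for reviewers: the spinqueue above is an MCS-style queued
spinlock.  The lock word (tc_lock) is just a pointer to the tail of a queue of
waiters; each waiter contributes a stack-allocated node and spins on a flag inside
its own node rather than on the shared lock word, so under contention every CPU
busy-waits on a cache line it owns and the lock is handed off in FIFO order.  That
is also why every turnstile_* entry point grows a struct spinqueue * argument: the
node must live on the caller's stack for as long as the chain lock is held.

The following standalone sketch shows the same hand-off in isolation.  It is an
illustration, not part of the patch: it uses C11 atomics in userland in place of
the kernel's atomic_swap_ptr()/atomic_cmpset_rel_ptr(), omits the
spinlock_enter()/spinlock_exit() interrupt handling, and the mcs_* names are
invented for the example.

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct mcs_node {
	_Atomic(struct mcs_node *) next;
	atomic_bool locked;		/* Each waiter spins here. */
};

/* The lock is the queue tail pointer; NULL means it is free. */
typedef _Atomic(struct mcs_node *) mcs_lock_t;

static void
mcs_acquire(mcs_lock_t *lock, struct mcs_node *me)
{
	struct mcs_node *tail;

	atomic_store_explicit(&me->next, NULL, memory_order_relaxed);
	atomic_store_explicit(&me->locked, true, memory_order_relaxed);

	/* Atomically append ourselves as the new queue tail. */
	tail = atomic_exchange_explicit(lock, me, memory_order_acq_rel);
	if (tail == NULL)
		return;			/* Uncontested acquire. */

	/* Link behind the old tail, then spin on our private flag. */
	atomic_store_explicit(&tail->next, me, memory_order_release);
	while (atomic_load_explicit(&me->locked, memory_order_acquire))
		;			/* cpu_spinwait() in the kernel */
}

static void
mcs_release(mcs_lock_t *lock, struct mcs_node *me)
{
	struct mcs_node *expected, *next;

	next = atomic_load_explicit(&me->next, memory_order_acquire);
	if (next == NULL) {
		/* No visible successor: try to swing the tail to NULL. */
		expected = me;
		if (atomic_compare_exchange_strong_explicit(lock, &expected,
		    NULL, memory_order_release, memory_order_relaxed))
			return;

		/* A successor is mid-enqueue; wait for its link to land. */
		while ((next = atomic_load_explicit(&me->next,
		    memory_order_acquire)) == NULL)
			;
	}
	/* Hand the lock directly to the next waiter (FIFO). */
	atomic_store_explicit(&next->locked, false, memory_order_release);
}

Usage mirrors the patch: the caller supplies a node that must outlive the critical
section, e.g. "struct mcs_node me; mcs_acquire(&l, &me); ... mcs_release(&l, &me);".
The sketch's locked flag has inverted polarity relative to the patch's spin field
(the patch spins while spin == 0 and the releaser stores 1), but the hand-off is
the same.  The trade-off versus the mtx_lock_spin() chain lock being replaced is
constant-size lock state and purely local spinning, at the cost of threading a
per-acquisition node through every caller.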