Index: conf/files
===================================================================
--- conf/files	(revision 256184)
+++ conf/files	(working copy)
@@ -2829,7 +2829,8 @@
 kern/kern_sema.c		standard
 kern/kern_sharedpage.c		standard
 kern/kern_shutdown.c		standard
 kern/kern_sig.c			standard
+kern/kern_spinqueue.c		standard
 kern/kern_switch.c		standard
 kern/kern_sx.c			standard
 kern/kern_synch.c		standard
Index: kern/kern_spinqueue.c
===================================================================
--- kern/kern_spinqueue.c	(revision 0)
+++ kern/kern_spinqueue.c	(working copy)
@@ -0,0 +1,149 @@
+/*-
+ * Copyright (c) 2013 Davide Italiano
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/proc.h>
+#include <sys/spinqueue.h>
+
+#include <machine/atomic.h>
+#include <machine/cpu.h>
+
+/*
+ * TODO: inline fast path for acquisition.
+ */
+
+void
+spinqueue_init(struct spinqueue *spq)
+{
+
+	/* Start with a detached node: no successor, not spinning, unowned. */
+	spq->next = NULL;
+	spq->lock_owner = NULL;
+	spq->spin = 0;
+	spq->spinqueue_recurse = 0;
+}
+
+void
+spinqueue_lock(struct spinqueue **spq, struct spinqueue *me)
+{
+	struct spinqueue *tail;
+#ifdef DIAGNOSTIC
+	int spincycles;
+#endif
+
+	/*
+	 * Recursive acquisition; the owning thread is expected to pass
+	 * the same queue node for every nested acquisition.
+	 */
+	if (me->lock_owner == curthread) {
+		me->spinqueue_recurse++;
+		return;
+	}
+	/*
+	 * Disable preemption before joining the queue; a waiter preempted
+	 * while queued would stall the unlock hand-off behind it.
+	 */
+	spinlock_enter();
+	me->next = NULL;
+	me->spin = 0;
+	me->lock_owner = curthread;
+	me->spinqueue_recurse = 0;
+	tail = (struct spinqueue *)atomic_swap_ptr((uintptr_t *)spq,
+	    (uintptr_t)me);
+	if (tail == NULL)
+		return;
+	tail->next = me;
+	mb();
+#ifdef DIAGNOSTIC
+	spincycles = 0;
+#endif
+	while (me->spin == 0) {
+		cpu_spinwait();
+#ifdef DIAGNOSTIC
+		if (++spincycles > 60000000)
+			panic("spinqueue_lock: spinning too long");
+#endif
+	}
+}
+
+int
+spinqueue_trylock(struct spinqueue **spq, struct spinqueue *me)
+{
+
+	me->next = NULL;
+	me->spin = 0;
+	spinlock_enter();
+	if (atomic_cmpset_acq_ptr((uintptr_t *)spq, (uintptr_t)0,
+	    (uintptr_t)me)) {
+		me->lock_owner = curthread;
+		me->spinqueue_recurse = 0;
+		return (0);
+	}
+	spinlock_exit();
+	return (EBUSY);
+}
+
+void
+spinqueue_unlock(struct spinqueue **spq, struct spinqueue *me)
+{
+#ifdef DIAGNOSTIC
+	int spincycles;
+#endif
+
+	if (me->spinqueue_recurse > 0) {
+		me->spinqueue_recurse--;
+		return;
+	}
+	me->lock_owner = NULL;
+	if (me->next == NULL) {
+		/* No visible successor; try to swing the tail back to NULL. */
+		if (atomic_cmpset_rel_ptr((uintptr_t *)spq, (uintptr_t)me,
+		    (uintptr_t)0)) {
+			spinlock_exit();
+			return;
+		}
+		/*
+		 * A new waiter swapped itself onto the tail but has not
+		 * linked in behind us yet; wait for its next pointer.
+		 */
+#ifdef DIAGNOSTIC
+		spincycles = 0;
+#endif
+		while (me->next == NULL) {
+			cpu_spinwait();
+#ifdef DIAGNOSTIC
+			if (++spincycles > 60000000)
+				panic("spinqueue_unlock: no successor");
+#endif
+		}
+	}
+	/* Hand the lock off to the next queued waiter. */
+	me->next->spin = 1;
+	spinlock_exit();
+}
Index: sys/spinqueue.h
===================================================================
--- sys/spinqueue.h	(revision 0)
+++ sys/spinqueue.h	(working copy)
@@ -0,0 +1,43 @@
+/*-
+ * Copyright (c) 2013 Davide Italiano
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SYS_SPINQUEUE_H_
+#define	_SYS_SPINQUEUE_H_
+
+struct spinqueue {
+	/* struct lock_object lock_object; */
+	struct spinqueue	*next;
+	struct thread		*lock_owner;
+	int			spin;
+	int			spinqueue_recurse;
+};
+
+void	spinqueue_init(struct spinqueue *spq);
+void	spinqueue_lock(struct spinqueue **spq, struct spinqueue *me);
+void	spinqueue_unlock(struct spinqueue **spq, struct spinqueue *me);
+int	spinqueue_trylock(struct spinqueue **spq, struct spinqueue *me);
+
+#endif /* !_SYS_SPINQUEUE_H_ */
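
For reviewers who want to try the patch: below is a minimal usage sketch of the
queued (MCS-style) lock it adds. The tail pointer is swapped atomically, each
waiter spins on the spin flag in its own node rather than on a shared word, and
unlock hands off through the next pointer, so waiters busy-wait on a local cache
line and acquisition order is FIFO. Note that counter_lock, counter, and
update_counter() are hypothetical names for illustration; they are not part of
the patch.

	#include <sys/param.h>
	#include <sys/systm.h>
	#include <sys/spinqueue.h>

	static struct spinqueue *counter_lock;	/* Queue tail; NULL == unlocked. */
	static u_long counter;

	static void
	update_counter(struct spinqueue *node)
	{

		/*
		 * The caller owns "node" and must have set it up with
		 * spinqueue_init() before its first use; a node may not
		 * be reused while it is still linked on the queue.
		 */
		spinqueue_lock(&counter_lock, node);
		counter++;	/* Critical section: short, never sleeps. */
		spinqueue_unlock(&counter_lock, node);
	}

Compared with a plain spin mutex, the swap-on-tail/spin-on-self scheme trades a
per-waiter queue node for one remote cache-line transfer per hand-off instead of
all waiters hammering the same word, which is where the scalability win on
many-core machines is expected to come from.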