--- comparison/sys/kern/kern_thread.c	2010-07-07 22:27:18.000000000 +0200
+++ vm6/sys/kern/kern_thread.c	2010-07-05 03:59:21.000000000 +0200
@@ -164,6 +164,7 @@ thread_init(void *mem, int size, int fla
 
 	td->td_sleepqueue = sleepq_alloc();
 	td->td_turnstile = turnstile_alloc();
+	td->td_rlqe = rlqentry_alloc();
 	EVENTHANDLER_INVOKE(thread_init, td);
 	td->td_sched = (struct td_sched *)&td[1];
 	umtx_thread_init(td);
@@ -181,6 +182,7 @@ thread_fini(void *mem, int size)
 
 	td = (struct thread *)mem;
 	EVENTHANDLER_INVOKE(thread_fini, td);
+	rlqentry_free(td->td_rlqe);
 	turnstile_free(td->td_turnstile);
 	sleepq_free(td->td_sleepqueue);
 	umtx_thread_fini(td);
--- comparison/sys/kern/kern_rangelock.c	2010-07-08 18:29:52.000000000 +0200
+++ vm6/sys/kern/kern_rangelock.c	2010-07-07 00:16:51.000000000 +0200
@@ -33,7 +33,13 @@ __FBSDID("$FreeBSD$");
 #include <sys/vnode.h>
 #include <vm/uma.h>
 
-uma_zone_t rl_entry_zone;
+struct rl_q_entry {
+	TAILQ_ENTRY(rl_q_entry) rl_q_link;
+	size_t	rl_q_start, rl_q_end;
+	int	rl_q_flags;
+};
+
+static uma_zone_t rl_entry_zone;
 
 static void
 rangelock_sys_init(void)
@@ -41,9 +47,24 @@ rangelock_sys_init(void)
 
 	rl_entry_zone = uma_zcreate("rl_entry", sizeof(struct rl_q_entry),
 	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+	thread0.td_rlqe = rlqentry_alloc();
 }
 SYSINIT(vfs, SI_SUB_VFS, SI_ORDER_ANY, rangelock_sys_init, NULL);
 
+struct rl_q_entry *
+rlqentry_alloc(void)
+{
+
+	return (uma_zalloc(rl_entry_zone, M_WAITOK));
+}
+
+void
+rlqentry_free(struct rl_q_entry *rlqe)
+{
+
+	uma_zfree(rl_entry_zone, rlqe);
+}
+
 void
 rangelock_init(struct rangelock *lock)
 {
@@ -113,7 +134,10 @@ rangelock_unlock_vp_locked(struct vnode
 	TAILQ_REMOVE(&vp->v_rl.rl_waiters, entry, rl_q_link);
 	rangelock_calc_block(&vp->v_rl);
 	VI_UNLOCK(vp);
-	uma_zfree(rl_entry_zone, entry);
+	if (curthread->td_rlqe == NULL)
+		curthread->td_rlqe = entry;
+	else
+		rlqentry_free(entry);
 }
 
 void
@@ -166,7 +190,11 @@ rangelock_rlock(struct vnode *vp, off_t
 {
 	struct rl_q_entry *entry;
 
-	entry = uma_zalloc(rl_entry_zone, M_WAITOK);
+	if (curthread->td_rlqe != NULL) {
+		entry = curthread->td_rlqe;
+		curthread->td_rlqe = NULL;
+	} else
+		entry = rlqentry_alloc();
 	entry->rl_q_flags = RL_LOCK_READ;
 	entry->rl_q_start = base;
 	entry->rl_q_end = base + len;
@@ -178,7 +206,11 @@ rangelock_wlock(struct vnode *vp, off_t
 {
 	struct rl_q_entry *entry;
 
-	entry = uma_zalloc(rl_entry_zone, M_WAITOK);
+	if (curthread->td_rlqe != NULL) {
+		entry = curthread->td_rlqe;
+		curthread->td_rlqe = NULL;
+	} else
+		entry = rlqentry_alloc();
 	entry->rl_q_flags = RL_LOCK_WRITE;
 	entry->rl_q_start = base;
 	entry->rl_q_end = base + len;
--- comparison/sys/sys/rangelock.h	2010-07-08 18:29:53.000000000 +0200
+++ vm6/sys/sys/rangelock.h	2010-07-07 00:15:19.000000000 +0200
@@ -16,33 +16,30 @@
 
 #ifdef _KERNEL
 
-struct vnode;
-
-struct rl_q_entry
-{
-	TAILQ_ENTRY(rl_q_entry) rl_q_link;
-	size_t rl_q_start, rl_q_end;
-	int rl_q_flags;
-};
-
 #define	RL_LOCK_READ		0x0001
 #define	RL_LOCK_WRITE		0x0002
 #define	RL_LOCK_TYPE_MASK	0x0003
 #define	RL_LOCK_GRANTED		0x0004
 
+struct vnode;
+struct rl_q_entry;
+
 struct rangelock
 {
 	TAILQ_HEAD(, rl_q_entry) rl_waiters;
-	struct rl_q_entry *rl_currdep;
+	struct rl_q_entry	*rl_currdep;
 };
 
-void rangelock_init(struct rangelock *lock);
-void rangelock_destroy(struct rangelock *lock);
-void rangelock_unlock(struct vnode *vp, void *cookie);
-void *rangelock_unlock_range(struct vnode *vp, void *cookie, off_t base,
-    size_t len);
-void *rangelock_rlock(struct vnode *vp, off_t base, size_t len);
-void *rangelock_wlock(struct vnode *vp, off_t base, size_t len);
+void	rangelock_init(struct rangelock *lock);
+void	rangelock_destroy(struct rangelock *lock);
+void	rangelock_unlock(struct vnode *vp, void *cookie);
+void	*rangelock_unlock_range(struct vnode *vp, void *cookie, off_t base,
+	    size_t len);
+void	*rangelock_rlock(struct vnode *vp, off_t base, size_t len);
+void	*rangelock_wlock(struct vnode *vp, off_t base, size_t len);
+
+struct rl_q_entry *rlqentry_alloc(void);
+void	rlqentry_free(struct rl_q_entry *rlqe);
 
 #endif
 #endif
--- comparison/sys/sys/proc.h	2010-07-08 18:29:53.000000000 +0200
+++ vm6/sys/sys/proc.h	2010-07-05 03:57:14.000000000 +0200
@@ -209,6 +209,7 @@ struct thread {
 	struct seltd	*td_sel;	/* Select queue/channel. */
 	struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */
 	struct turnstile *td_turnstile;	/* (k) Associated turnstile. */
+	struct rl_q_entry *td_rlqe;	/* (k) Associated range lock entry. */
 	struct umtx_q	*td_umtxq;	/* (c?) Link for when we're blocked. */
 	lwpid_t		td_tid;		/* (b) Thread ID. */
 	sigqueue_t	td_sigqueue;	/* (c) Sigs arrived, not delivered. */