diff --git a/sys/conf/files b/sys/conf/files
index 57de9ca..854dd63 100644
--- a/sys/conf/files
+++ b/sys/conf/files
@@ -2380,6 +2380,7 @@ kern/kern_poll.c		optional device_polling
 kern/kern_priv.c		standard
 kern/kern_proc.c		standard
 kern/kern_prot.c		standard
+kern/kern_rangelock.c		standard
 kern/kern_racct.c		standard
 kern/kern_rctl.c		standard
 kern/kern_resource.c		standard
diff --git a/sys/fs/nfsclient/nfs_clbio.c b/sys/fs/nfsclient/nfs_clbio.c
index 82a3692..03b1504 100644
--- a/sys/fs/nfsclient/nfs_clbio.c
+++ b/sys/fs/nfsclient/nfs_clbio.c
@@ -820,7 +820,10 @@ do_sync:
 		t_uio->uio_segflg = UIO_SYSSPACE;
 		t_uio->uio_rw = UIO_WRITE;
 		t_uio->uio_td = td;
-		bcopy(uiop->uio_iov->iov_base, t_iov->iov_base, size);
+		error = copyin(uiop->uio_iov->iov_base,
+		    t_iov->iov_base, size);
+		if (error != 0)
+			goto err_free;
 		bp->b_flags |= B_DIRECT;
 		bp->b_iocmd = BIO_WRITE;
 		if (cred != NOCRED) {
@@ -831,6 +834,7 @@ do_sync:
 		bp->b_caller1 = (void *)t_uio;
 		bp->b_vp = vp;
 		error = ncl_asyncio(nmp, bp, NOCRED, td);
+err_free:
 		if (error) {
 			free(t_iov->iov_base, M_NFSDIRECTIO);
 			free(t_iov, M_NFSDIRECTIO);
@@ -874,8 +878,9 @@ ncl_write(struct vop_write_args *ap)
 	struct nfsmount *nmp = VFSTONFS(vp->v_mount);
 	daddr_t lbn;
 	int bcount;
-	int n, on, error = 0;
-	off_t tmp_off;
+	int bp_cached, n, on, error = 0;
+	size_t orig_resid, local_resid;
+	off_t orig_size, tmp_off;
 
 	KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
 	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
@@ -927,6 +932,11 @@ flush_and_restart:
 		mtx_unlock(&np->n_mtx);
 	}
 
+	orig_resid = uio->uio_resid;
+	mtx_lock(&np->n_mtx);
+	orig_size = np->n_size;
+	mtx_unlock(&np->n_mtx);
+
 	/*
 	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
 	 * get the append lock.
@@ -1104,7 +1114,10 @@ again:
 		 * normally.
 		 */
 
+		bp_cached = 1;
 		if (on == 0 && n == bcount) {
+			if ((bp->b_flags & B_CACHE) == 0)
+				bp_cached = 0;
 			bp->b_flags |= B_CACHE;
 			bp->b_flags &= ~B_INVAL;
 			bp->b_ioflags &= ~BIO_ERROR;
@@ -1155,7 +1168,7 @@ again:
 		 * significant cache coherency problems with multiple clients,
 		 * especially if locking is implemented later on.
 		 *
-		 * as an optimization we could theoretically maintain
+		 * As an optimization we could theoretically maintain
 		 * a linked list of discontinuous areas, but we would still
 		 * have to commit them separately so there isn't much
 		 * advantage to it except perhaps a bit of asynchronization.
@@ -1170,8 +1183,24 @@ again:
 			goto again;
 		}
 
+		local_resid = uio->uio_resid;
 		error = uiomove((char *)bp->b_data + on, n, uio);
 
+		if (error != 0 && !bp_cached) {
+			/*
+			 * This block has no other content than what
+			 * was possibly written by the faulting uiomove.
+			 * Release it, forgetting the data pages, to
+			 * prevent the leak of uninitialized data to
+			 * usermode.
+			 */
+			bp->b_ioflags |= BIO_ERROR;
+			brelse(bp);
+			uio->uio_offset -= local_resid - uio->uio_resid;
+			uio->uio_resid = local_resid;
+			break;
+		}
+
 		/*
 		 * Since this block is being modified, it must be written
 		 * again and not just committed.  Since write clustering does
@@ -1180,17 +1209,18 @@ again:
 		 */
 		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
 
-		if (error) {
-			bp->b_ioflags |= BIO_ERROR;
-			brelse(bp);
-			break;
-		}
+		/*
+		 * Get the partial update on the progress made from
+		 * uiomove, if an error occurred.
+		 */
+		if (error != 0)
+			n = local_resid - uio->uio_resid;
 
 		/*
 		 * Only update dirtyoff/dirtyend if not a degenerate
 		 * condition.
 		 */
-		if (n) {
+		if (n > 0) {
 			if (bp->b_dirtyend > 0) {
 				bp->b_dirtyoff = min(on, bp->b_dirtyoff);
 				bp->b_dirtyend = max((on + n), bp->b_dirtyend);
@@ -1219,8 +1249,22 @@ again:
 		} else {
 			bdwrite(bp);
 		}
+
+		if (error != 0)
+			break;
 	} while (uio->uio_resid > 0 && n > 0);
 
+	if (error != 0) {
+		if (ioflag & IO_UNIT) {
+			VATTR_NULL(&vattr);
+			vattr.va_size = orig_size;
+			/* IO_SYNC is handled implicitly */
+			(void)VOP_SETATTR(vp, &vattr, cred);
+			uio->uio_offset -= orig_resid - uio->uio_resid;
+			uio->uio_resid = orig_resid;
+		}
+	}
+
 	return (error);
 }
 
diff --git a/sys/fs/nfsclient/nfs_clvfsops.c b/sys/fs/nfsclient/nfs_clvfsops.c
index 04d8e67..ba9380a 100644
--- a/sys/fs/nfsclient/nfs_clvfsops.c
+++ b/sys/fs/nfsclient/nfs_clvfsops.c
@@ -1136,7 +1136,8 @@ nfs_mount(struct mount *mp)
 out:
 	if (!error) {
 		MNT_ILOCK(mp);
-		mp->mnt_kern_flag |= (MNTK_MPSAFE|MNTK_LOOKUP_SHARED);
+		mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED |
+		    MNTK_NO_IOPF;
		MNT_IUNLOCK(mp);
 	}
 	return (error);
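
The IO_UNIT rollback added to ncl_write() above is a small piece of offset/resid bookkeeping.  The sketch below is not part of the patch; the structure and helper names are hypothetical and only the two uio fields are modeled.  It shows the arithmetic in isolation: whatever progress the failed write made is subtracted back out of uio_offset, and uio_resid is restored to its original value.

/*
 * Standalone illustration of the IO_UNIT rollback arithmetic used in the
 * ncl_write() hunk above.  The struct and helper are hypothetical; only the
 * uio_offset/uio_resid bookkeeping mirrors the patch.
 */
#include <sys/types.h>
#include <assert.h>
#include <stddef.h>

struct mini_uio {
	off_t	uio_offset;	/* next byte to transfer */
	size_t	uio_resid;	/* bytes still left to transfer */
};

static void
rollback_io_unit(struct mini_uio *uio, size_t orig_resid)
{

	/* Undo whatever partial progress the failed write made. */
	uio->uio_offset -= orig_resid - uio->uio_resid;
	uio->uio_resid = orig_resid;
}

int
main(void)
{
	struct mini_uio uio = { .uio_offset = 4096, .uio_resid = 1024 };
	size_t orig_resid = uio.uio_resid;

	/* Pretend 300 bytes were written before the error. */
	uio.uio_offset += 300;
	uio.uio_resid -= 300;

	rollback_io_unit(&uio, orig_resid);
	assert(uio.uio_offset == 4096 && uio.uio_resid == 1024);
	return (0);
}

The VOP_SETATTR() call in the hunk performs the corresponding rollback of the file size itself, so a partially applied append does not leave the file extended.
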
diff --git a/sys/kern/kern_rangelock.c b/sys/kern/kern_rangelock.c
new file mode 100644
index 0000000..5e02717
--- /dev/null
+++ b/sys/kern/kern_rangelock.c
@@ -0,0 +1,246 @@
+/*-
+ * Copyright (c) 2009 Konstantin Belousov
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/kernel.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/rangelock.h>
+#include <sys/systm.h>
+
+#include <vm/uma.h>
+
+struct rl_q_entry {
+	TAILQ_ENTRY(rl_q_entry) rl_q_link;
+	off_t	rl_q_start, rl_q_end;
+	int	rl_q_flags;
+};
+
+static uma_zone_t rl_entry_zone;
+
+static void
+rangelock_sys_init(void)
+{
+
+	rl_entry_zone = uma_zcreate("rl_entry", sizeof(struct rl_q_entry),
+	    NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
+}
+SYSINIT(vfs, SI_SUB_LOCK, SI_ORDER_ANY, rangelock_sys_init, NULL);
+
+static struct rl_q_entry *
+rlqentry_alloc(void)
+{
+
+	return (uma_zalloc(rl_entry_zone, M_WAITOK));
+}
+
+void
+rlqentry_free(struct rl_q_entry *rleq)
+{
+
+	uma_zfree(rl_entry_zone, rleq);
+}
+
+void
+rangelock_init(struct rangelock *lock)
+{
+
+	TAILQ_INIT(&lock->rl_waiters);
+	lock->rl_currdep = NULL;
+}
+
+void
+rangelock_destroy(struct rangelock *lock)
+{
+
+	KASSERT(TAILQ_EMPTY(&lock->rl_waiters), ("Dangling waiters"));
+}
+
+/*
+ * Verifies the supplied rl_q_entries for compatibility.  Returns true
+ * if the rangelock queue entries are not compatible, false if they are.
+ *
+ * Two entries are compatible if their ranges do not overlap, or both
+ * entries are for read.
+ */
+static int
+rangelock_incompatible(const struct rl_q_entry *e1,
+    const struct rl_q_entry *e2)
+{
+
+	if ((e1->rl_q_flags & RL_LOCK_TYPE_MASK) == RL_LOCK_READ &&
+	    (e2->rl_q_flags & RL_LOCK_TYPE_MASK) == RL_LOCK_READ)
+		return (0);
+	if (e1->rl_q_start < e2->rl_q_end && e1->rl_q_end > e2->rl_q_start)
+		return (1);
+	return (0);
+}
+
+/*
+ * Recalculate the lock->rl_currdep after an unlock.
+ */
+static void
+rangelock_calc_block(struct rangelock *lock)
+{
+	struct rl_q_entry *entry, *entry1, *whead;
+
+	if (lock->rl_currdep == TAILQ_FIRST(&lock->rl_waiters) &&
+	    lock->rl_currdep != NULL)
+		lock->rl_currdep = TAILQ_NEXT(lock->rl_currdep, rl_q_link);
+	for (entry = lock->rl_currdep; entry != NULL;
+	    entry = TAILQ_NEXT(entry, rl_q_link)) {
+		TAILQ_FOREACH(entry1, &lock->rl_waiters, rl_q_link) {
+			if (rangelock_incompatible(entry, entry1))
+				goto out;
+			if (entry1 == entry)
+				break;
+		}
+	}
+out:
+	lock->rl_currdep = entry;
+	TAILQ_FOREACH(whead, &lock->rl_waiters, rl_q_link) {
+		if (whead == lock->rl_currdep)
+			break;
+		if (!(whead->rl_q_flags & RL_LOCK_GRANTED)) {
+			whead->rl_q_flags |= RL_LOCK_GRANTED;
+			wakeup(whead);
+		}
+	}
+}
+
+static void
+rangelock_unlock_locked(struct rangelock *lock, struct rl_q_entry *entry,
+    struct mtx *ilk)
+{
+
+	MPASS(lock != NULL && entry != NULL && ilk != NULL);
+	mtx_assert(ilk, MA_OWNED);
+	KASSERT(entry != lock->rl_currdep, ("stuck currdep"));
+
+	TAILQ_REMOVE(&lock->rl_waiters, entry, rl_q_link);
+	rangelock_calc_block(lock);
+	mtx_unlock(ilk);
+	if (curthread->td_rlqe == NULL)
+		curthread->td_rlqe = entry;
+	else
+		rlqentry_free(entry);
+}
+
+void
+rangelock_unlock(struct rangelock *lock, void *cookie, struct mtx *ilk)
+{
+
+	MPASS(lock != NULL && cookie != NULL && ilk != NULL);
+
+	mtx_lock(ilk);
+	rangelock_unlock_locked(lock, cookie, ilk);
+}
+
+/*
+ * Unlock the sub-range of the granted lock.
+ */
+void *
+rangelock_unlock_range(struct rangelock *lock, void *cookie, off_t start,
+    off_t end, struct mtx *ilk)
+{
+	struct rl_q_entry *entry;
+
+	MPASS(lock != NULL && cookie != NULL && ilk != NULL);
+	entry = cookie;
+	KASSERT(entry->rl_q_flags & RL_LOCK_GRANTED,
+	    ("Unlocking non-granted lock"));
+	KASSERT(entry->rl_q_start == start, ("wrong start"));
+	KASSERT(entry->rl_q_end >= end, ("wrong end"));
+
+	mtx_lock(ilk);
+	if (entry->rl_q_end == end) {
+		rangelock_unlock_locked(lock, cookie, ilk);
+		return (NULL);
+	}
+	entry->rl_q_end = end;
+	rangelock_calc_block(lock);
+	mtx_unlock(ilk);
+	return (cookie);
+}
+
+/*
+ * Add the lock request to the queue of the pending requests for
+ * rangelock.  Sleeps until the request can be granted.
+ */
+static void *
+rangelock_enqueue(struct rangelock *lock, off_t start, off_t end, int mode,
+    struct mtx *ilk)
+{
+	struct rl_q_entry *entry;
+	struct thread *td;
+
+	MPASS(lock != NULL && ilk != NULL);
+
+	td = curthread;
+	if (td->td_rlqe != NULL) {
+		entry = td->td_rlqe;
+		td->td_rlqe = NULL;
+	} else
+		entry = rlqentry_alloc();
+	MPASS(entry != NULL);
+	entry->rl_q_flags = mode;
+	entry->rl_q_start = start;
+	entry->rl_q_end = end;
+
+	mtx_lock(ilk);
+	/*
+	 * XXXKIB TODO.  Check that the thread does not try to enqueue a
+	 * lock that is incompatible with another request from the same
+	 * thread.
+	 */
+
+	TAILQ_INSERT_TAIL(&lock->rl_waiters, entry, rl_q_link);
+	if (lock->rl_currdep == NULL)
+		lock->rl_currdep = entry;
+	rangelock_calc_block(lock);
+	while (!(entry->rl_q_flags & RL_LOCK_GRANTED))
+		msleep(entry, ilk, 0, "range", 0);
+	mtx_unlock(ilk);
+	return (entry);
+}
+
+void *
+rangelock_rlock(struct rangelock *lock, off_t start, off_t end, struct mtx *ilk)
+{
+
+	return (rangelock_enqueue(lock, start, end, RL_LOCK_READ, ilk));
+}
+
+void *
+rangelock_wlock(struct rangelock *lock, off_t start, off_t end, struct mtx *ilk)
+{
+
+	return (rangelock_enqueue(lock, start, end, RL_LOCK_WRITE, ilk));
+}
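
For readers unfamiliar with the interlock convention above, here is a minimal usage sketch of the new KPI.  It is not part of the patch: the object type, its mutex, and the offsets are hypothetical, and only the rangelock_*() calls come from kern_rangelock.c.  Note that the caller does not hold the interlock around the calls: rangelock_rlock()/rangelock_wlock() take it internally, drop it while sleeping in msleep(), and return with it released, and rangelock_unlock() likewise returns with it unlocked.

/*
 * Hypothetical consumer of the rangelock KPI introduced above; only the
 * rangelock_*() calls come from the patch.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/rangelock.h>

struct my_object {
	struct mtx	 o_interlock;	/* protects o_rl, passed as ilk */
	struct rangelock o_rl;		/* byte-range lock over the object */
};

static void
my_object_setup(struct my_object *o)
{

	mtx_init(&o->o_interlock, "myobj", NULL, MTX_DEF);
	rangelock_init(&o->o_rl);
}

static void
my_object_write_region(struct my_object *o, off_t start, off_t len)
{
	void *cookie;

	/* Sleeps until [start, start + len) can be granted for writing. */
	cookie = rangelock_wlock(&o->o_rl, start, start + len,
	    &o->o_interlock);

	/* ... modify the byte range here, with readers of it excluded ... */

	/* Returns with the interlock unlocked. */
	rangelock_unlock(&o->o_rl, cookie, &o->o_interlock);
}

vn_rdwr() and vn_io_fault() below use exactly this pattern through the vn_rangelock_*() wrappers, with the vnode interlock as the ilk mutex.
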
diff --git a/sys/kern/kern_thread.c b/sys/kern/kern_thread.c
index d4c5c4c..8116c15 100644
--- a/sys/kern/kern_thread.c
+++ b/sys/kern/kern_thread.c
@@ -38,6 +38,7 @@ __FBSDID("$FreeBSD$");
 #include <sys/lock.h>
 #include <sys/mutex.h>
 #include <sys/proc.h>
+#include <sys/rangelock.h>
 #include <sys/resourcevar.h>
 #include <sys/smp.h>
 #include <sys/sched.h>
@@ -199,6 +200,7 @@ thread_init(void *mem, int size, int flags)
 
 	td->td_sleepqueue = sleepq_alloc();
 	td->td_turnstile = turnstile_alloc();
+	td->td_rlqe = NULL;
 	EVENTHANDLER_INVOKE(thread_init, td);
 	td->td_sched = (struct td_sched *)&td[1];
 	umtx_thread_init(td);
@@ -216,6 +218,7 @@ thread_fini(void *mem, int size)
 
 	td = (struct thread *)mem;
 	EVENTHANDLER_INVOKE(thread_fini, td);
+	rlqentry_free(td->td_rlqe);
 	turnstile_free(td->td_turnstile);
 	sleepq_free(td->td_sleepqueue);
 	umtx_thread_fini(td);
diff --git a/sys/kern/subr_syscall.c b/sys/kern/subr_syscall.c
index 9c4dd48..d04593e 100644
--- a/sys/kern/subr_syscall.c
+++ b/sys/kern/subr_syscall.c
@@ -181,6 +181,12 @@ syscallret(struct thread *td, int error, struct syscall_args *sa __unused)
 	KASSERT(td->td_locks == 0,
 	    ("System call %s returning with %d locks held",
 	    syscallname(p, sa->code), td->td_locks));
+	KASSERT((td->td_pflags & TDP_NOFAULTING) == 0,
+	    ("System call %s returning with pagefaults disabled",
+	    syscallname(p, sa->code)));
+	KASSERT((td->td_pflags & TDP_NOSLEEPING) == 0,
+	    ("System call %s returning with sleep disabled",
+	    syscallname(p, sa->code)));
 
 	/*
 	 * Handle reschedule and other end-of-syscall issues
diff --git a/sys/kern/vfs_subr.c b/sys/kern/vfs_subr.c
index 28562e6..37226da 100644
--- a/sys/kern/vfs_subr.c
+++ b/sys/kern/vfs_subr.c
@@ -913,6 +913,7 @@ vdestroy(struct vnode *vp)
 	/* XXX Elsewhere we can detect an already freed vnode via NULL v_op. */
 	vp->v_op = NULL;
 #endif
+	rangelock_destroy(&vp->v_rl);
 	lockdestroy(vp->v_vnlock);
 	mtx_destroy(&vp->v_interlock);
 	mtx_destroy(BO_MTX(bo));
@@ -1067,6 +1068,7 @@ alloc:
 		if ((mp->mnt_kern_flag & MNTK_NOKNOTE) != 0)
 			vp->v_vflag |= VV_NOKNOTE;
 	}
+	rangelock_init(&vp->v_rl);
 	*vpp = vp;
 	return (0);
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index f94bc12..f31a193 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -65,10 +65,15 @@ __FBSDID("$FreeBSD$");
 #include <security/mac/mac_framework.h>
 
 #include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
 #include <vm/vm_object.h>
+#include <vm/vm_page.h>
 
 static fo_rdwr_t	vn_read;
 static fo_rdwr_t	vn_write;
+static fo_rdwr_t	vn_io_fault;
 static fo_truncate_t	vn_truncate;
 static fo_ioctl_t	vn_ioctl;
 static fo_poll_t	vn_poll;
@@ -77,8 +82,8 @@ static fo_stat_t	vn_statfile;
 static fo_close_t	vn_closefile;
 
 struct fileops vnops = {
-	.fo_read = vn_read,
-	.fo_write = vn_write,
+	.fo_read = vn_io_fault,
+	.fo_write = vn_io_fault,
 	.fo_truncate = vn_truncate,
 	.fo_ioctl = vn_ioctl,
 	.fo_poll = vn_poll,
@@ -362,57 +367,56 @@ sequential_heuristic(struct uio *uio, struct file *fp)
  * Package up an I/O request on a vnode into a uio and do it.
  */
 int
-vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
-    aresid, td)
-	enum uio_rw rw;
-	struct vnode *vp;
-	void *base;
-	int len;
-	off_t offset;
-	enum uio_seg segflg;
-	int ioflg;
-	struct ucred *active_cred;
-	struct ucred *file_cred;
-	int *aresid;
-	struct thread *td;
+vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
+    enum uio_seg segflg, int ioflg, struct ucred *active_cred,
+    struct ucred *file_cred, int *aresid, struct thread *td)
 {
 	struct uio auio;
 	struct iovec aiov;
 	struct mount *mp;
 	struct ucred *cred;
+	void *rl_cookie;
 	int error, lock_flags;
 
 	VFS_ASSERT_GIANT(vp->v_mount);
 
+	auio.uio_iov = &aiov;
+	auio.uio_iovcnt = 1;
+	aiov.iov_base = base;
+	aiov.iov_len = len;
+	auio.uio_resid = len;
+	auio.uio_offset = offset;
+	auio.uio_segflg = segflg;
+	auio.uio_rw = rw;
+	auio.uio_td = td;
+	error = 0;
+
 	if ((ioflg & IO_NODELOCKED) == 0) {
+		if (rw == UIO_READ) {
+			rl_cookie = vn_rangelock_rlock(vp, offset,
+			    offset + len);
+		} else {
+			rl_cookie = vn_rangelock_wlock(vp, offset,
+			    offset + len);
+		}
 		mp = NULL;
 		if (rw == UIO_WRITE) { 
 			if (vp->v_type != VCHR &&
 			    (error = vn_start_write(vp, &mp, V_WAIT | PCATCH))
 			    != 0)
-				return (error);
+				goto out;
 			if (MNT_SHARED_WRITES(mp) ||
-			    ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount))) {
+			    ((mp == NULL) && MNT_SHARED_WRITES(vp->v_mount)))
 				lock_flags = LK_SHARED;
-			} else {
+			else
 				lock_flags = LK_EXCLUSIVE;
-			}
-			vn_lock(vp, lock_flags | LK_RETRY);
 		} else
-			vn_lock(vp, LK_SHARED | LK_RETRY);
+			lock_flags = LK_SHARED;
+		vn_lock(vp, lock_flags | LK_RETRY);
+	} else
+		rl_cookie = NULL;
 
-	}
 	ASSERT_VOP_LOCKED(vp, "IO_NODELOCKED with no vp lock held");
-	auio.uio_iov = &aiov;
-	auio.uio_iovcnt = 1;
-	aiov.iov_base = base;
-	aiov.iov_len = len;
-	auio.uio_resid = len;
-	auio.uio_offset = offset;
-	auio.uio_segflg = segflg;
-	auio.uio_rw = rw;
-	auio.uio_td = td;
-	error = 0;
 #ifdef MAC
 	if ((ioflg & IO_NOMACCHECK) == 0) {
 		if (rw == UIO_READ)
@@ -424,7 +428,7 @@ vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
 	}
 #endif
 	if (error == 0) {
-		if (file_cred)
+		if (file_cred != NULL)
 			cred = file_cred;
 		else
 			cred = active_cred;
@@ -439,10 +443,13 @@ vn_rdwr(rw, vp, base, len, offset, segflg, ioflg, active_cred, file_cred,
 	if (auio.uio_resid && error == 0)
 		error = EIO;
 	if ((ioflg & IO_NODELOCKED) == 0) {
-		if (rw == UIO_WRITE && vp->v_type != VCHR)
-			vn_finished_write(mp);
 		VOP_UNLOCK(vp, 0);
+		if (mp != NULL)
+			vn_finished_write(mp);
 	}
+ out:
+	if (rl_cookie != NULL)
+		vn_rangelock_unlock(vp, rl_cookie);
 	return (error);
 }
 
@@ -683,29 +690,191 @@ unlock:
 	return (error);
 }
 
+static const int io_hold_cnt = 16;
+
+/*
+ * vn_io_fault() is a wrapper around vn_read() and vn_write() to
+ * prevent the following deadlock:
+ *
+ * Assume that thread A reads from the vnode vp1 into userspace
+ * buffer buf1 backed by the pages of vnode vp2.  If a page in buf1 is
+ * currently not resident, then the system ends up with the call chain
+ * vn_read() -> VOP_READ(vp1) -> uiomove() -> [Page Fault] ->
+ * vm_fault(buf1) -> vnode_pager_getpages(vp2) -> VOP_GETPAGES(vp2),
+ * which establishes the lock order vp1->vn_lock, then vp2->vn_lock.
+ * If, at the same time, thread B reads from vnode vp2 into buffer buf2
+ * backed by the pages of vnode vp1, and some page in buf2 is not
+ * resident, we get the reversed order vp2->vn_lock, then vp1->vn_lock.
+ *
+ * To prevent the lock order reversal and deadlock, vn_io_fault() does
+ * not allow page faults to happen during VOP_READ() or VOP_WRITE().
+ * Instead, it first tries to do the whole range i/o with page faults
+ * disabled.  If all pages in the i/o buffer are resident and mapped,
+ * the VOP will succeed (ignoring genuine filesystem errors).
+ * Otherwise, we get back EFAULT, and vn_io_fault() falls back to doing
+ * the i/o in chunks, with all pages in each chunk prefaulted and held
+ * using vm_fault_quick_hold_pages().
+ *
+ * Note that another thread might unmap the buffer's address range, or
+ * create another mapping over the buffer, which causes faults on
+ * access to the buffer.  During the chunked i/o, we still run with
+ * page faults disabled, and treat EFAULT as any other error.
+ *
+ * Since vnode locks do not cover the whole i/o anymore, rangelocks
+ * make the current i/o request atomic with respect to other i/os and
+ * truncations.
+ */
+static int
+vn_io_fault(struct file *fp, struct uio *uio, struct ucred *active_cred,
+    int flags, struct thread *td)
+{
+	vm_page_t ma[io_hold_cnt + 2];
+	struct uio *uio_clone, short_uio;
+	struct iovec short_iovec[1];
+	fo_rdwr_t *doio;
+	struct vnode *vp;
+	void *rl_cookie;
+	struct mount *mp;
+	int cnt, error, save;
+	vm_offset_t addr, end;
+	vm_prot_t prot;
+	size_t len, resid;
+	ssize_t adv;
+
+	if (uio->uio_rw == UIO_READ)
+		doio = vn_read;
+	else
+		doio = vn_write;
+	vp = fp->f_vnode;
+	if (uio->uio_segflg != UIO_USERSPACE || vp->v_type != VREG ||
+	    ((mp = vp->v_mount) != NULL &&
+	    (mp->mnt_kern_flag & MNTK_NO_IOPF) == 0))
+		return (doio(fp, uio, active_cred, flags, td));
+
+	/*
+	 * UFS follows the IO_UNIT directive and restores both
+	 * uio_offset and uio_resid if an error is encountered during
+	 * the operation.  But, since the iovec may already be advanced,
+	 * the uio is still in an inconsistent state.
+	 *
+	 * Cache a copy of the original uio, which is advanced to the
+	 * redo point using UIO_NOCOPY below.
+	 */
+	uio_clone = cloneuio(uio);
+	resid = uio->uio_resid;
+
+	short_uio.uio_segflg = UIO_USERSPACE;
+	short_uio.uio_rw = uio->uio_rw;
+	short_uio.uio_td = uio->uio_td;
+
+	if (uio->uio_rw == UIO_READ) {
+		prot = VM_PROT_WRITE;
+		rl_cookie = vn_rangelock_rlock(vp, uio->uio_offset,
+		    uio->uio_offset + uio->uio_resid);
+	} else {
+		prot = VM_PROT_READ;
+		if ((fp->f_flag & O_APPEND) != 0 || (flags & FOF_OFFSET) == 0)
+			/* For appenders, punt and lock the whole range. */
+			rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
+		else
+			rl_cookie = vn_rangelock_wlock(vp, uio->uio_offset,
+			    uio->uio_offset + uio->uio_resid);
+	}
+
+	save = vm_fault_disable_pagefaults();
+	error = doio(fp, uio, active_cred, flags, td);
+	if (error != EFAULT)
+		goto out;
+
+	uio_clone->uio_segflg = UIO_NOCOPY;
+	uiomove(NULL, resid - uio->uio_resid, uio_clone);
+	uio_clone->uio_segflg = uio->uio_segflg;
+
+	while (uio_clone->uio_resid != 0) {
+		len = uio_clone->uio_iov->iov_len;
+		if (len == 0) {
+			KASSERT(uio_clone->uio_iovcnt >= 1,
+			    ("iovcnt underflow"));
+			uio_clone->uio_iov++;
+			uio_clone->uio_iovcnt--;
+			continue;
+		}
+
+		addr = (vm_offset_t)uio_clone->uio_iov->iov_base;
+		end = round_page(addr + len);
+		cnt = howmany(end - trunc_page(addr), PAGE_SIZE);
+		/*
+		 * A perfectly misaligned address and length could cause
+		 * both the start and the end of the chunk to use a
+		 * partial page.  +2 accounts for such a situation.
+		 */
+		if (cnt > io_hold_cnt + 2) {
+			len = io_hold_cnt * PAGE_SIZE;
+			KASSERT(howmany(round_page(addr + len) -
+			    trunc_page(addr), PAGE_SIZE) <= io_hold_cnt + 2,
+			    ("cnt overflow"));
+		}
+		cnt = vm_fault_quick_hold_pages(&td->td_proc->p_vmspace->vm_map,
+		    addr, len, prot, ma, io_hold_cnt + 2);
+		if (cnt == -1) {
+			error = EFAULT;
+			break;
+		}
+		short_uio.uio_iov = &short_iovec[0];
+		short_iovec[0].iov_base = (void *)addr;
+		short_uio.uio_iovcnt = 1;
+		short_uio.uio_resid = short_iovec[0].iov_len = len;
+		short_uio.uio_offset = uio_clone->uio_offset;
+
+		error = doio(fp, &short_uio, active_cred, flags, td);
+		vm_page_unhold_pages(ma, cnt);
+		adv = len - short_uio.uio_resid;
+
+		uio_clone->uio_iov->iov_base =
+		    (char *)uio_clone->uio_iov->iov_base + adv;
+		uio_clone->uio_iov->iov_len -= adv;
+		uio_clone->uio_resid -= adv;
+		uio_clone->uio_offset += adv;
+
+		uio->uio_resid -= adv;
+		uio->uio_offset += adv;
+
+		if (error != 0 || adv == 0)
+			break;
+	}
+out:
+	vm_fault_enable_pagefaults(save);
+	vn_rangelock_unlock(vp, rl_cookie);
+	free(uio_clone, M_IOV);
+	return (error);
+}
+
 /*
  * File table truncate routine.
  */
 static int
-vn_truncate(fp, length, active_cred, td)
-	struct file *fp;
-	off_t length;
-	struct ucred *active_cred;
-	struct thread *td;
+vn_truncate(struct file *fp, off_t length, struct ucred *active_cred,
+    struct thread *td)
 {
 	struct vattr vattr;
 	struct mount *mp;
 	struct vnode *vp;
+	void *rl_cookie;
 	int vfslocked;
 	int error;
 
 	vp = fp->f_vnode;
+
+	/*
+	 * Lock the whole range for truncation.  Otherwise split i/o
+	 * might happen partly before and partly after the
+	 * truncation.
+	 */
+	rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);
 	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
 	error = vn_start_write(vp, &mp, V_WAIT | PCATCH);
-	if (error) {
-		VFS_UNLOCK_GIANT(vfslocked);
-		return (error);
-	}
+	if (error)
+		goto out1;
 	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
 	if (vp->v_type == VDIR) {
 		error = EISDIR;
@@ -725,7 +894,9 @@ vn_truncate(fp, length, active_cred, td)
 out:
 	VOP_UNLOCK(vp, 0);
 	vn_finished_write(mp);
+out1:
 	VFS_UNLOCK_GIANT(vfslocked);
+	vn_rangelock_unlock(vp, rl_cookie);
 	return (error);
 }
 
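
The io_hold_cnt + 2 sizing of the hold array in vn_io_fault() can be checked with a few lines of userland arithmetic.  The sketch below is not part of the patch: PAGE_SIZE, IO_HOLD_CNT, and the addresses are hypothetical, and pages_spanned() only re-implements the trunc_page()/round_page()/howmany() computation from the chunk loop above.

/*
 * Why vn_io_fault() sizes its page array as io_hold_cnt + 2: a transfer
 * that already fits the limit (and so is not clamped) can still start and
 * end on partial pages.  Hypothetical values; only the arithmetic mirrors
 * the patch.
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define	PAGE_SIZE	4096u
#define	IO_HOLD_CNT	16

static size_t
pages_spanned(uintptr_t addr, size_t len)
{
	uintptr_t first = addr & ~(uintptr_t)(PAGE_SIZE - 1);	/* trunc_page */
	uintptr_t last = (addr + len + PAGE_SIZE - 1) &
	    ~(uintptr_t)(PAGE_SIZE - 1);			/* round_page */

	return ((last - first) / PAGE_SIZE);			/* howmany */
}

int
main(void)
{

	/* Page-aligned buffer: exactly IO_HOLD_CNT pages are touched. */
	assert(pages_spanned(0x10000, IO_HOLD_CNT * PAGE_SIZE) == IO_HOLD_CNT);

	/*
	 * One byte of data in the first page and one byte in the last:
	 * IO_HOLD_CNT full pages plus two partial ones, hence the "+ 2".
	 */
	assert(pages_spanned(0x10fff, IO_HOLD_CNT * PAGE_SIZE + 2) ==
	    IO_HOLD_CNT + 2);
	return (0);
}
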
diff --git a/sys/sys/mount.h b/sys/sys/mount.h
index f68a046..9eff919 100644
--- a/sys/sys/mount.h
+++ b/sys/sys/mount.h
@@ -325,6 +325,9 @@ void          __mnt_vnode_markerfree(struct vnode **mvp, struct mount *mp);
 #define	MNTK_REFEXPIRE	0x00000020	/* refcount expiring is happening */
 #define	MNTK_EXTENDED_SHARED	0x00000040 /* Allow shared locking for more ops */
 #define	MNTK_SHARED_WRITES	0x00000080 /* Allow shared locking for writes */
+#define	MNTK_NO_IOPF	0x00000100	/* Disallow page faults during reads
+					   and writes.  The filesystem must
+					   properly handle i/o state on EFAULT. */
 #define MNTK_UNMOUNT	0x01000000	/* unmount in progress */
 #define	MNTK_MWAIT	0x02000000	/* waiting for unmount to finish */
 #define MNTK_SUSPEND	0x08000000	/* request write suspension */
diff --git a/sys/sys/proc.h b/sys/sys/proc.h
index 0245e88..fc34dd0 100644
--- a/sys/sys/proc.h
+++ b/sys/sys/proc.h
@@ -213,6 +213,7 @@ struct thread {
 	struct seltd	*td_sel;	/* Select queue/channel. */
 	struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */
 	struct turnstile *td_turnstile;	/* (k) Associated turnstile. */
+	struct rl_q_entry *td_rlqe;	/* (k) Associated range lock entry. */
 	struct umtx_q   *td_umtxq;	/* (c?) Link for when we're blocked. */
 	lwpid_t		td_tid;		/* (b) Thread ID. */
 	sigqueue_t	td_sigqueue;	/* (c) Sigs arrived, not delivered. */
diff --git a/sys/sys/rangelock.h b/sys/sys/rangelock.h
new file mode 100644
index 0000000..bf82183
--- /dev/null
+++ b/sys/sys/rangelock.h
@@ -0,0 +1,78 @@
+/*-
+ * Copyright (c) 2009 Konstantin Belousov
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice unmodified, this list of conditions, and the following
+ *    disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef	_SYS_RANGELOCK_H
+#define	_SYS_RANGELOCK_H
+
+#include <sys/queue.h>
+
+#define	RL_LOCK_READ		0x0001
+#define	RL_LOCK_WRITE		0x0002
+#define	RL_LOCK_TYPE_MASK	0x0003
+#define	RL_LOCK_GRANTED		0x0004
+
+struct rl_q_entry;
+
+/*
+ * The structure representing the range lock.  A caller may request
+ * read or write access to a range of bytes.  Access is granted if
+ * all existing lock owners are compatible with the request.  Two lock
+ * owners are compatible if their ranges do not overlap, or both
+ * owners are for read.
+ *
+ * Access to the structure itself is synchronized with the externally
+ * supplied mutex.
+ *
+ * rl_waiters is the queue of lock requests in the order of arrival.
+ * rl_currdep is the first lock request that cannot be granted now due
+ * to the preceding requests conflicting with it.
+ */
+struct rangelock {
+	TAILQ_HEAD(, rl_q_entry) rl_waiters;
+	struct rl_q_entry	*rl_currdep;
+};
+
+#ifdef _KERNEL
+
+struct mtx;
+
+void	 rangelock_init(struct rangelock *lock);
+void	 rangelock_destroy(struct rangelock *lock);
+void	 rangelock_unlock(struct rangelock *lock, void *cookie,
+	    struct mtx *ilk);
+void	*rangelock_unlock_range(struct rangelock *lock, void *cookie,
+	    off_t start, off_t end, struct mtx *ilk);
+void	*rangelock_rlock(struct rangelock *lock, off_t start, off_t end,
+	    struct mtx *ilk);
+void	*rangelock_wlock(struct rangelock *lock, off_t start, off_t end,
+	    struct mtx *ilk);
+void	 rlqentry_free(struct rl_q_entry *rlqe);
+
+#endif	/* _KERNEL */
+
+#endif	/* _SYS_RANGELOCK_H */
diff --git a/sys/sys/vnode.h b/sys/sys/vnode.h
index 4fd5a28..1fe5a1b 100644
--- a/sys/sys/vnode.h
+++ b/sys/sys/vnode.h
@@ -38,6 +38,7 @@
 #include <sys/lock.h>
 #include <sys/lockf.h>
 #include <sys/mutex.h>
+#include <sys/rangelock.h>
 #include <sys/selinfo.h>
 #include <sys/uio.h>
 #include <sys/acl.h>
@@ -164,7 +165,8 @@ struct vnode {
 	 */
 	struct vpollinfo *v_pollinfo;	/* i Poll events, p for *v_pi */
 	struct label *v_label;		/* MAC label for vnode */
-	struct lockf *v_lockf;		/* Byte-level lock list */
+	struct lockf *v_lockf;		/* Byte-level adv lock list */
+	struct rangelock v_rl;		/* Byte-range lock */
 };
 
 #endif /* defined(_KERNEL) || defined(_KVM_VNODE) */
@@ -676,6 +678,15 @@ int vn_extattr_rm(struct vnode *vp, int ioflg, int attrnamespace,
 int	vn_vget_ino(struct vnode *vp, ino_t ino, int lkflags,
 	    struct vnode **rvp);
 
+#define	vn_rangelock_unlock(vp, cookie)					\
+	rangelock_unlock(&(vp)->v_rl, (cookie), VI_MTX(vp))
+#define	vn_rangelock_unlock_range(vp, cookie, start, end)		\
+	rangelock_unlock_range(&(vp)->v_rl, (cookie), (start), (end),	\
+	    VI_MTX(vp))
+#define	vn_rangelock_rlock(vp, start, end)				\
+	rangelock_rlock(&(vp)->v_rl, (start), (end), VI_MTX(vp))
+#define	vn_rangelock_wlock(vp, start, end)				\
+	rangelock_wlock(&(vp)->v_rl, (start), (end), VI_MTX(vp))
+
 int	vfs_cache_lookup(struct vop_lookup_args *ap);
 void	vfs_timestamp(struct timespec *);
diff --git a/sys/ufs/ffs/ffs_vfsops.c b/sys/ufs/ffs/ffs_vfsops.c
index a12fd83..e9282f7 100644
--- a/sys/ufs/ffs/ffs_vfsops.c
+++ b/sys/ufs/ffs/ffs_vfsops.c
@@ -1059,7 +1059,7 @@ ffs_mountfs(devvp, mp, td)
 	 */
 	MNT_ILOCK(mp);
 	mp->mnt_kern_flag |= MNTK_MPSAFE | MNTK_LOOKUP_SHARED |
-	    MNTK_EXTENDED_SHARED;
+	    MNTK_EXTENDED_SHARED | MNTK_NO_IOPF;
 	MNT_IUNLOCK(mp);
 #ifdef UFS_EXTATTR
 #ifdef UFS_EXTATTR_AUTOSTART
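
Finally, a sketch of how the vn_rangelock_*() wrappers from sys/vnode.h are meant to be used by other in-kernel callers.  The function below is hypothetical and not part of the patch; the pattern matches what vn_truncate() above does when it takes the whole [0, OFF_MAX) range for write before changing the file size.

/*
 * Hypothetical caller of the vn_rangelock_*() wrappers added to
 * sys/vnode.h; the wrappers pass the vnode interlock as the ilk mutex.
 */
#include <sys/param.h>
#include <sys/limits.h>
#include <sys/vnode.h>

static int
example_whole_file_op(struct vnode *vp)
{
	void *rl_cookie;
	int error;

	/* Exclude every concurrent read, write, and truncate. */
	rl_cookie = vn_rangelock_wlock(vp, 0, OFF_MAX);

	/* ... size-changing or whole-file work would go here ... */
	error = 0;

	vn_rangelock_unlock(vp, rl_cookie);
	return (error);
}

Note that the range lock is initialized for every vnode in getnewvnode() (the vfs_subr.c hunks above), while the page-fault-disabled i/o path in vn_io_fault() is only taken for filesystems that opt in with MNTK_NO_IOPF, as the ffs_vfsops.c and nfs_clvfsops.c hunks do for FFS and the new NFS client.
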