Diffstat (limited to 'sys/fs')
-rw-r--r--  sys/fs/nfsclient/nfs_clbio.c | 56
1 file changed, 48 insertions(+), 8 deletions(-)
diff --git a/sys/fs/nfsclient/nfs_clbio.c b/sys/fs/nfsclient/nfs_clbio.c
index 153f81a007b9..4369912653b1 100644
--- a/sys/fs/nfsclient/nfs_clbio.c
+++ b/sys/fs/nfsclient/nfs_clbio.c
@@ -897,8 +897,9 @@ ncl_write(struct vop_write_args *ap)
struct nfsmount *nmp = VFSTONFS(vp->v_mount);
daddr_t lbn;
int bcount;
- int n, on, error = 0;
- off_t tmp_off;
+ int bp_cached, n, on, error = 0;
+ size_t orig_resid, local_resid;
+ off_t orig_size, tmp_off;
KASSERT(uio->uio_rw == UIO_WRITE, ("ncl_write mode"));
KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
@@ -950,6 +951,11 @@ flush_and_restart:
mtx_unlock(&np->n_mtx);
}
+ orig_resid = uio->uio_resid;
+ mtx_lock(&np->n_mtx);
+ orig_size = np->n_size;
+ mtx_unlock(&np->n_mtx);
+
/*
* If IO_APPEND then load uio_offset. We restart here if we cannot
* get the append lock.
@@ -1127,7 +1133,10 @@ again:
* normally.
*/
+ bp_cached = 1;
if (on == 0 && n == bcount) {
+ if ((bp->b_flags & B_CACHE) == 0)
+ bp_cached = 0;
bp->b_flags |= B_CACHE;
bp->b_flags &= ~B_INVAL;
bp->b_ioflags &= ~BIO_ERROR;
@@ -1193,8 +1202,24 @@ again:
goto again;
}
+ local_resid = uio->uio_resid;
error = uiomove((char *)bp->b_data + on, n, uio);
+ if (error != 0 && !bp_cached) {
+ /*
+ * This block has no other content than what
+ * possibly was written by the faulty uiomove.
+ * Release it, forgetting the data pages, to
+ * prevent the leak of uninitialized data to
+ * usermode.
+ */
+ bp->b_ioflags |= BIO_ERROR;
+ brelse(bp);
+ uio->uio_offset -= local_resid - uio->uio_resid;
+ uio->uio_resid = local_resid;
+ break;
+ }
+
/*
* Since this block is being modified, it must be written
* again and not just committed. Since write clustering does
@@ -1203,17 +1228,18 @@ again:
*/
bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);
- if (error) {
- bp->b_ioflags |= BIO_ERROR;
- brelse(bp);
- break;
- }
+ /*
+ * Get the partial progress made by uiomove,
+ * if an error occurred.
+ */
+ if (error != 0)
+ n = local_resid - uio->uio_resid;
/*
* Only update dirtyoff/dirtyend if not a degenerate
* condition.
*/
- if (n) {
+ if (n > 0) {
if (bp->b_dirtyend > 0) {
bp->b_dirtyoff = min(on, bp->b_dirtyoff);
bp->b_dirtyend = max((on + n), bp->b_dirtyend);
@@ -1242,8 +1268,22 @@ again:
} else {
bdwrite(bp);
}
+
+ if (error != 0)
+ break;
} while (uio->uio_resid > 0 && n > 0);
+ if (error != 0) {
+ if (ioflag & IO_UNIT) {
+ VATTR_NULL(&vattr);
+ vattr.va_size = orig_size;
+ /* IO_SYNC is handled implicitly */
+ (void)VOP_SETATTR(vp, &vattr, cred);
+ uio->uio_offset -= orig_resid - uio->uio_resid;
+ uio->uio_resid = orig_resid;
+ }
+ }
+
return (error);
}
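
The interesting part of this change is the two-level rollback when uiomove() fails part-way through a write. Below is a minimal userland sketch of that bookkeeping only; it is not the kernel code. The names (io_state, move_bytes) and the numbers are invented for illustration, and only the offset/resid arithmetic mirrors the diff above: the inner rollback undoes the partial progress of the faulting block before the buffer is released, and the outer IO_UNIT rollback rewinds the whole write (the real code also restores the file size via VOP_SETATTR).

#include <sys/types.h>
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the struct uio fields used in the diff. */
struct io_state {
	off_t	offset;		/* plays the role of uio_offset */
	size_t	resid;		/* plays the role of uio_resid */
};

/*
 * Pretend data mover: consumes up to 'avail' of the requested 'n' bytes and
 * fails with EFAULT when it cannot finish, like a uiomove() that faults on
 * the user buffer.
 */
static int
move_bytes(struct io_state *io, size_t n, size_t avail)
{
	size_t done = (n <= avail) ? n : avail;

	io->offset += done;
	io->resid -= done;
	return (done == n ? 0 : EFAULT);
}

int
main(void)
{
	struct io_state io = { .offset = 0, .resid = 64 };
	size_t orig_resid = io.resid;	/* saved before the loop, as the patch does */
	size_t local_resid;
	int error;

	/* First block: 32 bytes, fully copied. */
	local_resid = io.resid;
	error = move_bytes(&io, 32, 32);

	/* Second block: 32 bytes requested, simulated fault after 10. */
	local_resid = io.resid;
	error = move_bytes(&io, 32, 10);
	if (error != 0) {
		/* Inner rollback: undo the partial progress of this block only. */
		io.offset -= local_resid - io.resid;
		io.resid = local_resid;
	}

	if (error != 0) {
		/*
		 * Outer IO_UNIT-style rollback: rewind to the state saved
		 * before the loop.  The kernel additionally truncates the
		 * file back to orig_size with VOP_SETATTR() at this point.
		 */
		io.offset -= orig_resid - io.resid;
		io.resid = orig_resid;
	}

	printf("offset=%jd resid=%zu error=%d\n",
	    (intmax_t)io.offset, io.resid, error);
	return (error != 0);
}

Run as-is it prints offset=0 resid=64 error=14: the caller sees the write as if it never started, which is the IO_UNIT contract the patch enforces for the NFS client.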