author		Konstantin Belousov <kib@FreeBSD.org>	2013-11-20 08:45:26 +0000
committer	Konstantin Belousov <kib@FreeBSD.org>	2013-11-20 08:45:26 +0000
commit		7e14088d93b50525315bbc05c000357aa61917e7 (patch)
tree		41810a15d0682deabcae46627f1aca2d0e0da675 /sys/kern/vfs_vnops.c
parent		a2c0f474c4d419d7b03ff8b645e309f5ebda31b3 (diff)
download	src-7e14088d93b50525315bbc05c000357aa61917e7.tar.gz
		src-7e14088d93b50525315bbc05c000357aa61917e7.zip
Revert back to use int for the page counts.  In vn_io_fault(), the i/o
is chunked to pieces limited by the integer io_hold_cnt tunable, while
vm_fault_quick_hold_pages() takes an integer max_count as the upper bound.
Rearrange the checks to correctly handle overflowing address arithmetic.

Submitted by:	bde
Tested by:	pho
Discussed with:	alc
MFC after:	1 week
Notes:
	svn path=/head/; revision=258365
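To illustrate the pattern the commit message describes, the following is a
minimal userland sketch (not the kernel code itself) of clamping a chunk to
io_hold_cnt pages and detecting wraparound of the rounded end address before
computing the page count.  PAGE_SIZE, round_page(), trunc_page(), io_hold_cnt
and the helper chunk_page_count() are all reimplemented or invented here for
illustration; the real definitions live in the kernel headers and in
vn_io_fault() shown in the diff below.

	#include <errno.h>
	#include <stddef.h>
	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SIZE	4096UL
	#define PAGE_MASK	(PAGE_SIZE - 1)
	#define trunc_page(x)	((uintptr_t)(x) & ~(uintptr_t)PAGE_MASK)
	#define round_page(x)	(((uintptr_t)(x) + PAGE_MASK) & ~(uintptr_t)PAGE_MASK)

	/* Stand-in for the io_hold_cnt tunable (pages held per chunk). */
	static int io_hold_cnt = 16;

	/*
	 * Clamp *lenp to io_hold_cnt pages, reject wraparound of the rounded
	 * end address, and report how many pages [addr, addr + *lenp) spans.
	 * Returns 0 on success or EFAULT if the address arithmetic overflows.
	 */
	static int
	chunk_page_count(const void *base, size_t *lenp, int *pages)
	{
		uintptr_t addr, end;
		size_t len;

		len = *lenp;
		if (len > (size_t)io_hold_cnt * PAGE_SIZE)
			len = (size_t)io_hold_cnt * PAGE_SIZE;
		addr = (uintptr_t)base;
		end = round_page(addr + len);
		if (end < addr)		/* addr + len wrapped around */
			return (EFAULT);
		*lenp = len;
		/*
		 * A misaligned start and end can each contribute a partial
		 * page, so the result is at most io_hold_cnt + 2.
		 */
		*pages = (int)((end - trunc_page(addr)) / PAGE_SIZE);
		return (0);
	}

	int
	main(void)
	{
		char buf[100000];
		size_t len = sizeof(buf);
		int npages;

		if (chunk_page_count(buf, &len, &npages) == 0)
			printf("chunk of %zu bytes spans %d pages\n", len, npages);
		return (0);
	}

As in the patch, the length is clamped before the end address is rounded, so
the int page count can never exceed io_hold_cnt + 2, and a wrapped address is
rejected with EFAULT instead of producing a bogus count.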
Diffstat (limited to 'sys/kern/vfs_vnops.c')
-rw-r--r--	sys/kern/vfs_vnops.c	20
1 file changed, 9 insertions(+), 11 deletions(-)
diff --git a/sys/kern/vfs_vnops.c b/sys/kern/vfs_vnops.c
index 1e34bd50cb96..da4a914be35a 100644
--- a/sys/kern/vfs_vnops.c
+++ b/sys/kern/vfs_vnops.c
@@ -933,9 +933,8 @@ vn_io_fault(struct file *fp, struct uio *uio, struct ucred *active_cred,
void *rl_cookie;
struct mount *mp;
vm_page_t *prev_td_ma;
- int error, save, saveheld, prev_td_ma_cnt;
+ int error, cnt, save, saveheld, prev_td_ma_cnt;
vm_offset_t addr, end;
- vm_size_t cnt;
vm_prot_t prot;
size_t len, resid;
ssize_t adv;
@@ -1008,21 +1007,20 @@ vn_io_fault(struct file *fp, struct uio *uio, struct ucred *active_cred,
uio_clone->uio_iovcnt--;
continue;
}
-
- addr = (vm_offset_t)uio_clone->uio_iov->iov_base;
+ if (len > io_hold_cnt * PAGE_SIZE)
+ len = io_hold_cnt * PAGE_SIZE;
+ addr = (uintptr_t)uio_clone->uio_iov->iov_base;
end = round_page(addr + len);
- cnt = howmany(end - trunc_page(addr), PAGE_SIZE);
+ if (end < addr) {
+ error = EFAULT;
+ break;
+ }
+ cnt = atop(end - trunc_page(addr));
/*
* A perfectly misaligned address and length could cause
* both the start and the end of the chunk to use partial
* page. +2 accounts for such a situation.
*/
- if (cnt > io_hold_cnt + 2) {
- len = io_hold_cnt * PAGE_SIZE;
- KASSERT(howmany(round_page(addr + len) -
- trunc_page(addr), PAGE_SIZE) <= io_hold_cnt + 2,
- ("cnt overflow"));
- }
cnt = vm_fault_quick_hold_pages(&td->td_proc->p_vmspace->vm_map,
addr, len, prot, ma, io_hold_cnt + 2);
if (cnt == -1) {