aboutsummaryrefslogtreecommitdiff
path: root/sys
diff options
context:
space:
mode:
authorMatthew Dillon <dillon@FreeBSD.org>2003-01-15 23:54:35 +0000
committerMatthew Dillon <dillon@FreeBSD.org>2003-01-15 23:54:35 +0000
commitf5979003290ee119a14b29c2c99bbce53f196942 (patch)
treee69a67ab93e65ba985b3b22b14b0ac7873b09ec6 /sys
parent603e003ba0baadd7eae95bfc97f7fb5b9fea4ed8 (diff)
downloadsrc-f5979003290ee119a14b29c2c99bbce53f196942.tar.gz
src-f5979003290ee119a14b29c2c99bbce53f196942.zip
Merge all the various copies of vmapbuf() and vunmapbuf() into a single
portable copy. Note that pmap_extract() must be used instead of pmap_kextract(). This is precursor work to a reorganization of vmapbuf() to close remaining user/kernel races (which can lead to a panic).
Notes
Notes: svn path=/head/; revision=109340
Diffstat (limited to 'sys')
-rw-r--r--sys/alpha/alpha/vm_machdep.c69
-rw-r--r--sys/amd64/amd64/vm_machdep.c74
-rw-r--r--sys/i386/i386/vm_machdep.c74
-rw-r--r--sys/ia64/ia64/vm_machdep.c69
-rw-r--r--sys/kern/vfs_bio.c76
-rw-r--r--sys/powerpc/aim/vm_machdep.c74
-rw-r--r--sys/powerpc/powerpc/vm_machdep.c74
-rw-r--r--sys/sparc64/sparc64/vm_machdep.c73
8 files changed, 76 insertions, 507 deletions
diff --git a/sys/alpha/alpha/vm_machdep.c b/sys/alpha/alpha/vm_machdep.c
index 8fdc15b07770..0ab8ca09f8b6 100644
--- a/sys/alpha/alpha/vm_machdep.c
+++ b/sys/alpha/alpha/vm_machdep.c
@@ -338,75 +338,6 @@ cpu_wait(p)
}
/*
- * Map an IO request into kernel virtual address space.
- *
- * All requests are (re)mapped into kernel VA space.
- * Notice that we use b_bufsize for the size of the buffer
- * to be mapped. b_bcount might be modified by the driver.
- */
-void
-vmapbuf(bp)
- register struct buf *bp;
-{
- register caddr_t addr, v, kva;
- vm_offset_t pa;
-
- GIANT_REQUIRED;
-
- if ((bp->b_flags & B_PHYS) == 0)
- panic("vmapbuf");
-
- for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
- addr < bp->b_data + bp->b_bufsize;
- addr += PAGE_SIZE, v += PAGE_SIZE) {
- /*
- * Do the vm_fault if needed; do the copy-on-write thing
- * when reading stuff off device into memory.
- */
- vm_fault_quick((addr >= bp->b_data) ? addr : bp->b_data,
- (bp->b_iocmd == BIO_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
- pa = trunc_page(pmap_kextract((vm_offset_t) addr));
- if (pa == 0)
- panic("vmapbuf: page not present");
- vm_page_hold(PHYS_TO_VM_PAGE(pa));
- pmap_kenter((vm_offset_t) v, pa);
- }
-
- kva = bp->b_saveaddr;
- bp->b_saveaddr = bp->b_data;
- bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
-}
-
-/*
- * Free the io map PTEs associated with this IO operation.
- * We also invalidate the TLB entries and restore the original b_addr.
- */
-void
-vunmapbuf(bp)
- register struct buf *bp;
-{
- register caddr_t addr;
- vm_offset_t pa;
-
- GIANT_REQUIRED;
-
- if ((bp->b_flags & B_PHYS) == 0)
- panic("vunmapbuf");
-
- vm_page_lock_queues();
- for (addr = (caddr_t)trunc_page(bp->b_data);
- addr < bp->b_data + bp->b_bufsize;
- addr += PAGE_SIZE) {
- pa = trunc_page(pmap_kextract((vm_offset_t) addr));
- pmap_kremove((vm_offset_t) addr);
- vm_page_unhold(PHYS_TO_VM_PAGE(pa));
- }
- vm_page_unlock_queues();
-
- bp->b_data = bp->b_saveaddr;
-}
-
-/*
* Reset back to firmware.
*/
void
diff --git a/sys/amd64/amd64/vm_machdep.c b/sys/amd64/amd64/vm_machdep.c
index 4f1734a4a0ab..eaa117430795 100644
--- a/sys/amd64/amd64/vm_machdep.c
+++ b/sys/amd64/amd64/vm_machdep.c
@@ -436,80 +436,6 @@ kvtop(void *addr)
}
/*
- * Map an IO request into kernel virtual address space.
- *
- * All requests are (re)mapped into kernel VA space.
- * Notice that we use b_bufsize for the size of the buffer
- * to be mapped. b_bcount might be modified by the driver.
- */
-void
-vmapbuf(bp)
- register struct buf *bp;
-{
- register caddr_t addr, kva;
- vm_offset_t pa;
- int pidx;
- struct vm_page *m;
-
- GIANT_REQUIRED;
-
- if ((bp->b_flags & B_PHYS) == 0)
- panic("vmapbuf");
-
- for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data), pidx = 0;
- addr < bp->b_data + bp->b_bufsize;
- addr += PAGE_SIZE, pidx++) {
- /*
- * Do the vm_fault if needed; do the copy-on-write thing
- * when reading stuff off device into memory.
- */
- vm_fault_quick((addr >= bp->b_data) ? addr : bp->b_data,
- (bp->b_iocmd == BIO_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
- pa = trunc_page(pmap_kextract((vm_offset_t) addr));
- if (pa == 0)
- panic("vmapbuf: page not present");
- m = PHYS_TO_VM_PAGE(pa);
- vm_page_hold(m);
- bp->b_pages[pidx] = m;
- }
- if (pidx > btoc(MAXPHYS))
- panic("vmapbuf: mapped more than MAXPHYS");
- pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
-
- kva = bp->b_saveaddr;
- bp->b_npages = pidx;
- bp->b_saveaddr = bp->b_data;
- bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
-}
-
-/*
- * Free the io map PTEs associated with this IO operation.
- * We also invalidate the TLB entries and restore the original b_addr.
- */
-void
-vunmapbuf(bp)
- register struct buf *bp;
-{
- int pidx;
- int npages;
-
- GIANT_REQUIRED;
-
- if ((bp->b_flags & B_PHYS) == 0)
- panic("vunmapbuf");
-
- npages = bp->b_npages;
- pmap_qremove(trunc_page((vm_offset_t)bp->b_data),
- npages);
- vm_page_lock_queues();
- for (pidx = 0; pidx < npages; pidx++)
- vm_page_unhold(bp->b_pages[pidx]);
- vm_page_unlock_queues();
-
- bp->b_data = bp->b_saveaddr;
-}
-
-/*
* Force reset the processor by invalidating the entire address space!
*/
diff --git a/sys/i386/i386/vm_machdep.c b/sys/i386/i386/vm_machdep.c
index 4f1734a4a0ab..eaa117430795 100644
--- a/sys/i386/i386/vm_machdep.c
+++ b/sys/i386/i386/vm_machdep.c
@@ -436,80 +436,6 @@ kvtop(void *addr)
}
/*
- * Map an IO request into kernel virtual address space.
- *
- * All requests are (re)mapped into kernel VA space.
- * Notice that we use b_bufsize for the size of the buffer
- * to be mapped. b_bcount might be modified by the driver.
- */
-void
-vmapbuf(bp)
- register struct buf *bp;
-{
- register caddr_t addr, kva;
- vm_offset_t pa;
- int pidx;
- struct vm_page *m;
-
- GIANT_REQUIRED;
-
- if ((bp->b_flags & B_PHYS) == 0)
- panic("vmapbuf");
-
- for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data), pidx = 0;
- addr < bp->b_data + bp->b_bufsize;
- addr += PAGE_SIZE, pidx++) {
- /*
- * Do the vm_fault if needed; do the copy-on-write thing
- * when reading stuff off device into memory.
- */
- vm_fault_quick((addr >= bp->b_data) ? addr : bp->b_data,
- (bp->b_iocmd == BIO_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
- pa = trunc_page(pmap_kextract((vm_offset_t) addr));
- if (pa == 0)
- panic("vmapbuf: page not present");
- m = PHYS_TO_VM_PAGE(pa);
- vm_page_hold(m);
- bp->b_pages[pidx] = m;
- }
- if (pidx > btoc(MAXPHYS))
- panic("vmapbuf: mapped more than MAXPHYS");
- pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
-
- kva = bp->b_saveaddr;
- bp->b_npages = pidx;
- bp->b_saveaddr = bp->b_data;
- bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
-}
-
-/*
- * Free the io map PTEs associated with this IO operation.
- * We also invalidate the TLB entries and restore the original b_addr.
- */
-void
-vunmapbuf(bp)
- register struct buf *bp;
-{
- int pidx;
- int npages;
-
- GIANT_REQUIRED;
-
- if ((bp->b_flags & B_PHYS) == 0)
- panic("vunmapbuf");
-
- npages = bp->b_npages;
- pmap_qremove(trunc_page((vm_offset_t)bp->b_data),
- npages);
- vm_page_lock_queues();
- for (pidx = 0; pidx < npages; pidx++)
- vm_page_unhold(bp->b_pages[pidx]);
- vm_page_unlock_queues();
-
- bp->b_data = bp->b_saveaddr;
-}
-
-/*
* Force reset the processor by invalidating the entire address space!
*/
diff --git a/sys/ia64/ia64/vm_machdep.c b/sys/ia64/ia64/vm_machdep.c
index 6c64f587488b..d9488353c752 100644
--- a/sys/ia64/ia64/vm_machdep.c
+++ b/sys/ia64/ia64/vm_machdep.c
@@ -351,75 +351,6 @@ cpu_wait(p)
}
/*
- * Map an IO request into kernel virtual address space.
- *
- * All requests are (re)mapped into kernel VA space.
- * Notice that we use b_bufsize for the size of the buffer
- * to be mapped. b_bcount might be modified by the driver.
- */
-void
-vmapbuf(bp)
- register struct buf *bp;
-{
- register caddr_t addr, v, kva;
- vm_offset_t pa;
-
- GIANT_REQUIRED;
-
- if ((bp->b_flags & B_PHYS) == 0)
- panic("vmapbuf");
-
- for (v = bp->b_saveaddr, addr = (caddr_t)trunc_page(bp->b_data);
- addr < bp->b_data + bp->b_bufsize;
- addr += PAGE_SIZE, v += PAGE_SIZE) {
- /*
- * Do the vm_fault if needed; do the copy-on-write thing
- * when reading stuff off device into memory.
- */
- vm_fault_quick((addr >= bp->b_data) ? addr : bp->b_data,
- (bp->b_iocmd == BIO_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
- pa = trunc_page(pmap_kextract((vm_offset_t) addr));
- if (pa == 0)
- panic("vmapbuf: page not present");
- vm_page_hold(PHYS_TO_VM_PAGE(pa));
- pmap_kenter((vm_offset_t) v, pa);
- }
-
- kva = bp->b_saveaddr;
- bp->b_saveaddr = bp->b_data;
- bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
-}
-
-/*
- * Free the io map PTEs associated with this IO operation.
- * We also invalidate the TLB entries and restore the original b_addr.
- */
-void
-vunmapbuf(bp)
- register struct buf *bp;
-{
- register caddr_t addr;
- vm_offset_t pa;
-
- GIANT_REQUIRED;
-
- if ((bp->b_flags & B_PHYS) == 0)
- panic("vunmapbuf");
-
- vm_page_lock_queues();
- for (addr = (caddr_t)trunc_page(bp->b_data);
- addr < bp->b_data + bp->b_bufsize;
- addr += PAGE_SIZE) {
- pa = trunc_page(pmap_kextract((vm_offset_t) addr));
- pmap_kremove((vm_offset_t) addr);
- vm_page_unhold(PHYS_TO_VM_PAGE(pa));
- }
- vm_page_unlock_queues();
-
- bp->b_data = bp->b_saveaddr;
-}
-
-/*
* Force reset the processor by invalidating the entire address space!
*/
void
diff --git a/sys/kern/vfs_bio.c b/sys/kern/vfs_bio.c
index 3293ab433c62..01f17955bca3 100644
--- a/sys/kern/vfs_bio.c
+++ b/sys/kern/vfs_bio.c
@@ -3541,6 +3541,82 @@ vm_hold_free_pages(struct buf * bp, vm_offset_t from, vm_offset_t to)
bp->b_npages = newnpages;
}
+/*
+ * Map an IO request into kernel virtual address space.
+ *
+ * All requests are (re)mapped into kernel VA space.
+ * Notice that we use b_bufsize for the size of the buffer
+ * to be mapped. b_bcount might be modified by the driver.
+ */
+void
+vmapbuf(struct buf *bp)
+{
+ caddr_t addr, kva;
+ vm_offset_t pa;
+ int pidx;
+ struct vm_page *m;
+ struct pmap *pmap = &curproc->p_vmspace->vm_pmap;
+
+ GIANT_REQUIRED;
+
+ if ((bp->b_flags & B_PHYS) == 0)
+ panic("vmapbuf");
+
+ for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data), pidx = 0;
+ addr < bp->b_data + bp->b_bufsize;
+ addr += PAGE_SIZE, pidx++) {
+ /*
+ * Do the vm_fault if needed; do the copy-on-write thing
+ * when reading stuff off device into memory.
+ *
+ * NOTE! Must use pmap_extract() because addr may be in
+	 * the userland address space, and kextract is only guaranteed
+	 * to work for the kernel address space (see: sparc64 port).
+ */
+ vm_fault_quick((addr >= bp->b_data) ? addr : bp->b_data,
+ (bp->b_iocmd == BIO_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
+ pa = trunc_page(pmap_extract(pmap, (vm_offset_t) addr));
+ if (pa == 0)
+ panic("vmapbuf: page not present");
+ m = PHYS_TO_VM_PAGE(pa);
+ vm_page_hold(m);
+ bp->b_pages[pidx] = m;
+ }
+ if (pidx > btoc(MAXPHYS))
+ panic("vmapbuf: mapped more than MAXPHYS");
+ pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
+
+ kva = bp->b_saveaddr;
+ bp->b_npages = pidx;
+ bp->b_saveaddr = bp->b_data;
+ bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
+}
+
+/*
+ * Free the io map PTEs associated with this IO operation.
+ * We also invalidate the TLB entries and restore the original b_addr.
+ */
+void
+vunmapbuf(struct buf *bp)
+{
+ int pidx;
+ int npages;
+
+ GIANT_REQUIRED;
+
+ if ((bp->b_flags & B_PHYS) == 0)
+ panic("vunmapbuf");
+
+ npages = bp->b_npages;
+ pmap_qremove(trunc_page((vm_offset_t)bp->b_data),
+ npages);
+ vm_page_lock_queues();
+ for (pidx = 0; pidx < npages; pidx++)
+ vm_page_unhold(bp->b_pages[pidx]);
+ vm_page_unlock_queues();
+
+ bp->b_data = bp->b_saveaddr;
+}
#include "opt_ddb.h"
#ifdef DDB
diff --git a/sys/powerpc/aim/vm_machdep.c b/sys/powerpc/aim/vm_machdep.c
index c5e4fa5ef3c1..fc0a30f8918d 100644
--- a/sys/powerpc/aim/vm_machdep.c
+++ b/sys/powerpc/aim/vm_machdep.c
@@ -231,80 +231,6 @@ cpu_throw(void)
}
/*
- * Map an IO request into kernel virtual address space.
- *
- * All requests are (re)mapped into kernel VA space.
- * Notice that we use b_bufsize for the size of the buffer
- * to be mapped. b_bcount might be modified by the driver.
- */
-void
-vmapbuf(struct buf *bp)
-{
- caddr_t addr, kva;
- vm_offset_t pa;
- int pidx;
- struct vm_page *m;
- pmap_t pmap;
-
- GIANT_REQUIRED;
-
- if ((bp->b_flags & B_PHYS) == 0)
- panic("vmapbuf");
-
- pmap = &curproc->p_vmspace->vm_pmap;
- for (addr = (caddr_t)trunc_page(bp->b_data), pidx = 0;
- addr < bp->b_data + bp->b_bufsize;
- addr += PAGE_SIZE, pidx++) {
- /*
- * Do the vm_fault if needed; do the copy-on-write thing
- * when reading stuff off device into memory.
- */
- vm_fault_quick((addr >= bp->b_data) ? addr : bp->b_data,
- (bp->b_iocmd == BIO_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
- pa = trunc_page(pmap_extract(pmap, (vm_offset_t) addr));
- if (pa == 0)
- panic("vmapbuf: page not present");
- m = PHYS_TO_VM_PAGE(pa);
- vm_page_hold(m);
- bp->b_pages[pidx] = m;
- }
- if (pidx > btoc(MAXPHYS))
- panic("vmapbuf: mapped more than MAXPHYS");
- pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
-
- kva = bp->b_saveaddr;
- bp->b_npages = pidx;
- bp->b_saveaddr = bp->b_data;
- bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
-}
-
-/*
- * Free the io map PTEs associated with this IO operation.
- * We also invalidate the TLB entries and restore the original b_addr.
- */
-void
-vunmapbuf(struct buf *bp)
-{
- int pidx;
- int npages;
-
- GIANT_REQUIRED;
-
- if ((bp->b_flags & B_PHYS) == 0)
- panic("vunmapbuf");
-
- npages = bp->b_npages;
- pmap_qremove(trunc_page((vm_offset_t)bp->b_data),
- npages);
- vm_page_lock_queues();
- for (pidx = 0; pidx < npages; pidx++)
- vm_page_unhold(bp->b_pages[pidx]);
- vm_page_unlock_queues();
-
- bp->b_data = bp->b_saveaddr;
-}
-
-/*
* Reset back to firmware.
*/
void
diff --git a/sys/powerpc/powerpc/vm_machdep.c b/sys/powerpc/powerpc/vm_machdep.c
index c5e4fa5ef3c1..fc0a30f8918d 100644
--- a/sys/powerpc/powerpc/vm_machdep.c
+++ b/sys/powerpc/powerpc/vm_machdep.c
@@ -231,80 +231,6 @@ cpu_throw(void)
}
/*
- * Map an IO request into kernel virtual address space.
- *
- * All requests are (re)mapped into kernel VA space.
- * Notice that we use b_bufsize for the size of the buffer
- * to be mapped. b_bcount might be modified by the driver.
- */
-void
-vmapbuf(struct buf *bp)
-{
- caddr_t addr, kva;
- vm_offset_t pa;
- int pidx;
- struct vm_page *m;
- pmap_t pmap;
-
- GIANT_REQUIRED;
-
- if ((bp->b_flags & B_PHYS) == 0)
- panic("vmapbuf");
-
- pmap = &curproc->p_vmspace->vm_pmap;
- for (addr = (caddr_t)trunc_page(bp->b_data), pidx = 0;
- addr < bp->b_data + bp->b_bufsize;
- addr += PAGE_SIZE, pidx++) {
- /*
- * Do the vm_fault if needed; do the copy-on-write thing
- * when reading stuff off device into memory.
- */
- vm_fault_quick((addr >= bp->b_data) ? addr : bp->b_data,
- (bp->b_iocmd == BIO_READ)?(VM_PROT_READ|VM_PROT_WRITE):VM_PROT_READ);
- pa = trunc_page(pmap_extract(pmap, (vm_offset_t) addr));
- if (pa == 0)
- panic("vmapbuf: page not present");
- m = PHYS_TO_VM_PAGE(pa);
- vm_page_hold(m);
- bp->b_pages[pidx] = m;
- }
- if (pidx > btoc(MAXPHYS))
- panic("vmapbuf: mapped more than MAXPHYS");
- pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
-
- kva = bp->b_saveaddr;
- bp->b_npages = pidx;
- bp->b_saveaddr = bp->b_data;
- bp->b_data = kva + (((vm_offset_t) bp->b_data) & PAGE_MASK);
-}
-
-/*
- * Free the io map PTEs associated with this IO operation.
- * We also invalidate the TLB entries and restore the original b_addr.
- */
-void
-vunmapbuf(struct buf *bp)
-{
- int pidx;
- int npages;
-
- GIANT_REQUIRED;
-
- if ((bp->b_flags & B_PHYS) == 0)
- panic("vunmapbuf");
-
- npages = bp->b_npages;
- pmap_qremove(trunc_page((vm_offset_t)bp->b_data),
- npages);
- vm_page_lock_queues();
- for (pidx = 0; pidx < npages; pidx++)
- vm_page_unhold(bp->b_pages[pidx]);
- vm_page_unlock_queues();
-
- bp->b_data = bp->b_saveaddr;
-}
-
-/*
* Reset back to firmware.
*/
void
diff --git a/sys/sparc64/sparc64/vm_machdep.c b/sys/sparc64/sparc64/vm_machdep.c
index 344d7c5394c4..ad86559338e7 100644
--- a/sys/sparc64/sparc64/vm_machdep.c
+++ b/sys/sparc64/sparc64/vm_machdep.c
@@ -377,76 +377,3 @@ vm_fault_quick(caddr_t v, int prot)
return(r);
}
-/*
- * Map an IO request into kernel virtual address space.
- *
- * All requests are (re)mapped into kernel VA space.
- * Notice that we use b_bufsize for the size of the buffer
- * to be mapped. b_bcount might be modified by the driver.
- */
-void
-vmapbuf(struct buf *bp)
-{
- caddr_t addr, kva;
- vm_offset_t pa;
- int pidx;
- struct vm_page *m;
- pmap_t pmap;
-
- GIANT_REQUIRED;
-
- if ((bp->b_flags & B_PHYS) == 0)
- panic("vmapbuf");
-
- pmap = &curproc->p_vmspace->vm_pmap;
- for (addr = (caddr_t)trunc_page((vm_offset_t)bp->b_data), pidx = 0;
- addr < bp->b_data + bp->b_bufsize; addr += PAGE_SIZE, pidx++) {
- /*
- * Do the vm_fault if needed; do the copy-on-write thing
- * when reading stuff off device into memory.
- */
- vm_fault_quick((addr >= bp->b_data) ? addr : bp->b_data,
- (bp->b_iocmd == BIO_READ) ? (VM_PROT_READ | VM_PROT_WRITE) :
- VM_PROT_READ);
- pa = trunc_page(pmap_extract(pmap, (vm_offset_t)addr));
- if (pa == 0)
- panic("vmapbuf: page not present");
- m = PHYS_TO_VM_PAGE(pa);
- vm_page_hold(m);
- bp->b_pages[pidx] = m;
- }
- if (pidx > btoc(MAXPHYS))
- panic("vmapbuf: mapped more than MAXPHYS");
- pmap_qenter((vm_offset_t)bp->b_saveaddr, bp->b_pages, pidx);
-
- kva = bp->b_saveaddr;
- bp->b_npages = pidx;
- bp->b_saveaddr = bp->b_data;
- bp->b_data = kva + (((vm_offset_t)bp->b_data) & PAGE_MASK);
-}
-
-/*
- * Free the io map PTEs associated with this IO operation.
- * We also invalidate the TLB entries and restore the original b_addr.
- */
-void
-vunmapbuf(struct buf *bp)
-{
- int pidx;
- int npages;
-
- GIANT_REQUIRED;
-
- if ((bp->b_flags & B_PHYS) == 0)
- panic("vunmapbuf");
-
- npages = bp->b_npages;
- pmap_qremove(trunc_page((vm_offset_t)bp->b_data),
- npages);
- vm_page_lock_queues();
- for (pidx = 0; pidx < npages; pidx++)
- vm_page_unhold(bp->b_pages[pidx]);
- vm_page_unlock_queues();
-
- bp->b_data = bp->b_saveaddr;
-}