Diffstat (limited to 'sys/kern')
-rw-r--r--  sys/kern/vfs_default.c  |  160 ------------------------------
1 file changed, 0 insertions, 160 deletions
diff --git a/sys/kern/vfs_default.c b/sys/kern/vfs_default.c
index c3b7a19af202..d227bf6c22e1 100644
--- a/sys/kern/vfs_default.c
+++ b/sys/kern/vfs_default.c
@@ -369,166 +369,6 @@ vop_stdpoll(ap)
}

/*
- * Stubs to use when there is no locking to be done on the underlying object.
- * A minimal shared lock is necessary to ensure that the underlying object
- * is not revoked while an operation is in progress. So, an active shared
- * count is maintained in an auxiliary vnode lock structure.
- */
-int
-vop_sharedlock(ap)
- struct vop_lock_args /* {
- struct vnode *a_vp;
- int a_flags;
- struct thread *a_td;
- } */ *ap;
-{
- /*
- * This code cannot be used until all the non-locking filesystems
- * (notably NFS) are converted to properly lock and release nodes.
- * Also, certain vnode operations change the locking state within
- * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
- * and symlink). Ideally these operations should not change the
- * lock state, but should be changed to let the caller of the
- * function unlock them. Otherwise all intermediate vnode layers
- * (such as union, umapfs, etc) must catch these functions to do
- * the necessary locking at their layer. Note that the inactive
- * and lookup operations also change their lock state, but this
- * cannot be avoided, so these two operations will always need
- * to be handled in intermediate layers.
- */
- struct vnode *vp = ap->a_vp;
- int vnflags, flags = ap->a_flags;
-
- switch (flags & LK_TYPE_MASK) {
- case LK_DRAIN:
- vnflags = LK_DRAIN;
- break;
- case LK_EXCLUSIVE:
-#ifdef DEBUG_VFS_LOCKS
- /*
- * Normally, we use shared locks here, but that confuses
- * the locking assertions.
- */
- vnflags = LK_EXCLUSIVE;
- break;
-#endif
- case LK_SHARED:
- vnflags = LK_SHARED;
- break;
- case LK_UPGRADE:
- case LK_EXCLUPGRADE:
- case LK_DOWNGRADE:
- return (0);
- case LK_RELEASE:
- default:
- panic("vop_sharedlock: bad operation %d", flags & LK_TYPE_MASK);
- }
- vnflags |= flags & (LK_INTERLOCK | LK_EXTFLG_MASK);
-#ifndef DEBUG_LOCKS
- return (lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
-#else
- return (debuglockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td,
- "vop_sharedlock", vp->filename, vp->line));
-#endif
-}
-
-/*
- * Stubs to use when there is no locking to be done on the underlying object.
- * A minimal shared lock is necessary to ensure that the underlying object
- * is not revoked while an operation is in progress. So, an active shared
- * count is maintained in an auxiliary vnode lock structure.
- */
-int
-vop_nolock(ap)
- struct vop_lock_args /* {
- struct vnode *a_vp;
- int a_flags;
- struct thread *a_td;
- } */ *ap;
-{
-#ifdef notyet
- /*
- * This code cannot be used until all the non-locking filesystems
- * (notably NFS) are converted to properly lock and release nodes.
- * Also, certain vnode operations change the locking state within
- * the operation (create, mknod, remove, link, rename, mkdir, rmdir,
- * and symlink). Ideally these operations should not change the
- * lock state, but should be changed to let the caller of the
- * function unlock them. Otherwise all intermediate vnode layers
- * (such as union, umapfs, etc) must catch these functions to do
- * the necessary locking at their layer. Note that the inactive
- * and lookup operations also change their lock state, but this
- * cannot be avoided, so these two operations will always need
- * to be handled in intermediate layers.
- */
- struct vnode *vp = ap->a_vp;
- int vnflags, flags = ap->a_flags;
-
- switch (flags & LK_TYPE_MASK) {
- case LK_DRAIN:
- vnflags = LK_DRAIN;
- break;
- case LK_EXCLUSIVE:
- case LK_SHARED:
- vnflags = LK_SHARED;
- break;
- case LK_UPGRADE:
- case LK_EXCLUPGRADE:
- case LK_DOWNGRADE:
- return (0);
- case LK_RELEASE:
- default:
- panic("vop_nolock: bad operation %d", flags & LK_TYPE_MASK);
- }
- vnflags |= flags & (LK_INTERLOCK | LK_EXTFLG_MASK);
- return (lockmgr(vp->v_vnlock, vnflags, VI_MTX(vp), ap->a_td));
-#else /* for now */
- /*
- * Since we are not using the lock manager, we must clear
- * the interlock here.
- */
- if (ap->a_flags & LK_INTERLOCK)
- VI_UNLOCK(ap->a_vp);
- return (0);
-#endif
-}
-
-/*
- * Do the inverse of vop_nolock, handling the interlock in a compatible way.
- */
-int
-vop_nounlock(ap)
- struct vop_unlock_args /* {
- struct vnode *a_vp;
- int a_flags;
- struct thread *a_td;
- } */ *ap;
-{
-
- /*
- * Since we are not using the lock manager, we must clear
- * the interlock here.
- */
- if (ap->a_flags & LK_INTERLOCK)
- VI_UNLOCK(ap->a_vp);
- return (0);
-}
-
-/*
- * Return whether or not the node is locked; this stub always reports unlocked.
- */
-int
-vop_noislocked(ap)
- struct vop_islocked_args /* {
- struct vnode *a_vp;
- struct thread *a_td;
- } */ *ap;
-{
-
- return (0);
-}
-
-/*
* Return our mount point, as we will take charge of the writes.
*/
int
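
The two lock stubs removed above share one core pattern: translate the generic
LK_* request in a_flags into flags for lockmgr(9), demote exclusive requests to
shared, treat upgrades and downgrades as no-ops, and treat a stray LK_RELEASE
as a caller bug. Below is a minimal, standalone C sketch of that mapping, for
illustration only; the LK_* constants and map_lock_request() are placeholders,
not the real definitions from <sys/lockmgr.h>.

#include <stdio.h>
#include <stdlib.h>

/* Placeholder request types; the real ones live in <sys/lockmgr.h>. */
#define LK_TYPE_MASK   0x0f
#define LK_SHARED      0x01
#define LK_EXCLUSIVE   0x02
#define LK_UPGRADE     0x03
#define LK_EXCLUPGRADE 0x04
#define LK_DOWNGRADE   0x05
#define LK_RELEASE     0x06
#define LK_DRAIN       0x07

/*
 * Map a generic lock request onto lock-manager flags the way the removed
 * stubs did: drain passes through, exclusive is demoted to shared,
 * upgrades/downgrades are no-ops, release is a caller bug.  Returns 1 if
 * the caller should go on to take the lock, 0 if the request is a no-op.
 */
static int
map_lock_request(int flags, int *vnflags)
{
	switch (flags & LK_TYPE_MASK) {
	case LK_DRAIN:
		*vnflags = LK_DRAIN;
		break;
	case LK_EXCLUSIVE:	/* falls through: demoted to shared */
	case LK_SHARED:
		*vnflags = LK_SHARED;
		break;
	case LK_UPGRADE:
	case LK_EXCLUPGRADE:
	case LK_DOWNGRADE:
		return (0);	/* nothing to do without a real lock */
	case LK_RELEASE:
	default:
		fprintf(stderr, "bad operation %d\n", flags & LK_TYPE_MASK);
		abort();	/* the kernel stubs panic()ed here */
	}
	return (1);
}

int
main(void)
{
	int vnflags;

	if (map_lock_request(LK_EXCLUSIVE, &vnflags))
		printf("exclusive request mapped to %#x (LK_SHARED)\n", vnflags);
	return (0);
}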
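
The other convention the stubs above preserve is LK_INTERLOCK handling: even
when no real locking is done, a lock or unlock routine must drop the vnode
interlock if the caller passed it in held, which is all that vop_nounlock()
and the "#else" branch of vop_nolock() actually do. A user-space sketch of
that contract follows, with pthread_mutex_t standing in for the vnode
interlock and all names and flag values hypothetical.

#include <pthread.h>
#include <stdio.h>

#define LK_INTERLOCK 0x10000	/* placeholder, not the real kernel value */

struct fake_vnode {
	pthread_mutex_t v_interlock;	/* stand-in for the vnode interlock */
};

/*
 * No-op lock/unlock: the only obligation is to release the interlock
 * when the caller passed it in held, mirroring vop_nounlock() above.
 */
static int
fake_nolock(struct fake_vnode *vp, int flags)
{
	if (flags & LK_INTERLOCK)
		pthread_mutex_unlock(&vp->v_interlock);
	return (0);
}

int
main(void)
{
	struct fake_vnode vn;

	pthread_mutex_init(&vn.v_interlock, NULL);
	pthread_mutex_lock(&vn.v_interlock);	/* enter holding the interlock */
	fake_nolock(&vn, LK_INTERLOCK);		/* stub must drop it */
	pthread_mutex_destroy(&vn.v_interlock);
	printf("interlock released by the no-op stub\n");
	return (0);
}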