Don't call VOP_CREATEVOBJECT(), it's the responsibility of the filesystem which owns the vnode.
Poul-Henning Kamp 2005-01-24 23:53:54 +00:00
parent f74b3b1f6c
commit dcff5b1440
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=140779
8 changed files with 2 additions and 138 deletions
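In brief, what this series changes: the open and lookup paths shown below (vn_open_cred(), kern_open(), fhopen(), namei()/relookup(), and the coda and union layers above them) had been calling VOP_CREATEVOBJECT() to create a backing VM object for the vnode; after this commit that is left to the filesystem which owns the vnode, e.g. from its own VOP_OPEN routine (the diff itself only removes the callers' side, so the exact placement is an assumption). The sketch below is illustrative only and is not part of this diff: myfs_open is a hypothetical filesystem open routine, and vnode_create_vobject() is assumed as the helper such a filesystem would use.

#include <sys/param.h>
#include <sys/vnode.h>

/*
 * Hypothetical example (not from this commit): the owning filesystem
 * creates the VM object for regular files in its own open routine,
 * instead of relying on vn_open()/namei() to call VOP_CREATEVOBJECT()
 * on its behalf.
 */
static int
myfs_open(struct vop_open_args *ap)
{
	struct vnode *vp = ap->a_vp;

	/* Only regular files get a backing object; pass the size if known. */
	if (vp->v_type == VREG)
		vnode_create_vobject(vp, 0 /* file size, if known */, ap->a_td);

	return (0);
}

This matches the commit message: creating the object is the responsibility of the filesystem which owns the vnode, which is why the vn_canvmio()/VOP_GETVOBJECT() boilerplate in the callers below can be deleted.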

View file

@@ -266,13 +266,6 @@ coda_open(struct vop_open_args *ap)
return (error);
}
/* grab (above) does this when it calls newvnode unless it's in the cache*/
if (vp->v_type == VREG) {
error = VOP_CREATEVOBJECT(vp, cred, td);
if (error != 0) {
printf("coda_open: VOP_CREATEVOBJECT() returns %d\n", error);
vput(vp);
}
}
return(error);
}
@@ -431,17 +424,6 @@ printf("coda_rdwr: Internally Opening %p\n", vp);
printf("coda_rdwr: VOP_OPEN on container failed %d\n", error);
return (error);
}
if (vp->v_type == VREG) {
error = VOP_CREATEVOBJECT(vp, cred, td);
if (error != 0) {
printf("coda_rdwr: VOP_CREATEVOBJECT() returns %d\n", error);
vput(vp);
}
}
if (error) {
MARK_INT_FAIL(CODA_RDWR_STATS);
return(error);
}
cfvp = cp->c_ovp;
}
}
@@ -1562,14 +1544,6 @@ printf("coda_readdir: Internally Opening %p\n", vp);
printf("coda_readdir: VOP_OPEN on container failed %d\n", error);
return (error);
}
if (vp->v_type == VREG) {
error = VOP_CREATEVOBJECT(vp, cred, td);
if (error != 0) {
printf("coda_readdir: VOP_CREATEVOBJECT() returns %d\n", error);
vput(vp);
}
}
if (error) return(error);
}
/* Have UFS handle the call. */

View file

@@ -266,13 +266,6 @@ coda_open(struct vop_open_args *ap)
return (error);
}
/* grab (above) does this when it calls newvnode unless it's in the cache*/
if (vp->v_type == VREG) {
error = VOP_CREATEVOBJECT(vp, cred, td);
if (error != 0) {
printf("coda_open: VOP_CREATEVOBJECT() returns %d\n", error);
vput(vp);
}
}
return(error);
}
@@ -431,17 +424,6 @@ printf("coda_rdwr: Internally Opening %p\n", vp);
printf("coda_rdwr: VOP_OPEN on container failed %d\n", error);
return (error);
}
if (vp->v_type == VREG) {
error = VOP_CREATEVOBJECT(vp, cred, td);
if (error != 0) {
printf("coda_rdwr: VOP_CREATEVOBJECT() returns %d\n", error);
vput(vp);
}
}
if (error) {
MARK_INT_FAIL(CODA_RDWR_STATS);
return(error);
}
cfvp = cp->c_ovp;
}
}
@@ -1562,14 +1544,6 @@ printf("coda_readdir: Internally Opening %p\n", vp);
printf("coda_readdir: VOP_OPEN on container failed %d\n", error);
return (error);
}
if (vp->v_type == VREG) {
error = VOP_CREATEVOBJECT(vp, cred, td);
if (error != 0) {
printf("coda_readdir: VOP_CREATEVOBJECT() returns %d\n", error);
vput(vp);
}
}
if (error) return(error);
}
/* Have UFS handle the call. */

View file

@@ -781,8 +781,6 @@ union_copyup(un, docopy, cred, td)
*/
vn_lock(lvp, LK_EXCLUSIVE | LK_RETRY, td);
error = VOP_OPEN(lvp, FREAD, cred, td, -1);
if (error == 0 && vn_canvmio(lvp) == TRUE)
error = VOP_CREATEVOBJECT(lvp, cred, td);
if (error == 0) {
error = union_copyfile(lvp, uvp, cred, td);
VOP_UNLOCK(lvp, 0, td);
@@ -813,10 +811,6 @@ union_copyup(un, docopy, cred, td)
(void) VOP_CLOSE(lvp, FREAD, cred, td);
(void) VOP_OPEN(uvp, FREAD, cred, td, -1);
}
if (un->un_openl) {
if (vn_canvmio(uvp) == TRUE)
error = VOP_CREATEVOBJECT(uvp, cred, td);
}
un->un_openl = 0;
}
@@ -1127,8 +1121,6 @@ union_vn_create(vpp, un, td)
return (error);
error = VOP_OPEN(vp, fmode, cred, td, -1);
if (error == 0 && vn_canvmio(vp) == TRUE)
error = VOP_CREATEVOBJECT(vp, cred, td);
if (error) {
vput(vp);
return (error);
@@ -1330,8 +1322,6 @@ union_dircheck(struct thread *td, struct vnode **vp, struct file *fp)
if (lvp != NULLVP) {
error = VOP_OPEN(lvp, FREAD, fp->f_cred, td, -1);
if (error == 0 && vn_canvmio(lvp) == TRUE)
error = VOP_CREATEVOBJECT(lvp, fp->f_cred, td);
if (error) {
vput(lvp);
return (error);

View file

@@ -782,12 +782,6 @@ union_open(ap)
if (error == 0)
error = VOP_OPEN(tvp, mode, cred, td, -1);
/*
* This is absolutely necessary or UFS will blow up.
*/
if (error == 0 && vn_canvmio(tvp) == TRUE)
error = VOP_CREATEVOBJECT(tvp, cred, td);
/*
* Release any locks held.
*/

View file

@@ -1071,10 +1071,6 @@ kern_open(struct thread *td, char *path, enum uio_seg pathseg, int flags,
FILE_UNLOCK(fp);
FILEDESC_UNLOCK(fdp);
/* assert that vn_open created a backing object if one is needed */
KASSERT(!vn_canvmio(vp) || VOP_GETVOBJECT(vp, NULL) == 0,
("open: vmio vnode has no backing object after vn_open"));
VOP_UNLOCK(vp, 0, td);
if (flags & (O_EXLOCK | O_SHLOCK)) {
lf.l_whence = SEEK_SET;
@@ -4119,13 +4115,7 @@ fhopen(td, uap)
error = VOP_OPEN(vp, fmode, td->td_ucred, td, -1);
if (error)
goto bad;
/*
* Make sure that a VM object is created for VMIO support.
*/
if (vn_canvmio(vp) == TRUE) {
if ((error = VOP_CREATEVOBJECT(vp, td->td_ucred, td)) != 0)
goto bad;
}
if (fmode & FWRITE)
vp->v_writecount++;
@@ -4176,8 +4166,6 @@ fhopen(td, uap)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
fp->f_flag |= FHASLOCK;
}
if ((vp->v_type == VREG) && (VOP_GETVOBJECT(vp, NULL) != 0))
VOP_CREATEVOBJECT(vp, td->td_ucred, td);
VOP_UNLOCK(vp, 0, td);
fdrop(fp, td);

View file

@@ -210,12 +210,6 @@ namei(ndp)
} else
cnp->cn_flags |= HASBUF;
if (vn_canvmio(ndp->ni_vp) == TRUE &&
(cnp->cn_nameiop != DELETE) &&
((cnp->cn_flags & (NOOBJ|LOCKLEAF)) ==
LOCKLEAF))
VOP_CREATEVOBJECT(ndp->ni_vp,
ndp->ni_cnd.cn_cred, td);
if ((cnp->cn_flags & MPSAFE) == 0) {
VFS_UNLOCK_GIANT(vfslocked);
} else if (vfslocked)
@@ -800,10 +794,6 @@ relookup(dvp, vpp, cnp)
if (!wantparent)
vrele(dvp);
if (vn_canvmio(dp) == TRUE &&
((cnp->cn_flags & (NOOBJ|LOCKLEAF)) == LOCKLEAF))
VOP_CREATEVOBJECT(dp, cnp->cn_cred, td);
if ((cnp->cn_flags & LOCKLEAF) == 0)
VOP_UNLOCK(dp, 0, td);
return (0);

View file

@@ -1071,10 +1071,6 @@ kern_open(struct thread *td, char *path, enum uio_seg pathseg, int flags,
FILE_UNLOCK(fp);
FILEDESC_UNLOCK(fdp);
/* assert that vn_open created a backing object if one is needed */
KASSERT(!vn_canvmio(vp) || VOP_GETVOBJECT(vp, NULL) == 0,
("open: vmio vnode has no backing object after vn_open"));
VOP_UNLOCK(vp, 0, td);
if (flags & (O_EXLOCK | O_SHLOCK)) {
lf.l_whence = SEEK_SET;
@@ -4119,13 +4115,7 @@ fhopen(td, uap)
error = VOP_OPEN(vp, fmode, td->td_ucred, td, -1);
if (error)
goto bad;
/*
* Make sure that a VM object is created for VMIO support.
*/
if (vn_canvmio(vp) == TRUE) {
if ((error = VOP_CREATEVOBJECT(vp, td->td_ucred, td)) != 0)
goto bad;
}
if (fmode & FWRITE)
vp->v_writecount++;
@@ -4176,8 +4166,6 @@ fhopen(td, uap)
vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
fp->f_flag |= FHASLOCK;
}
if ((vp->v_type == VREG) && (VOP_GETVOBJECT(vp, NULL) != 0))
VOP_CREATEVOBJECT(vp, td->td_ucred, td);
VOP_UNLOCK(vp, 0, td);
fdrop(fp, td);

View file

@@ -240,40 +240,6 @@ vn_open_cred(ndp, flagp, cmode, cred, fdidx)
}
if ((error = VOP_OPEN(vp, fmode, cred, td, fdidx)) != 0)
goto bad;
/*
* Make sure that a VM object is created for VMIO support.
*/
if (vn_canvmio(vp) == TRUE) {
#ifdef LOOKUP_SHARED
int flock;
if (!exclusive && VOP_GETVOBJECT(vp, NULL) != 0)
VOP_LOCK(vp, LK_UPGRADE, td);
/*
* In cases where the object is marked as dead object_create
* will unlock and relock exclusive. It is safe to call in
* here with a shared lock because we only examine fields that
* the shared lock guarantees will be stable. In the UPGRADE
* case it is not likely that anyone has used this vnode yet
* so there will be no contention. The logic after this call
* restores the requested locking state.
*/
#endif
if ((error = VOP_CREATEVOBJECT(vp, cred, td)) != 0) {
VOP_UNLOCK(vp, 0, td);
VOP_CLOSE(vp, fmode, cred, td);
NDFREE(ndp, NDF_ONLY_PNBUF);
vrele(vp);
VFS_UNLOCK_GIANT(vfslocked);
*flagp = fmode;
return (error);
}
#ifdef LOOKUP_SHARED
flock = VOP_ISLOCKED(vp, td);
if (!exclusive && flock == LK_EXCLUSIVE)
VOP_LOCK(vp, LK_DOWNGRADE, td);
#endif
}
if (fmode & FWRITE)
vp->v_writecount++;