Move the stuff related to select and poll out of struct vnode.

The use of the zone allocator may or may not be overkill.
There is an XXX: over in ufs/ufs/ufs_vnops.c that jlemon may need
to revisit.

This shaves about 60 bytes off struct vnode, which on my laptop (on the
order of 10,000 vnodes) means about 600k less RAM used for vnodes.
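
In essence (a condensed sketch of the sys/vnode.h hunks below; the other
vnode fields are elided), the change replaces an embedded structure with a
pointer that stays NULL until the vnode is actually polled:

    /* Before: every vnode carries the select/poll state inline. */
    struct {
            struct mtx vpi_lock;        /* lock to protect below */
            struct selinfo vpi_selinfo; /* identity of poller(s) */
            short vpi_events;           /* what they are looking for */
            short vpi_revents;          /* what has happened */
    } v_pollinfo;

    /* After: one pointer; a struct vpollinfo holding the same fields
     * is allocated from vnodepoll_zone on first use. */
    struct vpollinfo *v_pollinfo;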
Poul-Henning Kamp 2002-02-17 21:15:36 +00:00
parent e8b26e995e
commit 4b55dbe36b
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=90791
3 changed files with 71 additions and 40 deletions

sys/kern/vfs_subr.c

@@ -189,6 +189,7 @@ struct nfs_public nfs_pub;
/* Zone for allocation of new vnodes - used exclusively by getnewvnode() */
static vm_zone_t vnode_zone;
+static vm_zone_t vnodepoll_zone;
/* Set to 1 to print out reclaim of active vnodes */
int prtactive;
@@ -253,6 +254,15 @@ static int vnlru_nowhere;
SYSCTL_INT(_debug, OID_AUTO, vnlru_nowhere, CTLFLAG_RW, &vnlru_nowhere, 0,
"Number of times the vnlru process ran without success");
+static __inline void
+v_addpollinfo(struct vnode *vp)
+{
+if (vp->v_pollinfo != NULL)
+return;
+vp->v_pollinfo = zalloc(vnodepoll_zone);
+mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", MTX_DEF);
+}
/*
* Initialize the vnode management data structures.
*/
@@ -269,6 +279,7 @@ vntblinit(void *dummy __unused)
TAILQ_INIT(&vnode_free_list);
mtx_init(&vnode_free_list_mtx, "vnode_free_list", MTX_DEF);
vnode_zone = zinit("VNODE", sizeof (struct vnode), 0, 0, 5);
+vnodepoll_zone = zinit("VNODEPOLL", sizeof (struct vpollinfo), 0, 0, 5);
/*
* Initialize the filesystem syncer.
*/
@@ -764,7 +775,6 @@ getnewvnode(tag, mp, vops, vpp)
freevnodes--;
mtx_unlock(&vnode_free_list_mtx);
cache_purge(vp);
-vp->v_lease = NULL;
if (vp->v_type != VBAD) {
vgonel(vp, td);
} else {
@@ -786,6 +796,11 @@ getnewvnode(tag, mp, vops, vpp)
panic("Non-zero write count");
}
#endif
+if (vp->v_pollinfo) {
+mtx_destroy(&vp->v_pollinfo->vpi_lock);
+zfree(vnodepoll_zone, vp->v_pollinfo);
+}
+vp->v_pollinfo = NULL;
vp->v_flag = 0;
vp->v_lastw = 0;
vp->v_lasta = 0;
@@ -798,7 +813,6 @@ getnewvnode(tag, mp, vops, vpp)
bzero((char *) vp, sizeof *vp);
mtx_init(&vp->v_interlock, "vnode interlock", MTX_DEF);
vp->v_dd = vp;
-mtx_init(&vp->v_pollinfo.vpi_lock, "vnode pollinfo", MTX_DEF);
cache_purge(vp);
LIST_INIT(&vp->v_cache_src);
TAILQ_INIT(&vp->v_cache_dst);
@@ -2093,7 +2107,8 @@ vclean(vp, flags, td)
* Done with purge, notify sleepers of the grim news.
*/
vp->v_op = dead_vnodeop_p;
-vn_pollgone(vp);
+if (vp->v_pollinfo != NULL)
+vn_pollgone(vp);
vp->v_tag = VT_NON;
vp->v_flag &= ~VXLOCK;
vp->v_vxproc = NULL;
@@ -2709,8 +2724,10 @@ vn_pollrecord(vp, td, events)
struct thread *td;
short events;
{
-mtx_lock(&vp->v_pollinfo.vpi_lock);
-if (vp->v_pollinfo.vpi_revents & events) {
+v_addpollinfo(vp);
+mtx_lock(&vp->v_pollinfo->vpi_lock);
+if (vp->v_pollinfo->vpi_revents & events) {
/*
* This leaves events we are not interested
* in available for the other process which
@@ -2718,15 +2735,15 @@ vn_pollrecord(vp, td, events)
* (otherwise they would never have been
* recorded).
*/
-events &= vp->v_pollinfo.vpi_revents;
-vp->v_pollinfo.vpi_revents &= ~events;
+events &= vp->v_pollinfo->vpi_revents;
+vp->v_pollinfo->vpi_revents &= ~events;
-mtx_unlock(&vp->v_pollinfo.vpi_lock);
+mtx_unlock(&vp->v_pollinfo->vpi_lock);
return events;
}
-vp->v_pollinfo.vpi_events |= events;
-selrecord(td, &vp->v_pollinfo.vpi_selinfo);
-mtx_unlock(&vp->v_pollinfo.vpi_lock);
+vp->v_pollinfo->vpi_events |= events;
+selrecord(td, &vp->v_pollinfo->vpi_selinfo);
+mtx_unlock(&vp->v_pollinfo->vpi_lock);
return 0;
}
@@ -2741,8 +2758,10 @@ vn_pollevent(vp, events)
struct vnode *vp;
short events;
{
-mtx_lock(&vp->v_pollinfo.vpi_lock);
-if (vp->v_pollinfo.vpi_events & events) {
+v_addpollinfo(vp);
+mtx_lock(&vp->v_pollinfo->vpi_lock);
+if (vp->v_pollinfo->vpi_events & events) {
/*
* We clear vpi_events so that we don't
* call selwakeup() twice if two events are
@@ -2754,15 +2773,15 @@ vn_pollevent(vp, events)
* a time. (Perhaps we should only clear those
* event bits which we note?) XXX
*/
-vp->v_pollinfo.vpi_events = 0; /* &= ~events ??? */
-vp->v_pollinfo.vpi_revents |= events;
-selwakeup(&vp->v_pollinfo.vpi_selinfo);
+vp->v_pollinfo->vpi_events = 0; /* &= ~events ??? */
+vp->v_pollinfo->vpi_revents |= events;
+selwakeup(&vp->v_pollinfo->vpi_selinfo);
}
-mtx_unlock(&vp->v_pollinfo.vpi_lock);
+mtx_unlock(&vp->v_pollinfo->vpi_lock);
}
#define VN_KNOTE(vp, b) \
-KNOTE((struct klist *)&vp->v_pollinfo.vpi_selinfo.si_note, (b))
+KNOTE((struct klist *)&vp->v_pollinfo->vpi_selinfo.si_note, (b))
/*
* Wake up anyone polling on vp because it is being revoked.
@@ -2773,13 +2792,14 @@ void
vn_pollgone(vp)
struct vnode *vp;
{
-mtx_lock(&vp->v_pollinfo.vpi_lock);
+mtx_lock(&vp->v_pollinfo->vpi_lock);
VN_KNOTE(vp, NOTE_REVOKE);
-if (vp->v_pollinfo.vpi_events) {
-vp->v_pollinfo.vpi_events = 0;
-selwakeup(&vp->v_pollinfo.vpi_selinfo);
+if (vp->v_pollinfo->vpi_events) {
+vp->v_pollinfo->vpi_events = 0;
+selwakeup(&vp->v_pollinfo->vpi_selinfo);
}
-mtx_unlock(&vp->v_pollinfo.vpi_lock);
+mtx_unlock(&vp->v_pollinfo->vpi_lock);
}
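
Taken together, the hunks above give the new poll state this lifecycle (a
condensed sketch of the code in this file; locking context and surrounding
code omitted):

    /* Once at boot, vntblinit() creates the zone: */
    vnodepoll_zone = zinit("VNODEPOLL", sizeof (struct vpollinfo), 0, 0, 5);

    /* On the first vn_pollrecord()/vn_pollevent() for a vnode,
     * v_addpollinfo() allocates and initializes the state: */
    vp->v_pollinfo = zalloc(vnodepoll_zone);
    mtx_init(&vp->v_pollinfo->vpi_lock, "vnode pollinfo", MTX_DEF);

    /* When getnewvnode() recycles the vnode, the state is torn down: */
    if (vp->v_pollinfo) {
            mtx_destroy(&vp->v_pollinfo->vpi_lock);
            zfree(vnodepoll_zone, vp->v_pollinfo);
    }
    vp->v_pollinfo = NULL;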

sys/sys/vnode.h

@@ -82,6 +82,13 @@ TAILQ_HEAD(buflists, buf);
typedef int vop_t __P((void *));
struct namecache;
+struct vpollinfo {
+struct mtx vpi_lock; /* lock to protect below */
+struct selinfo vpi_selinfo; /* identity of poller(s) */
+short vpi_events; /* what they are looking for */
+short vpi_revents; /* what has happened */
+};
/*
* Reading or writing any of these items requires holding the appropriate lock.
* v_freelist is locked by the global vnode_free_list mutex.
@@ -116,7 +123,7 @@ struct vnode {
} v_un;
daddr_t v_lastw; /* last write (write cluster) */
daddr_t v_cstart; /* start block of cluster */
-daddr_t v_lasta; /* last allocation */
+daddr_t v_lasta; /* last allocation (cluster) */
int v_clen; /* length of current cluster */
struct vm_object *v_object; /* Place to store VM object */
struct mtx v_interlock; /* lock on usecount and flag */
@@ -128,12 +135,7 @@ struct vnode {
TAILQ_HEAD(, namecache) v_cache_dst; /* Cache entries to us */
struct vnode *v_dd; /* .. vnode */
u_long v_ddid; /* .. capability identifier */
-struct {
-struct mtx vpi_lock; /* lock to protect below */
-struct selinfo vpi_selinfo; /* identity of poller(s) */
-short vpi_events; /* what they are looking for */
-short vpi_revents; /* what has happened */
-} v_pollinfo;
+struct vpollinfo *v_pollinfo;
struct thread *v_vxproc; /* thread owning VXLOCK */
#ifdef DEBUG_LOCKS
const char *filename; /* Source file doing locking */
@@ -148,13 +150,16 @@ struct vnode {
#define VN_POLLEVENT(vp, events) \
do { \
-if ((vp)->v_pollinfo.vpi_events & (events)) \
+if ((vp)->v_pollinfo != NULL && \
+(vp)->v_pollinfo->vpi_events & (events)) \
vn_pollevent((vp), (events)); \
} while (0)
-#define VN_KNOTE(vp, b) \
-KNOTE(&vp->v_pollinfo.vpi_selinfo.si_note, (b))
+#define VN_KNOTE(vp, b) \
+do { \
+if ((vp)->v_pollinfo != NULL) \
+KNOTE(&vp->v_pollinfo->vpi_selinfo.si_note, (b)); \
+} while (0)
/*
* Vnode flags.

sys/ufs/ufs/ufs_vnops.c

@@ -2473,9 +2473,14 @@ ufs_kqfilter(ap)
kn->kn_hook = (caddr_t)vp;
-mtx_lock(&vp->v_pollinfo.vpi_lock);
-SLIST_INSERT_HEAD(&vp->v_pollinfo.vpi_selinfo.si_note, kn, kn_selnext);
-mtx_unlock(&vp->v_pollinfo.vpi_lock);
+if (vp->v_pollinfo == NULL) {
+/* XXX: call v_addpollinfo(vp) ? */
+printf("ufs_kqfilter: vnode with no v_pollinfo\n");
+return (1);
+}
+mtx_lock(&vp->v_pollinfo->vpi_lock);
+SLIST_INSERT_HEAD(&vp->v_pollinfo->vpi_selinfo.si_note, kn, kn_selnext);
+mtx_unlock(&vp->v_pollinfo->vpi_lock);
return (0);
}
@@ -2485,10 +2490,11 @@ filt_ufsdetach(struct knote *kn)
{
struct vnode *vp = (struct vnode *)kn->kn_hook;
-mtx_lock(&vp->v_pollinfo.vpi_lock);
-SLIST_REMOVE(&vp->v_pollinfo.vpi_selinfo.si_note,
+KASSERT(vp->v_pollinfo != NULL, ("Missing v_pollinfo"));
+mtx_lock(&vp->v_pollinfo->vpi_lock);
+SLIST_REMOVE(&vp->v_pollinfo->vpi_selinfo.si_note,
kn, knote, kn_selnext);
-mtx_unlock(&vp->v_pollinfo.vpi_lock);
+mtx_unlock(&vp->v_pollinfo->vpi_lock);
}
/*ARGSUSED*/
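
One way the XXX in ufs_kqfilter() might be resolved (a hypothetical sketch,
not part of this commit) is to allocate the poll state on demand, the way
vn_pollrecord() now does; this would require exporting v_addpollinfo() from
vfs_subr.c instead of keeping it static:

    v_addpollinfo(vp);          /* would need to become non-static */
    mtx_lock(&vp->v_pollinfo->vpi_lock);
    SLIST_INSERT_HEAD(&vp->v_pollinfo->vpi_selinfo.si_note, kn, kn_selnext);
    mtx_unlock(&vp->v_pollinfo->vpi_lock);
    return (0);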