Refactor the existing machine-dependent sf_buf_free() into a
machine-dependent function by the same name and a machine-independent function,
sf_buf_mext().  Aside from the virtue of making more of the code machine-
independent, this change also makes the interface more logical.  Before,
sf_buf_free() did more than simply undo an sf_buf_alloc(); it also
unwired and if necessary freed the page.  That is now the purpose of
sf_buf_mext().  Thus, sf_buf_alloc() and sf_buf_free() can now be used
as a general-purpose ephemeral map cache.
This commit is contained in:
Alan Cox 2004-03-16 19:04:28 +00:00
parent 27de234992
commit 90ecfebd82
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=127086
10 changed files with 48 additions and 127 deletions

View file

@ -437,27 +437,12 @@ sf_buf_alloc(struct vm_page *m)
}
/*
* Detatch mapped page and release resources back to the system.
* Release resources back to the system.
*/
void
sf_buf_free(void *addr, void *args)
sf_buf_free(struct sf_buf *sf)
{
struct sf_buf *sf;
struct vm_page *m;
sf = args;
m = sf->m;
vm_page_lock_queues();
vm_page_unwire(m, 0);
/*
* Check for the object going away on us. This can
* happen since we don't hold a reference to it.
* If so, we're responsible for freeing the page.
*/
if (m->wire_count == 0 && m->object == NULL)
vm_page_free(m);
vm_page_unlock_queues();
sf->m = NULL;
mtx_lock(&sf_freelist.sf_lock);
SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
nsfbufsused--;

View file

@ -490,27 +490,12 @@ sf_buf_alloc(struct vm_page *m)
}
/*
* Detatch mapped page and release resources back to the system.
* Release resources back to the system.
*/
void
sf_buf_free(void *addr, void *args)
sf_buf_free(struct sf_buf *sf)
{
struct sf_buf *sf;
struct vm_page *m;
sf = args;
m = sf->m;
vm_page_lock_queues();
vm_page_unwire(m, 0);
/*
* Check for the object going away on us. This can
* happen since we don't hold a reference to it.
* If so, we're responsible for freeing the page.
*/
if (m->wire_count == 0 && m->object == NULL)
vm_page_free(m);
vm_page_unlock_queues();
sf->m = NULL;
mtx_lock(&sf_freelist.sf_lock);
SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
nsfbufsused--;

View file

@ -653,36 +653,24 @@ sf_buf_alloc(struct vm_page *m)
}
/*
* Detatch mapped page and release resources back to the system.
* Remove a reference from the given sf_buf, adding it to the free
* list when its reference count reaches zero. A freed sf_buf still,
* however, retains its virtual-to-physical mapping until it is
* recycled or reactivated by sf_buf_alloc(9).
*/
void
sf_buf_free(void *addr, void *args)
sf_buf_free(struct sf_buf *sf)
{
struct sf_buf *sf;
struct vm_page *m;
sf = args;
mtx_lock(&sf_buf_lock);
m = sf->m;
sf->ref_count--;
if (sf->ref_count == 0) {
nsfbufsused--;
TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
nsfbufsused--;
if (sf_buf_alloc_want > 0)
wakeup_one(&sf_buf_freelist);
}
mtx_unlock(&sf_buf_lock);
vm_page_lock_queues();
vm_page_unwire(m, 0);
/*
* Check for the object going away on us. This can
* happen since we don't hold a reference to it.
* If so, we're responsible for freeing the page.
*/
if (m->wire_count == 0 && m->object == NULL)
vm_page_free(m);
vm_page_unlock_queues();
}
/*

View file

@ -374,27 +374,12 @@ sf_buf_alloc(struct vm_page *m)
}
/*
* Detach mapped page and release resources back to the system.
* Release resources back to the system.
*/
void
sf_buf_free(void *addr, void *args)
sf_buf_free(struct sf_buf *sf)
{
struct sf_buf *sf;
struct vm_page *m;
sf = args;
m = sf->m;
vm_page_lock_queues();
vm_page_unwire(m, 0);
/*
* Check for the object going away on us. This can happen since we
* don't hold a reference to it. If so, we're responsible for freeing
* the page.
*/
if (m->wire_count == 0 && m->object == NULL)
vm_page_free(m);
vm_page_unlock_queues();
sf->m = NULL;
mtx_lock(&sf_freelist.sf_lock);
SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
nsfbufsused--;

View file

@ -85,7 +85,7 @@ socow_iodone(void *addr, void *args)
vm_page_unlock_queues();
splx(s);
/* note that sf_buf_free() unwires the page for us*/
sf_buf_free(addr, args);
sf_buf_mext(addr, args);
socow_stats.iodone++;
}

View file

@ -1631,6 +1631,28 @@ getsockaddr(namp, uaddr, len)
return (error);
}
/*
* Detatch mapped page and release resources back to the system.
*/
void
sf_buf_mext(void *addr, void *args)
{
vm_page_t m;
m = sf_buf_page(args);
sf_buf_free(args);
vm_page_lock_queues();
vm_page_unwire(m, 0);
/*
* Check for the object going away on us. This can
* happen since we don't hold a reference to it.
* If so, we're responsible for freeing the page.
*/
if (m->wire_count == 0 && m->object == NULL)
vm_page_free(m);
vm_page_unlock_queues();
}
/*
* sendfile(2)
*
@ -1917,14 +1939,14 @@ do_sendfile(struct thread *td, struct sendfile_args *uap, int compat)
MGETHDR(m, M_TRYWAIT, MT_DATA);
if (m == NULL) {
error = ENOBUFS;
sf_buf_free((void *)sf_buf_kva(sf), sf);
sf_buf_mext((void *)sf_buf_kva(sf), sf);
sbunlock(&so->so_snd);
goto done;
}
/*
* Setup external storage for mbuf.
*/
MEXTADD(m, sf_buf_kva(sf), PAGE_SIZE, sf_buf_free, sf, M_RDONLY,
MEXTADD(m, sf_buf_kva(sf), PAGE_SIZE, sf_buf_mext, sf, M_RDONLY,
EXT_SFBUF);
m->m_data = (char *)sf_buf_kva(sf) + pgoff;
m->m_pkthdr.len = m->m_len = xfsize;

View file

@ -292,28 +292,13 @@ sf_buf_alloc(struct vm_page *m)
}
/*
* Detatch mapped page and release resources back to the system.
* Release resources back to the system.
*/
void
sf_buf_free(void *addr, void *args)
sf_buf_free(struct sf_buf *sf)
{
struct sf_buf *sf;
struct vm_page *m;
sf = args;
pmap_qremove((vm_offset_t)addr, 1);
m = sf->m;
vm_page_lock_queues();
vm_page_unwire(m, 0);
/*
* Check for the object going away on us. This can
* happen since we don't hold a reference to it.
* If so, we're responsible for freeing the page.
*/
if (m->wire_count == 0 && m->object == NULL)
vm_page_free(m);
vm_page_unlock_queues();
sf->m = NULL;
pmap_qremove(sf->kva, 1);
mtx_lock(&sf_freelist.sf_lock);
SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
nsfbufsused--;

View file

@ -292,28 +292,13 @@ sf_buf_alloc(struct vm_page *m)
}
/*
* Detatch mapped page and release resources back to the system.
* Release resources back to the system.
*/
void
sf_buf_free(void *addr, void *args)
sf_buf_free(struct sf_buf *sf)
{
struct sf_buf *sf;
struct vm_page *m;
sf = args;
pmap_qremove((vm_offset_t)addr, 1);
m = sf->m;
vm_page_lock_queues();
vm_page_unwire(m, 0);
/*
* Check for the object going away on us. This can
* happen since we don't hold a reference to it.
* If so, we're responsible for freeing the page.
*/
if (m->wire_count == 0 && m->object == NULL)
vm_page_free(m);
vm_page_unlock_queues();
sf->m = NULL;
pmap_qremove(sf->kva, 1);
mtx_lock(&sf_freelist.sf_lock);
SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
nsfbufsused--;

View file

@ -419,28 +419,13 @@ sf_buf_alloc(struct vm_page *m)
}
/*
* Detatch mapped page and release resources back to the system.
* Release resources back to the system.
*/
void
sf_buf_free(void *addr, void *args)
sf_buf_free(struct sf_buf *sf)
{
struct sf_buf *sf;
struct vm_page *m;
sf = args;
pmap_qremove((vm_offset_t)addr, 1);
m = sf->m;
vm_page_lock_queues();
vm_page_unwire(m, 0);
/*
* Check for the object going away on us. This can
* happen since we don't hold a reference to it.
* If so, we're responsible for freeing the page.
*/
if (m->wire_count == 0 && m->object == NULL)
vm_page_free(m);
vm_page_unlock_queues();
sf->m = NULL;
pmap_qremove(sf->kva, 1);
mtx_lock(&sf_freelist.sf_lock);
SLIST_INSERT_HEAD(&sf_freelist.sf_head, sf, free_list);
nsfbufsused--;

View file

@ -39,6 +39,7 @@ extern int nsfbufsused; /* Number of sendfile(2) bufs in use */
struct sf_buf *
sf_buf_alloc(struct vm_page *m);
void sf_buf_free(void *addr, void *args);
void sf_buf_free(struct sf_buf *sf);
void sf_buf_mext(void *addr, void *args);
#endif /* !_SYS_SF_BUF_H_ */