- Change the msleep()s to condition variables.

  The mbuf and mcluster free lists now each "own" a condition variable,
  m_starved.

- Clean up minor indentation issues in sys/mbuf.h caused by previous
  commit.
Bosko Milekic 2001-04-03 04:50:13 +00:00
parent 1a92ad3d67
commit 4b8ae40a7c
Notes: svn2git 2020-12-20 02:59:44 +00:00
svn path=/head/; revision=75112
2 changed files with 58 additions and 54 deletions
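
For orientation, here is a minimal sketch of the wait-side pattern the commit adopts, using the condvar(9) API rather than the tree's exact symbols: cv_timedwait() drops the associated mutex while blocked, reacquires it before returning, and returns 0 when signaled (or EWOULDBLOCK on timeout), which is why the new code retries the allocation only when retval == 0. All names below (list_mtx, list_starved, list_waiters, list_wait) are placeholders for illustration, not the committed code.

/*
 * Sketch only: a free list protected by a mutex, with a condition
 * variable that starved allocators block on.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>

static struct mtx list_mtx;        /* plays the role of mbuf_mtx */
static struct cv  list_starved;    /* plays the role of m_starved */
static int        list_waiters;    /* plays the role of m_mballoc_wid */

static void
list_init(void)
{
        mtx_init(&list_mtx, "free list lock", MTX_DEF);
        cv_init(&list_starved, "free list starved cv");
}

/*
 * Waiter side; called with list_mtx held.  cv_timedwait() releases
 * the mutex while asleep and reacquires it before returning; it
 * returns 0 if cv_signal()ed, or EWOULDBLOCK if `timo' ticks elapse.
 */
static int
list_wait(int timo)
{
        int error;

        list_waiters++;
        error = cv_timedwait(&list_starved, &list_mtx, timo);
        list_waiters--;
        return (error);        /* caller retries the allocation iff 0 */
}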

sys/kern/uipc_mbuf.c

@@ -40,6 +40,7 @@
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/domain.h>
@@ -95,9 +96,11 @@ SYSCTL_INT(_kern_ipc, OID_AUTO, nmbufs, CTLFLAG_RD, &nmbufs, 0,
"Maximum number of mbufs available");
SYSCTL_INT(_kern_ipc, OID_AUTO, nmbcnt, CTLFLAG_RD, &nmbcnt, 0,
"Maximum number of ext_buf counters available");
#ifndef NMBCLUSTERS
#define NMBCLUSTERS (512 + MAXUSERS * 16)
#endif
TUNABLE_INT_DECL("kern.ipc.nmbclusters", NMBCLUSTERS, nmbclusters);
TUNABLE_INT_DECL("kern.ipc.nmbufs", NMBCLUSTERS * 4, nmbufs);
TUNABLE_INT_DECL("kern.ipc.nmbcnt", EXT_COUNTERS, nmbcnt);
@@ -137,6 +140,8 @@ mbinit(void *dummy)
mclfree.m_head = NULL;
mcntfree.m_head = NULL;
mtx_init(&mbuf_mtx, "mbuf free list lock", MTX_DEF);
cv_init(&mmbfree.m_starved, "mbuf free list starved cv");
cv_init(&mclfree.m_starved, "mbuf cluster free list starved cv");
/*
* Initialize mbuf subsystem (sysctl exported) statistics structure.
@@ -283,7 +288,7 @@ m_mballoc(int nmb, int how)
*
* Here we request for the protocols to free up some resources and, if we
* still cannot get anything, then we wait for an mbuf to be freed for a
* designated (mbuf_wait) time.
* designated (mbuf_wait) time, at most.
*
* Must be called with the mmbfree mutex held.
*/
@@ -309,30 +314,24 @@ m_mballoc_wait(void)
_MGET(p, M_DONTWAIT);
if (p == NULL) {
int retval;
m_mballoc_wid++;
msleep(&m_mballoc_wid, &mbuf_mtx, PVM, "mballc",
retval = cv_timedwait(&mmbfree.m_starved, &mbuf_mtx,
mbuf_wait);
m_mballoc_wid--;
/*
* Try again (one last time).
*
* We retry to fetch _even_ if the sleep timed out. This
* is left this way, purposely, in the [unlikely] case
* that an mbuf was freed but the sleep was not awoken
* in time.
*
* If the sleep didn't time out (i.e. we got woken up) then
* we have the lock so we just grab an mbuf, hopefully.
* If we got signaled (i.e. didn't time out), allocate.
*/
_MGET(p, M_DONTWAIT);
if (retval == 0)
_MGET(p, M_DONTWAIT);
}
/* If we waited and got something... */
if (p != NULL) {
mbstat.m_wait++;
if (mmbfree.m_head != NULL)
MBWAKEUP(m_mballoc_wid);
MBWAKEUP(m_mballoc_wid, &mmbfree.m_starved);
}
return (p);
@@ -389,8 +388,8 @@ m_clalloc(int ncl, int how)
/*
* Once the mb_map submap has been exhausted and the allocation is called with
* M_TRYWAIT, we rely on the mclfree list. If nothing is free, we will
* sleep for a designated amount of time (mbuf_wait) or until we're woken up
* due to sudden mcluster availability.
* block on a cv for a designated amount of time (mbuf_wait) or until we're
* signaled due to sudden mcluster availability.
*
* Must be called with the mclfree lock held.
*/
@@ -398,21 +397,22 @@ caddr_t
m_clalloc_wait(void)
{
caddr_t p = NULL;
int retval;
m_clalloc_wid++;
msleep(&m_clalloc_wid, &mbuf_mtx, PVM, "mclalc", mbuf_wait);
retval = cv_timedwait(&mclfree.m_starved, &mbuf_mtx, mbuf_wait);
m_clalloc_wid--;
/*
* Now that we (think) that we've got something, try again.
*/
_MCLALLOC(p, M_DONTWAIT);
if (retval == 0)
_MCLALLOC(p, M_DONTWAIT);
/* If we waited and got something ... */
if (p != NULL) {
mbstat.m_wait++;
if (mclfree.m_head != NULL)
MBWAKEUP(m_clalloc_wid);
MBWAKEUP(m_clalloc_wid, &mclfree.m_starved);
}
return (p);
@@ -433,7 +433,7 @@ m_reclaim(void)
struct protosw *pr;
#ifdef WITNESS
KASSERT(witness_list(CURPROC) == 0,
KASSERT(witness_list(curproc) == 0,
("m_reclaim called with locks held"));
#endif
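
The sys/mbuf.h changes below embed a struct cv in each free-list head and re-key MBWAKEUP() to signal that cv only when a waiter is registered. Continuing the placeholder names from the sketch above (again illustrative, not the tree's code), the freeing side of the handshake looks roughly like this:

struct item {                      /* placeholder for an mbuf/mcluster */
        struct item *next;
};
static struct item *list_head;     /* plays the role of mmbfree.m_head */

/*
 * Freeing side: push the buffer back and, only if someone is blocked
 * in list_wait(), signal the list's own condition variable -- the
 * same guard the new MBWAKEUP(m_wid, m_cv) macro applies.  Both sides
 * run under list_mtx, so the waiter count and the cv stay consistent.
 */
static void
list_free(struct item *p)
{
        mtx_lock(&list_mtx);
        p->next = list_head;
        list_head = p;
        if (list_waiters > 0)
                cv_signal(&list_starved);
        mtx_unlock(&list_mtx);
}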

sys/mbuf.h

@@ -39,6 +39,7 @@
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
/*
* Mbufs are of a single size, MSIZE (machine/param.h), which
@@ -254,10 +255,12 @@ union mext_refcnt {
*/
struct mbffree_lst {
struct mbuf *m_head;
struct cv m_starved;
};
struct mclfree_lst {
union mcluster *m_head;
struct cv m_starved;
};
struct mcntfree_lst {
@@ -265,14 +268,15 @@ struct mcntfree_lst {
};
/*
* Wake up the next instance (if any) of a sleeping allocation - which is
* waiting for a {cluster, mbuf} to be freed.
* Signal a single instance (if any) blocked on a m_starved cv (i.e. an
* instance waiting for an {mbuf, cluster} to be freed to the global
* cache lists).
*
* Must be called with the appropriate mutex held.
* Must be called with mbuf_mtx held.
*/
#define MBWAKEUP(m_wid) do { \
if ((m_wid)) \
wakeup_one(&(m_wid)); \
#define MBWAKEUP(m_wid, m_cv) do { \
if ((m_wid) > 0) \
cv_signal((m_cv)); \
} while (0)
/*
@@ -298,7 +302,7 @@ struct mcntfree_lst {
#define _MEXT_ALLOC_CNT(m_cnt, how) do { \
union mext_refcnt *__mcnt; \
\
mtx_lock(&mbuf_mtx); \
mtx_lock(&mbuf_mtx); \
if (mcntfree.m_head == NULL) \
m_alloc_ref(1, (how)); \
__mcnt = mcntfree.m_head; \
@@ -307,18 +311,18 @@ struct mcntfree_lst {
mbstat.m_refree--; \
__mcnt->refcnt = 0; \
} \
mtx_unlock(&mbuf_mtx); \
mtx_unlock(&mbuf_mtx); \
(m_cnt) = __mcnt; \
} while (0)
#define _MEXT_DEALLOC_CNT(m_cnt) do { \
union mext_refcnt *__mcnt = (m_cnt); \
\
mtx_lock(&mbuf_mtx); \
mtx_lock(&mbuf_mtx); \
__mcnt->next_ref = mcntfree.m_head; \
mcntfree.m_head = __mcnt; \
mbstat.m_refree++; \
mtx_unlock(&mbuf_mtx); \
mtx_unlock(&mbuf_mtx); \
} while (0)
#define MEXT_INIT_REF(m, how) do { \
@@ -369,15 +373,15 @@ struct mcntfree_lst {
int _mhow = (how); \
int _mtype = (type); \
\
mtx_lock(&mbuf_mtx); \
mtx_lock(&mbuf_mtx); \
_MGET(_mm, _mhow); \
if (_mm != NULL) { \
mbtypes[_mtype]++; \
mtx_unlock(&mbuf_mtx); \
mtx_unlock(&mbuf_mtx); \
_MGET_SETUP(_mm, _mtype); \
} else { \
mbstat.m_drops++; \
mtx_unlock(&mbuf_mtx); \
mbstat.m_drops++; \
mtx_unlock(&mbuf_mtx); \
} \
(m) = _mm; \
} while (0)
@@ -398,15 +402,15 @@ struct mcntfree_lst {
int _mhow = (how); \
int _mtype = (type); \
\
mtx_lock(&mbuf_mtx); \
mtx_lock(&mbuf_mtx); \
_MGET(_mm, _mhow); \
if (_mm != NULL) { \
mbtypes[_mtype]++; \
mtx_unlock(&mbuf_mtx); \
mtx_unlock(&mbuf_mtx); \
_MGETHDR_SETUP(_mm, _mtype); \
} else { \
mbstat.m_drops++; \
mtx_unlock(&mbuf_mtx); \
mbstat.m_drops++; \
mtx_unlock(&mbuf_mtx); \
} \
(m) = _mm; \
} while (0)
@@ -439,7 +443,7 @@ struct mcntfree_lst {
#define MCLGET(m, how) do { \
struct mbuf *_mm = (m); \
\
mtx_lock(&mbuf_mtx); \
mtx_lock(&mbuf_mtx); \
_MCLALLOC(_mm->m_ext.ext_buf, (how)); \
if (_mm->m_ext.ext_buf != NULL) { \
mtx_unlock(&mbuf_mtx); \
@@ -455,10 +459,10 @@ struct mcntfree_lst {
_mm->m_ext.ext_size = MCLBYTES; \
_mm->m_ext.ext_type = EXT_CLUSTER; \
} \
} else { \
mbstat.m_drops++; \
} else { \
mbstat.m_drops++; \
mtx_unlock(&mbuf_mtx); \
} \
} \
} while (0)
#define MEXTADD(m, buf, size, free, args, flags, type) do { \
@@ -479,12 +483,12 @@ struct mcntfree_lst {
#define _MCLFREE(p) do { \
union mcluster *_mp = (union mcluster *)(p); \
\
mtx_lock(&mbuf_mtx); \
mtx_lock(&mbuf_mtx); \
_mp->mcl_next = mclfree.m_head; \
mclfree.m_head = _mp; \
mbstat.m_clfree++; \
MBWAKEUP(m_clalloc_wid); \
mtx_unlock(&mbuf_mtx); \
MBWAKEUP(m_clalloc_wid, &mclfree.m_starved); \
mtx_unlock(&mbuf_mtx); \
} while (0)
/* MEXTFREE:
@@ -519,15 +523,15 @@ struct mcntfree_lst {
KASSERT(_mm->m_type != MT_FREE, ("freeing free mbuf")); \
if (_mm->m_flags & M_EXT) \
MEXTFREE(_mm); \
mtx_lock(&mbuf_mtx); \
mtx_lock(&mbuf_mtx); \
mbtypes[_mm->m_type]--; \
_mm->m_type = MT_FREE; \
mbtypes[MT_FREE]++; \
(n) = _mm->m_next; \
_mm->m_next = mmbfree.m_head; \
mmbfree.m_head = _mm; \
MBWAKEUP(m_mballoc_wid); \
mtx_unlock(&mbuf_mtx); \
MBWAKEUP(m_mballoc_wid, &mmbfree.m_starved); \
mtx_unlock(&mbuf_mtx); \
} while (0)
/*
@@ -618,10 +622,10 @@ struct mcntfree_lst {
struct mbuf *_mm = (m); \
int _mt = (t); \
\
mtx_lock(&mbuf_mtx); \
mbtypes[_mm->m_type]--; \
mbtypes[_mt]++; \
mtx_unlock(&mbuf_mtx); \
mtx_lock(&mbuf_mtx); \
mbtypes[_mm->m_type]--; \
mbtypes[_mt]++; \
mtx_unlock(&mbuf_mtx); \
_mm->m_type = (_mt); \
} while (0)
@@ -653,7 +657,7 @@ extern struct mbuf *mbutl; /* virtual address of mclusters */
extern struct mclfree_lst mclfree;
extern struct mbffree_lst mmbfree;
extern struct mcntfree_lst mcntfree;
extern struct mtx mbuf_mtx;
extern struct mtx mbuf_mtx;
extern int nmbclusters;
extern int nmbufs;
extern int nsfbufs;
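
Putting the two halves together, the slow allocation path now reads roughly as below (same placeholder names as the sketches above; the real m_mballoc_wait()/m_clalloc_wait() additionally ask the protocols to drain and update mbstat):

/*
 * Slow-path sketch; called with list_mtx held after the fast path
 * found the list empty.  Retry only if cv_timedwait() returned 0,
 * i.e. we were signaled before the mbuf_wait-style timeout expired.
 */
static struct item *
list_alloc_wait(int timo)
{
        struct item *p;

        p = list_head;
        if (p == NULL && list_wait(timo) == 0)
                p = list_head;          /* something was freed; retry */
        if (p != NULL) {
                list_head = p->next;
                if (list_head != NULL && list_waiters > 0)
                        cv_signal(&list_starved);   /* pass the wakeup on */
        }
        return (p);
}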