mirror of
https://github.com/freebsd/freebsd-src
synced 2024-09-30 13:44:55 +00:00
rangelocks: stop caching per-thread rl_q_entry
This should reduce the frequency of smr_synchronize() calls, which otherwise occur on almost every rangelock unlock. Reviewed by: markj Sponsored by: The FreeBSD Foundation Differential revision: https://reviews.freebsd.org/D46482
This commit is contained in:
parent
41e016289f
commit
7e49f04c88
|
@ -313,15 +313,8 @@ static struct rl_q_entry *
|
|||
rlqentry_alloc(vm_ooffset_t start, vm_ooffset_t end, int flags)
|
||||
{
|
||||
struct rl_q_entry *e;
|
||||
struct thread *td;
|
||||
|
||||
td = curthread;
|
||||
if (td->td_rlqe != NULL) {
|
||||
e = td->td_rlqe;
|
||||
td->td_rlqe = NULL;
|
||||
} else {
|
||||
e = uma_zalloc_smr(rl_entry_zone, M_WAITOK);
|
||||
}
|
||||
e = uma_zalloc_smr(rl_entry_zone, M_WAITOK);
|
||||
e->rl_q_next = NULL;
|
||||
e->rl_q_free = NULL;
|
||||
e->rl_q_start = start;
|
||||
|
@ -333,12 +326,6 @@ rlqentry_alloc(vm_ooffset_t start, vm_ooffset_t end, int flags)
|
|||
return (e);
|
||||
}
|
||||
|
||||
void
|
||||
rangelock_entry_free(struct rl_q_entry *e)
|
||||
{
|
||||
uma_zfree_smr(rl_entry_zone, e);
|
||||
}
|
||||
|
||||
void
|
||||
rangelock_init(struct rangelock *lock)
|
||||
{
|
||||
|
@ -401,19 +388,12 @@ static void
|
|||
rangelock_free_free(struct rl_q_entry *free)
|
||||
{
|
||||
struct rl_q_entry *x, *xp;
|
||||
struct thread *td;
|
||||
|
||||
td = curthread;
|
||||
for (x = free; x != NULL; x = xp) {
|
||||
MPASS(!rl_e_is_marked(x));
|
||||
xp = x->rl_q_free;
|
||||
MPASS(!rl_e_is_marked(xp));
|
||||
if (td->td_rlqe == NULL) {
|
||||
smr_synchronize(rl_smr);
|
||||
td->td_rlqe = x;
|
||||
} else {
|
||||
uma_zfree_smr(rl_entry_zone, x);
|
||||
}
|
||||
uma_zfree_smr(rl_entry_zone, x);
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -460,7 +460,6 @@ thread_init(void *mem, int size, int flags)
|
|||
td->td_allocdomain = vm_phys_domain(vtophys(td));
|
||||
td->td_sleepqueue = sleepq_alloc();
|
||||
td->td_turnstile = turnstile_alloc();
|
||||
td->td_rlqe = NULL;
|
||||
EVENTHANDLER_DIRECT_INVOKE(thread_init, td);
|
||||
umtx_thread_init(td);
|
||||
td->td_kstack = 0;
|
||||
|
@ -480,7 +479,6 @@ thread_fini(void *mem, int size)
|
|||
EVENTHANDLER_DIRECT_INVOKE(thread_fini, td);
|
||||
turnstile_free(td->td_turnstile);
|
||||
sleepq_free(td->td_sleepqueue);
|
||||
rangelock_entry_free(td->td_rlqe);
|
||||
umtx_thread_fini(td);
|
||||
MPASS(td->td_sel == NULL);
|
||||
}
|
||||
|
|
|
@ -247,7 +247,7 @@ struct thread {
|
|||
struct seltd *td_sel; /* Select queue/channel. */
|
||||
struct sleepqueue *td_sleepqueue; /* (k) Associated sleep queue. */
|
||||
struct turnstile *td_turnstile; /* (k) Associated turnstile. */
|
||||
struct rl_q_entry *td_rlqe; /* (k) Associated range lock entry. */
|
||||
void *td_pad1; /* Available */
|
||||
struct umtx_q *td_umtxq; /* (c?) Link for when we're blocked. */
|
||||
lwpid_t td_tid; /* (b) Thread ID. */
|
||||
sigqueue_t td_sigqueue; /* (c) Sigs arrived, not delivered. */
|
||||
|
|
|
@ -65,7 +65,6 @@ void *rangelock_wlock(struct rangelock *lock, vm_ooffset_t start,
|
|||
vm_ooffset_t end);
|
||||
void *rangelock_trywlock(struct rangelock *lock, vm_ooffset_t start,
|
||||
vm_ooffset_t end);
|
||||
void rangelock_entry_free(struct rl_q_entry *e);
|
||||
void rangelock_may_recurse(struct rangelock *lock);
|
||||
#if defined(INVARIANTS) || defined(INVARIANT_SUPPORT)
|
||||
void _rangelock_cookie_assert(void *cookie, int what, const char *file,
|
||||
|
|
Loading…
Reference in a new issue