- If our user_pri doesn't match our actual priority, our priority has been
  elevated either due to priority propagation or because we're in the
  kernel; in either case, put us on the current queue so that we don't
  stop others from using important resources.  At some point the priority
  elevations from sleeping in the kernel should go away.  (A standalone
  model of this test is sketched below.)
- Remove an optimization in sched_userret().  Before, we would only set
  NEEDRESCHED if something of a higher priority was available.  This is a
  trivial optimization, and it breaks priority propagation because it
  doesn't take threads which we may be blocking into account.  Notice that
  the thread which is blocking others gets up to one tick of cpu time before
  we honor this NEEDRESCHED in sched_clock().  (See the second sketch below.)
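
For illustration, here is a minimal standalone C model of the SCHED_CURR()
change in the first hunk below.  The struct, the priority numbers, and the
PRI_MIN_TIMESHARE value are simplified stand-ins assumed for this sketch,
not the kernel's real definitions; only the shape of the old and new
predicates mirrors the diff.

/*
 * Userland model of the SCHED_CURR() change.  Everything here is an
 * illustrative stand-in; lower numeric priority is better, as in FreeBSD.
 */
#include <stdbool.h>
#include <stdio.h>

#define	PRI_MIN_TIMESHARE	160	/* assumed value for the sketch */

struct thread_model {
	int	td_priority;	/* current, possibly elevated, priority */
	int	kg_user_pri;	/* the thread's nominal user priority */
	bool	interactive;	/* stand-in for SCHED_INTERACTIVE(kg) */
};

/* Old test: only kernel-range priorities went on the current queue. */
static bool
sched_curr_old(const struct thread_model *t)
{
	return (t->td_priority < PRI_MIN_TIMESHARE || t->interactive);
}

/* New test: any mismatch with user_pri means the priority was elevated. */
static bool
sched_curr_new(const struct thread_model *t)
{
	return (t->td_priority != t->kg_user_pri || t->interactive);
}

int
main(void)
{
	/* Propagated to a better priority still in the timeshare range. */
	struct thread_model t = {
		.td_priority = 170, .kg_user_pri = 200, .interactive = false
	};

	printf("old: %d  new: %d\n", sched_curr_old(&t), sched_curr_new(&t));
	return (0);
}

With these numbers the old test leaves the elevated thread off the current
queue because 170 is still in the timeshare range, while the new test
catches the mismatch with kg_user_pri, which is exactly the case the first
item above describes.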
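Likewise, a minimal model of the sched_userret() change.  Locking, the run
queues, and the removed kseq_choose() comparison are elided, and the struct
fields are assumptions for the sketch; the point is only that the reschedule
flag is now set unconditionally whenever the priority was elevated.

/*
 * Userland model of the new sched_userret() behavior.
 */
#include <stdbool.h>
#include <stdio.h>

struct thread_model {
	int	td_priority;	/* possibly elevated priority */
	int	kg_user_pri;	/* nominal user priority */
	bool	needresched;	/* stand-in for TDF_NEEDRESCHED */
};

/*
 * On return to user space, drop any priority elevation and force a pass
 * through the scheduler.  The removed optimization only set the flag when
 * a higher-priority thread was runnable, which misses waiters that are
 * blocked on a resource this thread holds and so are not on the run queue.
 */
static void
sched_userret_model(struct thread_model *td)
{
	if (td->td_priority != td->kg_user_pri) {
		td->td_priority = td->kg_user_pri;
		td->needresched = true;	/* unconditional, as in the diff */
	}
}

int
main(void)
{
	struct thread_model td = { .td_priority = 130, .kg_user_pri = 180 };

	sched_userret_model(&td);
	printf("pri: %d  needresched: %d\n", td.td_priority, td.needresched);
	return (0);
}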
Author:  Jeff Roberson
Date:    2003-10-15 07:47:06 +00:00
Parent:  0bea487cfa
Commit:  08fd6713b2
Notes:   svn2git 2020-12-20 02:59:44 +00:00
         svn path=/head/; revision=121107

@@ -182,9 +182,8 @@ struct td_sched *thread0_sched = &td_sched;
 #define	SCHED_INTERACTIVE(kg)						\
     (sched_interact_score(kg) < SCHED_INTERACT_THRESH)
 #define	SCHED_CURR(kg, ke)						\
-    (ke->ke_thread->td_priority < PRI_MIN_TIMESHARE ||			\
-    SCHED_INTERACTIVE(kg) ||						\
-    mtx_ownedby(&Giant, (ke)->ke_thread))
+    (ke->ke_thread->td_priority != kg->kg_user_pri ||			\
+    SCHED_INTERACTIVE(kg))
 
 /*
  * Cpu percentage computation macros and defines.
@@ -1152,14 +1151,21 @@ void
 sched_userret(struct thread *td)
 {
 	struct ksegrp *kg;
+#if 0
 	struct kseq *kseq;
 	struct kse *ke;
+#endif
 
 	kg = td->td_ksegrp;
 	if (td->td_priority != kg->kg_user_pri) {
 		mtx_lock_spin(&sched_lock);
 		td->td_priority = kg->kg_user_pri;
+		/*
+		 * This optimization is temporarily disabled because it
+		 * breaks priority propagation.
+		 */
+#if 0
 		kseq = KSEQ_SELF();
 		if (td->td_ksegrp->kg_pri_class == PRI_TIMESHARE &&
 #ifdef SMP
@@ -1169,6 +1175,7 @@ sched_userret(struct thread *td)
 #endif
 		    (ke = kseq_choose(kseq, 0)) != NULL &&
 		    ke->ke_thread->td_priority < td->td_priority)
+#endif
 			curthread->td_flags |= TDF_NEEDRESCHED;
 		mtx_unlock_spin(&sched_lock);
 	}